repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
AirFortressIlikara/LS2K0300-linux-4.19
11,595
arch/x86/crypto/sha1_ssse3_asm.S
/* * This is a SIMD SHA-1 implementation. It requires the Intel(R) Supplemental * SSE3 instruction set extensions introduced in Intel Core Microarchitecture * processors. CPUs supporting Intel(R) AVX extensions will get an additional * boost. * * This work was inspired by the vectorized implementation of Dean Gaudet. * Additional information on it can be found at: * http://www.arctic.org/~dean/crypto/sha1.html * * It was improved upon with more efficient vectorization of the message * scheduling. This implementation has also been optimized for all current and * several future generations of Intel CPUs. * * See this article for more information about the implementation details: * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/ * * Copyright (C) 2010, Intel Corp. * Authors: Maxim Locktyukhin <maxim.locktyukhin@intel.com> * Ronen Zohar <ronen.zohar@intel.com> * * Converted to AT&T syntax and adapted for inclusion in the Linux kernel: * Author: Mathias Krause <minipli@googlemail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
*/ #include <linux/linkage.h> #define CTX %rdi // arg1 #define BUF %rsi // arg2 #define CNT %rdx // arg3 #define REG_A %ecx #define REG_B %esi #define REG_C %edi #define REG_D %r12d #define REG_E %edx #define REG_T1 %eax #define REG_T2 %ebx #define K_BASE %r8 #define HASH_PTR %r9 #define BUFFER_PTR %r10 #define BUFFER_END %r11 #define W_TMP1 %xmm0 #define W_TMP2 %xmm9 #define W0 %xmm1 #define W4 %xmm2 #define W8 %xmm3 #define W12 %xmm4 #define W16 %xmm5 #define W20 %xmm6 #define W24 %xmm7 #define W28 %xmm8 #define XMM_SHUFB_BSWAP %xmm10 /* we keep window of 64 w[i]+K pre-calculated values in a circular buffer */ #define WK(t) (((t) & 15) * 4)(%rsp) #define W_PRECALC_AHEAD 16 /* * This macro implements the SHA-1 function's body for single 64-byte block * param: function's name */ .macro SHA1_VECTOR_ASM name ENTRY(\name) push %rbx push %r12 push %rbp mov %rsp, %rbp sub $64, %rsp # allocate workspace and $~15, %rsp # align stack mov CTX, HASH_PTR mov BUF, BUFFER_PTR shl $6, CNT # multiply by 64 add BUF, CNT mov CNT, BUFFER_END lea K_XMM_AR(%rip), K_BASE xmm_mov BSWAP_SHUFB_CTL(%rip), XMM_SHUFB_BSWAP SHA1_PIPELINED_MAIN_BODY # cleanup workspace mov $8, %ecx mov %rsp, %rdi xor %eax, %eax rep stosq mov %rbp, %rsp # deallocate workspace pop %rbp pop %r12 pop %rbx ret ENDPROC(\name) .endm /* * This macro implements 80 rounds of SHA-1 for one 64-byte block */ .macro SHA1_PIPELINED_MAIN_BODY INIT_REGALLOC mov (HASH_PTR), A mov 4(HASH_PTR), B mov 8(HASH_PTR), C mov 12(HASH_PTR), D mov 16(HASH_PTR), E .set i, 0 .rept W_PRECALC_AHEAD W_PRECALC i .set i, (i+1) .endr .align 4 1: RR F1,A,B,C,D,E,0 RR F1,D,E,A,B,C,2 RR F1,B,C,D,E,A,4 RR F1,E,A,B,C,D,6 RR F1,C,D,E,A,B,8 RR F1,A,B,C,D,E,10 RR F1,D,E,A,B,C,12 RR F1,B,C,D,E,A,14 RR F1,E,A,B,C,D,16 RR F1,C,D,E,A,B,18 RR F2,A,B,C,D,E,20 RR F2,D,E,A,B,C,22 RR F2,B,C,D,E,A,24 RR F2,E,A,B,C,D,26 RR F2,C,D,E,A,B,28 RR F2,A,B,C,D,E,30 RR F2,D,E,A,B,C,32 RR F2,B,C,D,E,A,34 RR F2,E,A,B,C,D,36 RR F2,C,D,E,A,B,38 RR F3,A,B,C,D,E,40 RR 
F3,D,E,A,B,C,42 RR F3,B,C,D,E,A,44 RR F3,E,A,B,C,D,46 RR F3,C,D,E,A,B,48 RR F3,A,B,C,D,E,50 RR F3,D,E,A,B,C,52 RR F3,B,C,D,E,A,54 RR F3,E,A,B,C,D,56 RR F3,C,D,E,A,B,58 add $64, BUFFER_PTR # move to the next 64-byte block cmp BUFFER_END, BUFFER_PTR # if the current is the last one use cmovae K_BASE, BUFFER_PTR # dummy source to avoid buffer overrun RR F4,A,B,C,D,E,60 RR F4,D,E,A,B,C,62 RR F4,B,C,D,E,A,64 RR F4,E,A,B,C,D,66 RR F4,C,D,E,A,B,68 RR F4,A,B,C,D,E,70 RR F4,D,E,A,B,C,72 RR F4,B,C,D,E,A,74 RR F4,E,A,B,C,D,76 RR F4,C,D,E,A,B,78 UPDATE_HASH (HASH_PTR), A UPDATE_HASH 4(HASH_PTR), B UPDATE_HASH 8(HASH_PTR), C UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E RESTORE_RENAMED_REGS cmp K_BASE, BUFFER_PTR # K_BASE means, we reached the end jne 1b .endm .macro INIT_REGALLOC .set A, REG_A .set B, REG_B .set C, REG_C .set D, REG_D .set E, REG_E .set T1, REG_T1 .set T2, REG_T2 .endm .macro RESTORE_RENAMED_REGS # order is important (REG_C is where it should be) mov B, REG_B mov D, REG_D mov A, REG_A mov E, REG_E .endm .macro SWAP_REG_NAMES a, b .set _T, \a .set \a, \b .set \b, _T .endm .macro F1 b, c, d mov \c, T1 SWAP_REG_NAMES \c, T1 xor \d, T1 and \b, T1 xor \d, T1 .endm .macro F2 b, c, d mov \d, T1 SWAP_REG_NAMES \d, T1 xor \c, T1 xor \b, T1 .endm .macro F3 b, c ,d mov \c, T1 SWAP_REG_NAMES \c, T1 mov \b, T2 or \b, T1 and \c, T2 and \d, T1 or T2, T1 .endm .macro F4 b, c, d F2 \b, \c, \d .endm .macro UPDATE_HASH hash, val add \hash, \val mov \val, \hash .endm /* * RR does two rounds of SHA-1 back to back with W[] pre-calc * t1 = F(b, c, d); e += w(i) * e += t1; b <<= 30; d += w(i+1); * t1 = F(a, b, c); * d += t1; a <<= 5; * e += a; * t1 = e; a >>= 7; * t1 <<= 5; * d += t1; */ .macro RR F, a, b, c, d, e, round add WK(\round), \e \F \b, \c, \d # t1 = F(b, c, d); W_PRECALC (\round + W_PRECALC_AHEAD) rol $30, \b add T1, \e add WK(\round + 1), \d \F \a, \b, \c W_PRECALC (\round + W_PRECALC_AHEAD + 1) rol $5, \a add \a, \e add T1, \d ror $7, \a # (a <<r 5) >>r 7) => a 
<<r 30) mov \e, T1 SWAP_REG_NAMES \e, T1 rol $5, T1 add T1, \d # write: \a, \b # rotate: \a<=\d, \b<=\e, \c<=\a, \d<=\b, \e<=\c .endm .macro W_PRECALC r .set i, \r .if (i < 20) .set K_XMM, 0 .elseif (i < 40) .set K_XMM, 16 .elseif (i < 60) .set K_XMM, 32 .elseif (i < 80) .set K_XMM, 48 .endif .if ((i < 16) || ((i >= 80) && (i < (80 + W_PRECALC_AHEAD)))) .set i, ((\r) % 80) # pre-compute for the next iteration .if (i == 0) W_PRECALC_RESET .endif W_PRECALC_00_15 .elseif (i<32) W_PRECALC_16_31 .elseif (i < 80) // rounds 32-79 W_PRECALC_32_79 .endif .endm .macro W_PRECALC_RESET .set W, W0 .set W_minus_04, W4 .set W_minus_08, W8 .set W_minus_12, W12 .set W_minus_16, W16 .set W_minus_20, W20 .set W_minus_24, W24 .set W_minus_28, W28 .set W_minus_32, W .endm .macro W_PRECALC_ROTATE .set W_minus_32, W_minus_28 .set W_minus_28, W_minus_24 .set W_minus_24, W_minus_20 .set W_minus_20, W_minus_16 .set W_minus_16, W_minus_12 .set W_minus_12, W_minus_08 .set W_minus_08, W_minus_04 .set W_minus_04, W .set W, W_minus_32 .endm .macro W_PRECALC_SSSE3 .macro W_PRECALC_00_15 W_PRECALC_00_15_SSSE3 .endm .macro W_PRECALC_16_31 W_PRECALC_16_31_SSSE3 .endm .macro W_PRECALC_32_79 W_PRECALC_32_79_SSSE3 .endm /* message scheduling pre-compute for rounds 0-15 */ .macro W_PRECALC_00_15_SSSE3 .if ((i & 3) == 0) movdqu (i*4)(BUFFER_PTR), W_TMP1 .elseif ((i & 3) == 1) pshufb XMM_SHUFB_BSWAP, W_TMP1 movdqa W_TMP1, W .elseif ((i & 3) == 2) paddd (K_BASE), W_TMP1 .elseif ((i & 3) == 3) movdqa W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm /* message scheduling pre-compute for rounds 16-31 * * - calculating last 32 w[i] values in 8 XMM registers * - pre-calculate K+w[i] values and store to mem, for later load by ALU add * instruction * * some "heavy-lifting" vectorization for rounds 16-31 due to w[i]->w[i-3] * dependency, but improves for 32-79 */ .macro W_PRECALC_16_31_SSSE3 # blended scheduling of vector and scalar instruction streams, one 4-wide # vector iteration / 4 scalar rounds .if ((i & 3) == 
0) movdqa W_minus_12, W palignr $8, W_minus_16, W # w[i-14] movdqa W_minus_04, W_TMP1 psrldq $4, W_TMP1 # w[i-3] pxor W_minus_08, W .elseif ((i & 3) == 1) pxor W_minus_16, W_TMP1 pxor W_TMP1, W movdqa W, W_TMP2 movdqa W, W_TMP1 pslldq $12, W_TMP2 .elseif ((i & 3) == 2) psrld $31, W pslld $1, W_TMP1 por W, W_TMP1 movdqa W_TMP2, W psrld $30, W_TMP2 pslld $2, W .elseif ((i & 3) == 3) pxor W, W_TMP1 pxor W_TMP2, W_TMP1 movdqa W_TMP1, W paddd K_XMM(K_BASE), W_TMP1 movdqa W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm /* message scheduling pre-compute for rounds 32-79 * * in SHA-1 specification: w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 * instead we do equal: w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 * allows more efficient vectorization since w[i]=>w[i-3] dependency is broken */ .macro W_PRECALC_32_79_SSSE3 .if ((i & 3) == 0) movdqa W_minus_04, W_TMP1 pxor W_minus_28, W # W is W_minus_32 before xor palignr $8, W_minus_08, W_TMP1 .elseif ((i & 3) == 1) pxor W_minus_16, W pxor W_TMP1, W movdqa W, W_TMP1 .elseif ((i & 3) == 2) psrld $30, W pslld $2, W_TMP1 por W, W_TMP1 .elseif ((i & 3) == 3) movdqa W_TMP1, W paddd K_XMM(K_BASE), W_TMP1 movdqa W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm .endm // W_PRECALC_SSSE3 #define K1 0x5a827999 #define K2 0x6ed9eba1 #define K3 0x8f1bbcdc #define K4 0xca62c1d6 .section .rodata .align 16 K_XMM_AR: .long K1, K1, K1, K1 .long K2, K2, K2, K2 .long K3, K3, K3, K3 .long K4, K4, K4, K4 BSWAP_SHUFB_CTL: .long 0x00010203 .long 0x04050607 .long 0x08090a0b .long 0x0c0d0e0f .section .text W_PRECALC_SSSE3 .macro xmm_mov a, b movdqu \a,\b .endm /* SSSE3 optimized implementation: * extern "C" void sha1_transform_ssse3(u32 *digest, const char *data, u32 *ws, * unsigned int rounds); */ SHA1_VECTOR_ASM sha1_transform_ssse3 #ifdef CONFIG_AS_AVX .macro W_PRECALC_AVX .purgem W_PRECALC_00_15 .macro W_PRECALC_00_15 W_PRECALC_00_15_AVX .endm .purgem W_PRECALC_16_31 .macro W_PRECALC_16_31 W_PRECALC_16_31_AVX .endm .purgem W_PRECALC_32_79 
.macro W_PRECALC_32_79 W_PRECALC_32_79_AVX .endm .macro W_PRECALC_00_15_AVX .if ((i & 3) == 0) vmovdqu (i*4)(BUFFER_PTR), W_TMP1 .elseif ((i & 3) == 1) vpshufb XMM_SHUFB_BSWAP, W_TMP1, W .elseif ((i & 3) == 2) vpaddd (K_BASE), W, W_TMP1 .elseif ((i & 3) == 3) vmovdqa W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm .macro W_PRECALC_16_31_AVX .if ((i & 3) == 0) vpalignr $8, W_minus_16, W_minus_12, W # w[i-14] vpsrldq $4, W_minus_04, W_TMP1 # w[i-3] vpxor W_minus_08, W, W vpxor W_minus_16, W_TMP1, W_TMP1 .elseif ((i & 3) == 1) vpxor W_TMP1, W, W vpslldq $12, W, W_TMP2 vpslld $1, W, W_TMP1 .elseif ((i & 3) == 2) vpsrld $31, W, W vpor W, W_TMP1, W_TMP1 vpslld $2, W_TMP2, W vpsrld $30, W_TMP2, W_TMP2 .elseif ((i & 3) == 3) vpxor W, W_TMP1, W_TMP1 vpxor W_TMP2, W_TMP1, W vpaddd K_XMM(K_BASE), W, W_TMP1 vmovdqu W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm .macro W_PRECALC_32_79_AVX .if ((i & 3) == 0) vpalignr $8, W_minus_08, W_minus_04, W_TMP1 vpxor W_minus_28, W, W # W is W_minus_32 before xor .elseif ((i & 3) == 1) vpxor W_minus_16, W_TMP1, W_TMP1 vpxor W_TMP1, W, W .elseif ((i & 3) == 2) vpslld $2, W, W_TMP1 vpsrld $30, W, W vpor W, W_TMP1, W .elseif ((i & 3) == 3) vpaddd K_XMM(K_BASE), W, W_TMP1 vmovdqu W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm .endm // W_PRECALC_AVX W_PRECALC_AVX .purgem xmm_mov .macro xmm_mov a, b vmovdqu \a,\b .endm /* AVX optimized implementation: * extern "C" void sha1_transform_avx(u32 *digest, const char *data, u32 *ws, * unsigned int rounds); */ SHA1_VECTOR_ASM sha1_transform_avx #endif
AirFortressIlikara/LS2K0300-linux-4.19
8,538
arch/x86/crypto/twofish-x86_64-asm_64.S
/*************************************************************************** * Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ .file "twofish-x86_64-asm.S" .text #include <linux/linkage.h> #include <asm/asm-offsets.h> #define a_offset 0 #define b_offset 4 #define c_offset 8 #define d_offset 12 /* Structure of the crypto context struct*/ #define s0 0 /* S0 Array 256 Words each */ #define s1 1024 /* S1 Array */ #define s2 2048 /* S2 Array */ #define s3 3072 /* S3 Array */ #define w 4096 /* 8 whitening keys (word) */ #define k 4128 /* key 1-32 ( word ) */ /* define a few register aliases to allow macro substitution */ #define R0 %rax #define R0D %eax #define R0B %al #define R0H %ah #define R1 %rbx #define R1D %ebx #define R1B %bl #define R1H %bh #define R2 %rcx #define R2D %ecx #define R2B %cl #define R2H %ch #define R3 %rdx #define R3D %edx #define R3B %dl #define R3H %dh /* performs input whitening */ #define input_whitening(src,context,offset)\ xor w+offset(context), src; /* performs input whitening */ #define output_whitening(src,context,offset)\ xor w+16+offset(context), src; /* * a input register containing a (rotated 16) * b input register 
containing b * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance */ #define encrypt_round(a,b,c,d,round)\ movzx b ## B, %edi;\ mov s1(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ mov s2(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor s2(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%r11,%rdi,4),%r9d;\ movzx b ## B, %edi;\ xor s3(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ xor (%r11,%rdi,4), %r9d;\ movzx b ## H, %edi;\ ror $15, b ## D;\ xor (%r11,%rdi,4), %r8d;\ movzx a ## H, %edi;\ xor s1(%r11,%rdi,4),%r9d;\ add %r8d, %r9d;\ add %r9d, %r8d;\ add k+round(%r11), %r9d;\ xor %r9d, c ## D;\ rol $15, c ## D;\ add k+4+round(%r11),%r8d;\ xor %r8d, d ## D; /* * a input register containing a(rotated 16) * b input register containing b * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance * during the round a and b are prepared for the output whitening */ #define encrypt_last_round(a,b,c,d,round)\ mov b ## D, %r10d;\ shl $32, %r10;\ movzx b ## B, %edi;\ mov s1(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ mov s2(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor s2(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%r11,%rdi,4),%r9d;\ movzx b ## B, %edi;\ xor s3(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ xor (%r11,%rdi,4), %r9d;\ xor a, %r10;\ movzx b ## H, %edi;\ xor (%r11,%rdi,4), %r8d;\ movzx a ## H, %edi;\ xor s1(%r11,%rdi,4),%r9d;\ add %r8d, %r9d;\ add %r9d, %r8d;\ add k+round(%r11), %r9d;\ xor %r9d, c ## D;\ ror $1, c ## D;\ add k+4+round(%r11),%r8d;\ xor %r8d, d ## D /* * a input register containing a * b input register containing b (rotated 16) * c input register containing c (already rol $1) * d input register containing d * operations on a and b are interleaved to increase performance */ #define decrypt_round(a,b,c,d,round)\ movzx a ## B, 
%edi;\ mov (%r11,%rdi,4), %r9d;\ movzx b ## B, %edi;\ mov s3(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s1(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%r11,%rdi,4), %r8d;\ movzx a ## B, %edi;\ xor s2(%r11,%rdi,4),%r9d;\ movzx b ## B, %edi;\ xor s1(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ ror $15, a ## D;\ xor s3(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ xor s2(%r11,%rdi,4),%r8d;\ add %r8d, %r9d;\ add %r9d, %r8d;\ add k+round(%r11), %r9d;\ xor %r9d, c ## D;\ add k+4+round(%r11),%r8d;\ xor %r8d, d ## D;\ rol $15, d ## D; /* * a input register containing a * b input register containing b * c input register containing c (already rol $1) * d input register containing d * operations on a and b are interleaved to increase performance * during the round a and b are prepared for the output whitening */ #define decrypt_last_round(a,b,c,d,round)\ movzx a ## B, %edi;\ mov (%r11,%rdi,4), %r9d;\ movzx b ## B, %edi;\ mov s3(%r11,%rdi,4),%r8d;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%r11,%rdi,4), %r8d;\ movzx a ## H, %edi;\ mov b ## D, %r10d;\ shl $32, %r10;\ xor a, %r10;\ ror $16, a ## D;\ xor s1(%r11,%rdi,4),%r9d;\ movzx b ## B, %edi;\ xor s1(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ xor s2(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ xor s2(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ xor s3(%r11,%rdi,4),%r9d;\ add %r8d, %r9d;\ add %r9d, %r8d;\ add k+round(%r11), %r9d;\ xor %r9d, c ## D;\ add k+4+round(%r11),%r8d;\ xor %r8d, d ## D;\ ror $1, d ## D; ENTRY(twofish_enc_blk) pushq R1 /* %rdi contains the ctx address */ /* %rsi contains the output address */ /* %rdx contains the input address */ /* ctx address is moved to free one non-rex register as target for the 8bit high operations */ mov %rdi, %r11 movq (R3), R1 movq 8(R3), R3 input_whitening(R1,%r11,a_offset) input_whitening(R3,%r11,c_offset) mov R1D, R0D rol $16, R0D shr $32, R1 mov R3D, R2D shr $32, R3 rol $1, R3D encrypt_round(R0,R1,R2,R3,0); encrypt_round(R2,R3,R0,R1,8); 
encrypt_round(R0,R1,R2,R3,2*8); encrypt_round(R2,R3,R0,R1,3*8); encrypt_round(R0,R1,R2,R3,4*8); encrypt_round(R2,R3,R0,R1,5*8); encrypt_round(R0,R1,R2,R3,6*8); encrypt_round(R2,R3,R0,R1,7*8); encrypt_round(R0,R1,R2,R3,8*8); encrypt_round(R2,R3,R0,R1,9*8); encrypt_round(R0,R1,R2,R3,10*8); encrypt_round(R2,R3,R0,R1,11*8); encrypt_round(R0,R1,R2,R3,12*8); encrypt_round(R2,R3,R0,R1,13*8); encrypt_round(R0,R1,R2,R3,14*8); encrypt_last_round(R2,R3,R0,R1,15*8); output_whitening(%r10,%r11,a_offset) movq %r10, (%rsi) shl $32, R1 xor R0, R1 output_whitening(R1,%r11,c_offset) movq R1, 8(%rsi) popq R1 movl $1,%eax ret ENDPROC(twofish_enc_blk) ENTRY(twofish_dec_blk) pushq R1 /* %rdi contains the ctx address */ /* %rsi contains the output address */ /* %rdx contains the input address */ /* ctx address is moved to free one non-rex register as target for the 8bit high operations */ mov %rdi, %r11 movq (R3), R1 movq 8(R3), R3 output_whitening(R1,%r11,a_offset) output_whitening(R3,%r11,c_offset) mov R1D, R0D shr $32, R1 rol $16, R1D mov R3D, R2D shr $32, R3 rol $1, R2D decrypt_round(R0,R1,R2,R3,15*8); decrypt_round(R2,R3,R0,R1,14*8); decrypt_round(R0,R1,R2,R3,13*8); decrypt_round(R2,R3,R0,R1,12*8); decrypt_round(R0,R1,R2,R3,11*8); decrypt_round(R2,R3,R0,R1,10*8); decrypt_round(R0,R1,R2,R3,9*8); decrypt_round(R2,R3,R0,R1,8*8); decrypt_round(R0,R1,R2,R3,7*8); decrypt_round(R2,R3,R0,R1,6*8); decrypt_round(R0,R1,R2,R3,5*8); decrypt_round(R2,R3,R0,R1,4*8); decrypt_round(R0,R1,R2,R3,3*8); decrypt_round(R2,R3,R0,R1,2*8); decrypt_round(R0,R1,R2,R3,1*8); decrypt_last_round(R2,R3,R0,R1,0); input_whitening(%r10,%r11,a_offset) movq %r10, (%rsi) shl $32, R1 xor R0, R1 input_whitening(R1,%r11,c_offset) movq R1, 8(%rsi) popq R1 movl $1,%eax ret ENDPROC(twofish_dec_blk)
AirFortressIlikara/LS2K0300-linux-4.19
4,276
arch/x86/crypto/glue_helper-asm-avx.S
/* * Shared glue code for 128bit block ciphers, AVX assembler macros * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \ vmovdqu (0*16)(src), x0; \ vmovdqu (1*16)(src), x1; \ vmovdqu (2*16)(src), x2; \ vmovdqu (3*16)(src), x3; \ vmovdqu (4*16)(src), x4; \ vmovdqu (5*16)(src), x5; \ vmovdqu (6*16)(src), x6; \ vmovdqu (7*16)(src), x7; #define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vmovdqu x0, (0*16)(dst); \ vmovdqu x1, (1*16)(dst); \ vmovdqu x2, (2*16)(dst); \ vmovdqu x3, (3*16)(dst); \ vmovdqu x4, (4*16)(dst); \ vmovdqu x5, (5*16)(dst); \ vmovdqu x6, (6*16)(dst); \ vmovdqu x7, (7*16)(dst); #define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vpxor (0*16)(src), x1, x1; \ vpxor (1*16)(src), x2, x2; \ vpxor (2*16)(src), x3, x3; \ vpxor (3*16)(src), x4, x4; \ vpxor (4*16)(src), x5, x5; \ vpxor (5*16)(src), x6, x6; \ vpxor (6*16)(src), x7, x7; \ store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7); #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; #define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \ vpcmpeqd t0, t0, t0; \ vpsrldq $8, t0, t0; /* low: -1, high: 0 */ \ vmovdqa bswap, t1; \ \ /* load IV and byteswap */ \ vmovdqu (iv), x7; \ vpshufb t1, x7, x0; \ \ /* construct IVs */ \ inc_le128(x7, t0, t2); \ vpshufb t1, x7, x1; \ inc_le128(x7, t0, t2); \ vpshufb t1, x7, x2; \ 
inc_le128(x7, t0, t2); \ vpshufb t1, x7, x3; \ inc_le128(x7, t0, t2); \ vpshufb t1, x7, x4; \ inc_le128(x7, t0, t2); \ vpshufb t1, x7, x5; \ inc_le128(x7, t0, t2); \ vpshufb t1, x7, x6; \ inc_le128(x7, t0, t2); \ vmovdqa x7, t2; \ vpshufb t1, x7, x7; \ inc_le128(t2, t0, t1); \ vmovdqu t2, (iv); #define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vpxor (0*16)(src), x0, x0; \ vpxor (1*16)(src), x1, x1; \ vpxor (2*16)(src), x2, x2; \ vpxor (3*16)(src), x3, x3; \ vpxor (4*16)(src), x4, x4; \ vpxor (5*16)(src), x5, x5; \ vpxor (6*16)(src), x6, x6; \ vpxor (7*16)(src), x7, x7; \ store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7); #define gf128mul_x_ble(iv, mask, tmp) \ vpsrad $31, iv, tmp; \ vpaddq iv, iv, iv; \ vpshufd $0x13, tmp, tmp; \ vpand mask, tmp, tmp; \ vpxor tmp, iv, iv; #define load_xts_8way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, t0, \ t1, xts_gf128mul_and_shl1_mask) \ vmovdqa xts_gf128mul_and_shl1_mask, t0; \ \ /* load IV */ \ vmovdqu (iv), tiv; \ vpxor (0*16)(src), tiv, x0; \ vmovdqu tiv, (0*16)(dst); \ \ /* construct and store IVs, also xor with source */ \ gf128mul_x_ble(tiv, t0, t1); \ vpxor (1*16)(src), tiv, x1; \ vmovdqu tiv, (1*16)(dst); \ \ gf128mul_x_ble(tiv, t0, t1); \ vpxor (2*16)(src), tiv, x2; \ vmovdqu tiv, (2*16)(dst); \ \ gf128mul_x_ble(tiv, t0, t1); \ vpxor (3*16)(src), tiv, x3; \ vmovdqu tiv, (3*16)(dst); \ \ gf128mul_x_ble(tiv, t0, t1); \ vpxor (4*16)(src), tiv, x4; \ vmovdqu tiv, (4*16)(dst); \ \ gf128mul_x_ble(tiv, t0, t1); \ vpxor (5*16)(src), tiv, x5; \ vmovdqu tiv, (5*16)(dst); \ \ gf128mul_x_ble(tiv, t0, t1); \ vpxor (6*16)(src), tiv, x6; \ vmovdqu tiv, (6*16)(dst); \ \ gf128mul_x_ble(tiv, t0, t1); \ vpxor (7*16)(src), tiv, x7; \ vmovdqu tiv, (7*16)(dst); \ \ gf128mul_x_ble(tiv, t0, t1); \ vmovdqu tiv, (iv); #define store_xts_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vpxor (0*16)(dst), x0, x0; \ vpxor (1*16)(dst), x1, x1; \ vpxor (2*16)(dst), x2, x2; \ vpxor (3*16)(dst), x3, x3; \ vpxor (4*16)(dst), x4, x4; \ 
vpxor (5*16)(dst), x5, x5; \ vpxor (6*16)(dst), x6, x6; \ vpxor (7*16)(dst), x7, x7; \ store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
AirFortressIlikara/LS2K0300-linux-4.19
13,333
arch/x86/crypto/sha512-ssse3-asm.S
######################################################################## # Implement fast SHA-512 with SSSE3 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # David Cote <david.m.cote@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-512 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## #include <linux/linkage.h> .text # Virtual Registers # ARG1 digest = %rdi # ARG2 msg = %rsi # ARG3 msglen = %rdx T1 = %rcx T2 = %r8 a_64 = %r9 b_64 = %r10 c_64 = %r11 d_64 = %r12 e_64 = %r13 f_64 = %r14 g_64 = %r15 h_64 = %rbx tmp0 = %rax # Local variables (stack frame) W_SIZE = 80*8 WK_SIZE = 2*8 RSPSAVE_SIZE = 1*8 GPRSAVE_SIZE = 5*8 frame_W = 0 frame_WK = frame_W + W_SIZE frame_RSPSAVE = frame_WK + WK_SIZE frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE frame_size = frame_GPRSAVE + GPRSAVE_SIZE # Useful QWORD "arrays" for simpler memory references # MSG, DIGEST, K_t, W_t are arrays # WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even # Input message (arg1) #define MSG(i) 8*i(msg) # Output Digest (arg2) #define DIGEST(i) 8*i(digest) # SHA Constants (static mem) #define K_t(i) 8*i+K512(%rip) # Message Schedule (stack frame) #define W_t(i) 8*i+frame_W(%rsp) # W[t]+K[t] (stack frame) #define WK_2(i) 8*((i%2))+frame_WK(%rsp) .macro RotateState # Rotate symbols a..h right TMP = h_64 h_64 = g_64 g_64 = f_64 f_64 = e_64 e_64 = d_64 d_64 = c_64 c_64 = b_64 b_64 = a_64 a_64 = TMP .endm .macro SHA512_Round rnd # Compute Round %%t mov f_64, T1 # T1 = f mov e_64, tmp0 # tmp = e xor g_64, T1 # T1 = f ^ g ror $23, tmp0 # 41 # tmp = e ror 23 and e_64, T1 # T1 = (f ^ g) & e xor e_64, tmp0 # tmp = (e ror 23) ^ e xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g) idx = \rnd add WK_2(idx), T1 # W[t] + K[t] from message scheduler ror $4, tmp0 # 18 # tmp = ((e ror 23) ^ e) ror 4 xor e_64, tmp0 # tmp = (((e ror 23) ^ e) ror 4) ^ e mov a_64, T2 # T2 = a add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h ror $14, tmp0 # 14 # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e) mov a_64, tmp0 # tmp = a xor c_64, T2 # T2 = a ^ c and c_64, tmp0 # tmp = a & c and b_64, T2 # T2 = (a ^ c) & b xor tmp0, T2 # T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) mov a_64, 
tmp0 # tmp = a ror $5, tmp0 # 39 # tmp = a ror 5 xor a_64, tmp0 # tmp = (a ror 5) ^ a add T1, d_64 # e(next_state) = d + T1 ror $6, tmp0 # 34 # tmp = ((a ror 5) ^ a) ror 6 xor a_64, tmp0 # tmp = (((a ror 5) ^ a) ror 6) ^ a lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c) ror $28, tmp0 # 28 # tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) add tmp0, h_64 # a(next_state) = T1 + Maj(a,b,c) S0(a) RotateState .endm .macro SHA512_2Sched_2Round_sse rnd # Compute rounds t-2 and t-1 # Compute message schedule QWORDS t and t+1 # Two rounds are computed based on the values for K[t-2]+W[t-2] and # K[t-1]+W[t-1] which were previously stored at WK_2 by the message # scheduler. # The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. # They are then added to their respective SHA512 constants at # [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] # For brievity, the comments following vectored instructions only refer to # the first of a pair of QWORDS. # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]} # The computation of the message schedule and the rounds are tightly # stitched to take advantage of instruction-level parallelism. # For clarity, integer instructions (for the rounds calculation) are indented # by one tab. Vectored instructions (for the message scheduler) are indented # by two tabs. 
mov f_64, T1 idx = \rnd -2 movdqa W_t(idx), %xmm2 # XMM2 = W[t-2] xor g_64, T1 and e_64, T1 movdqa %xmm2, %xmm0 # XMM0 = W[t-2] xor g_64, T1 idx = \rnd add WK_2(idx), T1 idx = \rnd - 15 movdqu W_t(idx), %xmm5 # XMM5 = W[t-15] mov e_64, tmp0 ror $23, tmp0 # 41 movdqa %xmm5, %xmm3 # XMM3 = W[t-15] xor e_64, tmp0 ror $4, tmp0 # 18 psrlq $61-19, %xmm0 # XMM0 = W[t-2] >> 42 xor e_64, tmp0 ror $14, tmp0 # 14 psrlq $(8-7), %xmm3 # XMM3 = W[t-15] >> 1 add tmp0, T1 add h_64, T1 pxor %xmm2, %xmm0 # XMM0 = (W[t-2] >> 42) ^ W[t-2] mov a_64, T2 xor c_64, T2 pxor %xmm5, %xmm3 # XMM3 = (W[t-15] >> 1) ^ W[t-15] and b_64, T2 mov a_64, tmp0 psrlq $(19-6), %xmm0 # XMM0 = ((W[t-2]>>42)^W[t-2])>>13 and c_64, tmp0 xor tmp0, T2 psrlq $(7-1), %xmm3 # XMM3 = ((W[t-15]>>1)^W[t-15])>>6 mov a_64, tmp0 ror $5, tmp0 # 39 pxor %xmm2, %xmm0 # XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2] xor a_64, tmp0 ror $6, tmp0 # 34 pxor %xmm5, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15] xor a_64, tmp0 ror $28, tmp0 # 28 psrlq $6, %xmm0 # XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6 add tmp0, T2 add T1, d_64 psrlq $1, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]>>1 lea (T1, T2), h_64 RotateState movdqa %xmm2, %xmm1 # XMM1 = W[t-2] mov f_64, T1 xor g_64, T1 movdqa %xmm5, %xmm4 # XMM4 = W[t-15] and e_64, T1 xor g_64, T1 psllq $(64-19)-(64-61) , %xmm1 # XMM1 = W[t-2] << 42 idx = \rnd + 1 add WK_2(idx), T1 mov e_64, tmp0 psllq $(64-1)-(64-8), %xmm4 # XMM4 = W[t-15] << 7 ror $23, tmp0 # 41 xor e_64, tmp0 pxor %xmm2, %xmm1 # XMM1 = (W[t-2] << 42)^W[t-2] ror $4, tmp0 # 18 xor e_64, tmp0 pxor %xmm5, %xmm4 # XMM4 = (W[t-15]<<7)^W[t-15] ror $14, tmp0 # 14 add tmp0, T1 psllq $(64-61), %xmm1 # XMM1 = ((W[t-2] << 42)^W[t-2])<<3 add h_64, T1 mov a_64, T2 psllq $(64-8), %xmm4 # XMM4 = ((W[t-15]<<7)^W[t-15])<<56 xor c_64, T2 and b_64, T2 pxor %xmm1, %xmm0 # XMM0 = s1(W[t-2]) mov a_64, tmp0 and c_64, tmp0 idx = \rnd - 7 movdqu W_t(idx), %xmm1 # XMM1 = W[t-7] xor tmp0, T2 pxor %xmm4, %xmm3 # XMM3 = s0(W[t-15]) mov 
	# NOTE(review): this chunk opens mid-way through the
	# SHA512_2Sched_2Round_sse macro; the macro header (and the
	# mnemonic of the instruction whose operands "a_64, tmp0" appear
	# below) lives in the preceding chunk and is preserved verbatim.
	a_64, tmp0
	paddq	%xmm3, %xmm0		# XMM0 = s1(W[t-2]) + s0(W[t-15])
	ror	$5, tmp0		# 39
	idx = \rnd-16
	paddq	W_t(idx), %xmm0		# XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16]
	xor	a_64, tmp0
	paddq	%xmm1, %xmm0		# XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]
	ror	$6, tmp0		# 34
	movdqa	%xmm0, W_t(\rnd)	# Store scheduled qwords
	xor	a_64, tmp0
	paddq	K_t(\rnd), %xmm0	# Compute W[t]+K[t]
	ror	$28, tmp0		# 28
	idx = \rnd
	movdqa	%xmm0, WK_2(idx)	# Store W[t]+K[t] for next rounds
	add	tmp0, T2
	add	T1, d_64
	lea	(T1, T2), h_64
	RotateState
.endm

########################################################################
# void sha512_transform_ssse3(void* D, const void* M, u64 L)
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
# The size of the message pointed to by M must be an integer multiple of SHA512
# message blocks.
# L is the message length in SHA512 blocks.
########################################################################
ENTRY(sha512_transform_ssse3)
	cmp	$0, msglen		# zero-length message: nothing to do
	je	nowork

	# Allocate Stack Space
	mov	%rsp, %rax
	sub	$frame_size, %rsp
	and	$~(0x20 - 1), %rsp	# 32-byte align workspace for SIMD stores
	mov	%rax, frame_RSPSAVE(%rsp)	# remember caller's %rsp

	# Save GPRs (callee-saved registers used as working state below)
	mov	%rbx, frame_GPRSAVE(%rsp)
	mov	%r12, frame_GPRSAVE +8*1(%rsp)
	mov	%r13, frame_GPRSAVE +8*2(%rsp)
	mov	%r14, frame_GPRSAVE +8*3(%rsp)
	mov	%r15, frame_GPRSAVE +8*4(%rsp)

updateblock:

	# Load state variables (a..h working variables from the digest)
	mov	DIGEST(0), a_64
	mov	DIGEST(1), b_64
	mov	DIGEST(2), c_64
	mov	DIGEST(3), d_64
	mov	DIGEST(4), e_64
	mov	DIGEST(5), f_64
	mov	DIGEST(6), g_64
	mov	DIGEST(7), h_64

	t = 0
	.rept 80/2 + 1	# (80 rounds) / (2 rounds/iteration) + (1 iteration)
			# +1 iteration because the scheduler leads hashing by 1 iteration
		.if t < 2
			# BSWAP 2 QWORDS (message schedule warm-up, no rounds yet)
			movdqa	XMM_QWORD_BSWAP(%rip), %xmm1
			movdqu	MSG(t), %xmm0
			pshufb	%xmm1, %xmm0	# BSWAP
			movdqa	%xmm0, W_t(t)	# Store Scheduled Pair
			paddq	K_t(t), %xmm0	# Compute W[t]+K[t]
			movdqa	%xmm0, WK_2(t)	# Store into WK for rounds
		.elseif t < 16
			# BSWAP 2 QWORDS, Compute 2 Rounds
			movdqu	MSG(t), %xmm0
			pshufb	%xmm1, %xmm0	# BSWAP
			SHA512_Round t-2	# Round t-2
			movdqa	%xmm0, W_t(t)	# Store Scheduled Pair
			paddq	K_t(t), %xmm0	# Compute W[t]+K[t]
			SHA512_Round t-1	# Round t-1
			movdqa	%xmm0, WK_2(t)	# Store W[t]+K[t] into WK
		.elseif t < 79
			# Schedule 2 QWORDS, Compute 2 Rounds
			SHA512_2Sched_2Round_sse t
		.else
			# Compute 2 Rounds (schedule already exhausted)
			SHA512_Round t-2
			SHA512_Round t-1
		.endif
		t = t+2
	.endr

	# Update digest (add working variables back into the state)
	add	a_64, DIGEST(0)
	add	b_64, DIGEST(1)
	add	c_64, DIGEST(2)
	add	d_64, DIGEST(3)
	add	e_64, DIGEST(4)
	add	f_64, DIGEST(5)
	add	g_64, DIGEST(6)
	add	h_64, DIGEST(7)

	# Advance to next message block (16 qwords = 128 bytes)
	add	$16*8, msg
	dec	msglen
	jnz	updateblock

	# Restore GPRs
	mov	frame_GPRSAVE(%rsp),      %rbx
	mov	frame_GPRSAVE +8*1(%rsp), %r12
	mov	frame_GPRSAVE +8*2(%rsp), %r13
	mov	frame_GPRSAVE +8*3(%rsp), %r14
	mov	frame_GPRSAVE +8*4(%rsp), %r15

	# Restore Stack Pointer
	mov	frame_RSPSAVE(%rsp), %rsp

nowork:
	ret
ENDPROC(sha512_transform_ssse3)

########################################################################
### Binary Data

.section	.rodata.cst16.XMM_QWORD_BSWAP, "aM", @progbits, 16
.align 16
# Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
XMM_QWORD_BSWAP:
	.octa 0x08090a0b0c0d0e0f0001020304050607

# Mergeable 640-byte rodata section. This allows linker to merge the table
# with other, exactly the same 640-byte fragment of another rodata section
# (if such section exists).
.section	.rodata.cst640.K512, "aM", @progbits, 640
.align 64
# K[t] used in SHA512 hashing
K512:
	.quad 0x428a2f98d728ae22,0x7137449123ef65cd
	.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad 0x3956c25bf348b538,0x59f111f1b605d019
	.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad 0xd807aa98a3030242,0x12835b0145706fbe
	.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad 0x9bdc06a725c71235,0xc19bf174cf692694
	.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad 0x983e5152ee66dfab,0xa831c66d2db43210
	.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad 0x06ca6351e003826f,0x142929670a0e6e70
	.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad 0x81c2c92e47edaee6,0x92722c851482353b
	.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad 0xd192e819d6ef5218,0xd69906245565a910
	.quad 0xf40e35855771202a,0x106aa07032bbd1b8
	.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad 0x90befffa23631e28,0xa4506cebde82bde9
	.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad 0xca273eceea26619c,0xd186b8c721c0c207
	.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad 0x113f9804bef90dae,0x1b710b35131c471b
	.quad 0x28db77f523047d84,0x32caab7b40c72493
	.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
AirFortressIlikara/LS2K0300-linux-4.19
11,279
arch/x86/crypto/cast6-avx-x86_64-asm_64.S
/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

/* s-box tables are provided by the generic cast_common module */
.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %r15

/* eight 128-bit blocks, processed as two 4-block halves */
#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

/* round-key material: masked key (RKM) and rotation counts (RKR) */
#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

/* GPRs used as s-box indices (RID*) and to shuttle xmm lanes (RGI*) */
#define RID1  %rdi
#define RID1d %edi
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d

/*
 * One 32-bit s-box lookup chain: consumes four bytes of 'src'
 * (via the bl/bh views and shrq), combining s1..s4 entries into 'dst'
 * with the round-dependent ops op1..op3.
 */
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

/* key-mix + data-dependent rotate, then spill lanes to GPRs for lookup */
#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x, RTMP;                 \
	vpsrld	RKRR,	x, x;                    \
	vpor	RTMP,	x, x;                    \
	\
	vmovq		x,    gi1;               \
	vpextrq $1,	x,    gi2;

/* four lookup_32bit chains, recombined into the 128-bit result x */
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1);     \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2);     \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);         \
	shlq $32,	RFS2;                                          \
	orq		RFS1, RFS2;                                    \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);         \
	shlq $32,	RFS1;                                          \
	orq		RFS1, RFS3;                                    \
	\
	vmovq		RFS2, x;                                       \
	vpinsrq $1,	RFS3, x, x;

/* full F function applied to both 4-block halves */
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);              \
	F_head(b2, RX, RGI3, RGI4, op0);              \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
	\
	vpxor	a1, RX,   a1;                         \
	vpxor	a2, RTMP, a2;

/* the three CAST6 round functions differ only in their op rotation */
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

/* broadcast masked key nn and split RKR into left/right rotate counts */
#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM;        \
	vpand		R1ST, RKR, RKRF;                \
	vpsubq		RKRF, R32, RKRR;                \
	vpsrldq $1,	RKR, RKR;

/* one forward quad-round */
#define Q(n) \
	get_round_keys(4*n+0);                          \
	qop(RD, RC, 1);                                 \
	\
	get_round_keys(4*n+1);                          \
	qop(RC, RB, 2);                                 \
	\
	get_round_keys(4*n+2);                          \
	qop(RB, RA, 3);                                 \
	\
	get_round_keys(4*n+3);                          \
	qop(RA, RD, 1);

/* one inverse quad-round (Q with sub-rounds reversed) */
#define QBAR(n) \
	get_round_keys(4*n+3);                          \
	qop(RA, RD, 1);                                 \
	\
	get_round_keys(4*n+2);                          \
	qop(RB, RA, 3);                                 \
	\
	get_round_keys(4*n+1);                          \
	qop(RC, RB, 2);                                 \
	\
	get_round_keys(4*n+0);                          \
	qop(RD, RC, 1);

#define shuffle(mask) \
	vpshufb		mask,            RKR, RKR;

/* load 16 rotation bytes for quad-rounds n*4..n*4+3, optionally reordered */
#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),           RKR, RKR; \
	do_mask(mask);

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;

/* byteswap then transpose into column-major working form */
#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	vpshufb rmask,	x2,	x2; \
	vpshufb rmask,	x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

/* inverse of inpack_blocks: transpose back then byteswap */
#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,	x0, x0;       \
	vpshufb rmask,	x1, x1;       \
	vpshufb rmask,	x2, x2;       \
	vpshufb rmask,	x3, x3;

.section	.rodata.cst16, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.L16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16

.section	.rodata.cst4.L32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0

.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 8
__cast6_enc_blk8:
	/* input:
	 *	%rdi: ctx
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	/* 12 quad-rounds: 6 forward (Q), 6 inverse (QBAR) */
	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_enc_blk8)

.align 8
__cast6_dec_blk8:
	/* input:
	 *	%rdi: ctx
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	/* encryption schedule run backwards (keys consumed high to low) */
	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_dec_blk8)

ENTRY(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;	/* keep dst; %rsi is clobbered by enc_blk8 */

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;	/* keep src for CBC chaining after decrypt */

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	popq %r12;
	FRAME_END
	ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX, RKR, RKM);

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	popq %r12;
	FRAME_END
	ret;
ENDPROC(cast6_ctr_8way)

ENTRY(cast6_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX
	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
ENDPROC(cast6_xts_enc_8way)

ENTRY(cast6_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX
	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
ENDPROC(cast6_xts_dec_8way)
AirFortressIlikara/LS2K0300-linux-4.19
6,124
arch/x86/crypto/crc32-pclmul_asm.S
/* GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please  visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
 * calculation.
 * CRC32 polynomial: 0x04c11db7(BE)/0xEDB88320(LE)
 * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
 * at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *	      Alexander Boyko <Alexander_Boyko@xyratex.com>
 */

#include <linux/linkage.h>
#include <asm/inst.h>


/* folding constants, precomputed from the (bit-reflected) CRC32 polynomial */
.section .rodata
.align 16
/*
 * [(x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
 * #define CONSTANT_R1  0x154442bd4LL
 *
 * [(x4*128-32 mod P(x) << 32)]'  << 1   = 0x1c6e41596
 * #define CONSTANT_R2  0x1c6e41596LL
 */
.Lconstant_R2R1:
	.octa 0x00000001c6e415960000000154442bd4
/*
 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
 * #define CONSTANT_R3  0x1751997d0LL
 *
 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
 * #define CONSTANT_R4  0x0ccaa009eLL
 */
.Lconstant_R4R3:
	.octa 0x00000000ccaa009e00000001751997d0
/*
 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
 * #define CONSTANT_R5  0x163cd6124LL
 */
.Lconstant_R5:
	.octa 0x00000000000000000000000163cd6124
.Lconstant_mask32:
	.octa 0x000000000000000000000000FFFFFFFF
/*
 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
 *
 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL
 * #define CONSTANT_RU  0x1F7011641LL
 */
.Lconstant_RUpoly:
	.octa 0x00000001F701164100000001DB710641

#define CONSTANT %xmm0

#ifdef __x86_64__
#define BUF     %rdi
#define LEN     %rsi
#define CRC     %edx
#else
#define BUF     %eax
#define LEN     %edx
#define CRC     %ecx
#endif



.text
/**
 *      Calculate crc32
 *      BUF - buffer (16 bytes aligned)
 *      LEN - sizeof buffer (16 bytes aligned), LEN should be greater than 63
 *      CRC - initial crc32
 *      return %eax crc32
 *      uint crc32_pclmul_le_16(unsigned char const *buffer,
 *	                        size_t len, uint crc32)
 */

ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
	movdqa  (BUF), %xmm1
	movdqa  0x10(BUF), %xmm2
	movdqa  0x20(BUF), %xmm3
	movdqa  0x30(BUF), %xmm4
	movd    CRC, CONSTANT	/* seed: xor initial CRC into first qword */
	pxor    CONSTANT, %xmm1
	sub     $0x40, LEN
	add     $0x40, BUF
	cmp     $0x40, LEN
	jb      less_64

#ifdef __x86_64__
	movdqa .Lconstant_R2R1(%rip), CONSTANT
#else
	movdqa .Lconstant_R2R1, CONSTANT
#endif

loop_64:/*  64 bytes Full cache line folding */
	prefetchnta    0x40(BUF)
	movdqa  %xmm1, %xmm5
	movdqa  %xmm2, %xmm6
	movdqa  %xmm3, %xmm7
#ifdef __x86_64__
	movdqa  %xmm4, %xmm8
#endif
	/* fold low qwords (R2) and high qwords (R1) of each lane */
	PCLMULQDQ 00, CONSTANT, %xmm1
	PCLMULQDQ 00, CONSTANT, %xmm2
	PCLMULQDQ 00, CONSTANT, %xmm3
#ifdef __x86_64__
	PCLMULQDQ 00, CONSTANT, %xmm4
#endif
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	PCLMULQDQ 0x11, CONSTANT, %xmm6
	PCLMULQDQ 0x11, CONSTANT, %xmm7
#ifdef __x86_64__
	PCLMULQDQ 0x11, CONSTANT, %xmm8
#endif
	pxor    %xmm5, %xmm1
	pxor    %xmm6, %xmm2
	pxor    %xmm7, %xmm3
#ifdef __x86_64__
	pxor    %xmm8, %xmm4
#else
	/* xmm8 unsupported for x32 */
	movdqa  %xmm4, %xmm5
	PCLMULQDQ 00, CONSTANT, %xmm4
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm4
#endif

	pxor    (BUF), %xmm1
	pxor    0x10(BUF), %xmm2
	pxor    0x20(BUF), %xmm3
	pxor    0x30(BUF), %xmm4

	sub     $0x40, LEN
	add     $0x40, BUF
	cmp     $0x40, LEN
	jge     loop_64
less_64:/*  Folding cache line into 128bit */
#ifdef __x86_64__
	movdqa  .Lconstant_R4R3(%rip), CONSTANT
#else
	movdqa  .Lconstant_R4R3, CONSTANT
#endif
	prefetchnta     (BUF)

	/* fold xmm1..xmm4 down into a single 128-bit value in xmm1 */
	movdqa  %xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm2, %xmm1

	movdqa  %xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm3, %xmm1

	movdqa  %xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm4, %xmm1

	cmp     $0x10, LEN
	jb      fold_64
loop_16:/* Folding rest buffer into 128bit */
	movdqa  %xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    (BUF), %xmm1
	sub     $0x10, LEN
	add     $0x10, BUF
	cmp     $0x10, LEN
	jge     loop_16

fold_64:
	/* perform the last 64 bit fold, also adds 32 zeroes
	 * to the input stream */
	PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
	psrldq  $0x08, %xmm1
	pxor    CONSTANT, %xmm1

	/* final 32-bit fold */
	movdqa  %xmm1, %xmm2
#ifdef __x86_64__
	movdqa  .Lconstant_R5(%rip), CONSTANT
	movdqa  .Lconstant_mask32(%rip), %xmm3
#else
	movdqa  .Lconstant_R5, CONSTANT
	movdqa  .Lconstant_mask32, %xmm3
#endif
	psrldq  $0x04, %xmm2
	pand    %xmm3, %xmm1
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	pxor    %xmm2, %xmm1

	/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
#ifdef __x86_64__
	movdqa  .Lconstant_RUpoly(%rip), CONSTANT
#else
	movdqa  .Lconstant_RUpoly, CONSTANT
#endif
	movdqa  %xmm1, %xmm2
	pand    %xmm3, %xmm1
	PCLMULQDQ 0x10, CONSTANT, %xmm1
	pand    %xmm3, %xmm1
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	pxor    %xmm2, %xmm1
	PEXTRD  0x01, %xmm1, %eax	/* result CRC in %eax */

	ret
ENDPROC(crc32_pclmul_le_16)
AirFortressIlikara/LS2K0300-linux-4.19
104,846
arch/x86/crypto/aesni-intel_avx-x86_64.S
######################################################################## # Copyright (c) 2013, Intel Corporation # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the # distribution. # # * Neither the name of the Intel Corporation nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES# LOSS OF USE, DATA, OR # PROFITS# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
######################################################################## ## ## Authors: ## Erdinc Ozturk <erdinc.ozturk@intel.com> ## Vinodh Gopal <vinodh.gopal@intel.com> ## James Guilford <james.guilford@intel.com> ## Tim Chen <tim.c.chen@linux.intel.com> ## ## References: ## This code was derived and highly optimized from the code described in paper: ## Vinodh Gopal et. al. Optimized Galois-Counter-Mode Implementation ## on Intel Architecture Processors. August, 2010 ## The details of the implementation is explained in: ## Erdinc Ozturk et. al. Enabling High-Performance Galois-Counter-Mode ## on Intel Architecture Processors. October, 2012. ## ## Assumptions: ## ## ## ## iv: ## 0 1 2 3 ## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | Salt (From the SA) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | Initialization Vector | ## | (This is the sequence number from IPSec header) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 0x1 | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## ## ## ## AAD: ## AAD padded to 128 bits with 0 ## for example, assume AAD is a u32 vector ## ## if AAD is 8 bytes: ## AAD[3] = {A0, A1}# ## padded AAD in xmm register = {A1 A0 0 0} ## ## 0 1 2 3 ## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | SPI (A1) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 32-bit Sequence Number (A0) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 0x0 | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## ## AAD Format with 32-bit Sequence Number ## ## if AAD is 12 bytes: ## AAD[3] = {A0, A1, A2}# ## padded AAD in xmm register = {A2 A1 A0 0} ## ## 0 1 2 3 ## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ## 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | SPI (A2) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 64-bit Extended Sequence Number {A1,A0} | ## | | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 0x0 | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## ## AAD Format with 64-bit Extended Sequence Number ## ## ## aadLen: ## from the definition of the spec, aadLen can only be 8 or 12 bytes. ## The code additionally supports aadLen of length 16 bytes. ## ## TLen: ## from the definition of the spec, TLen can only be 8, 12 or 16 bytes. ## ## poly = x^128 + x^127 + x^126 + x^121 + 1 ## throughout the code, one tab and two tab indentations are used. one tab is ## for GHASH part, two tabs is for AES part. ## #include <linux/linkage.h> #include <asm/inst.h> # constants in mergeable sections, linker can reorder and merge .section .rodata.cst16.POLY, "aM", @progbits, 16 .align 16 POLY: .octa 0xC2000000000000000000000000000001 .section .rodata.cst16.POLY2, "aM", @progbits, 16 .align 16 POLY2: .octa 0xC20000000000000000000001C2000000 .section .rodata.cst16.TWOONE, "aM", @progbits, 16 .align 16 TWOONE: .octa 0x00000001000000000000000000000001 .section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16 .align 16 SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F .section .rodata.cst16.ONE, "aM", @progbits, 16 .align 16 ONE: .octa 0x00000000000000000000000000000001 .section .rodata.cst16.ONEf, "aM", @progbits, 16 .align 16 ONEf: .octa 0x01000000000000000000000000000000 # order of these constants should not change. 
# more specifically, ALL_F should follow SHIFT_MASK, and zero should follow ALL_F .section .rodata, "a", @progbits .align 16 SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 ALL_F: .octa 0xffffffffffffffffffffffffffffffff .octa 0x00000000000000000000000000000000 .section .rodata .align 16 .type aad_shift_arr, @object .size aad_shift_arr, 272 aad_shift_arr: .octa 0xffffffffffffffffffffffffffffffff .octa 0xffffffffffffffffffffffffffffff0C .octa 0xffffffffffffffffffffffffffff0D0C .octa 0xffffffffffffffffffffffffff0E0D0C .octa 0xffffffffffffffffffffffff0F0E0D0C .octa 0xffffffffffffffffffffff0C0B0A0908 .octa 0xffffffffffffffffffff0D0C0B0A0908 .octa 0xffffffffffffffffff0E0D0C0B0A0908 .octa 0xffffffffffffffff0F0E0D0C0B0A0908 .octa 0xffffffffffffff0C0B0A090807060504 .octa 0xffffffffffff0D0C0B0A090807060504 .octa 0xffffffffff0E0D0C0B0A090807060504 .octa 0xffffffff0F0E0D0C0B0A090807060504 .octa 0xffffff0C0B0A09080706050403020100 .octa 0xffff0D0C0B0A09080706050403020100 .octa 0xff0E0D0C0B0A09080706050403020100 .octa 0x0F0E0D0C0B0A09080706050403020100 .text ##define the fields of the gcm aes context #{ # u8 expanded_keys[16*11] store expanded keys # u8 shifted_hkey_1[16] store HashKey <<1 mod poly here # u8 shifted_hkey_2[16] store HashKey^2 <<1 mod poly here # u8 shifted_hkey_3[16] store HashKey^3 <<1 mod poly here # u8 shifted_hkey_4[16] store HashKey^4 <<1 mod poly here # u8 shifted_hkey_5[16] store HashKey^5 <<1 mod poly here # u8 shifted_hkey_6[16] store HashKey^6 <<1 mod poly here # u8 shifted_hkey_7[16] store HashKey^7 <<1 mod poly here # u8 shifted_hkey_8[16] store HashKey^8 <<1 mod poly here # u8 shifted_hkey_1_k[16] store XOR HashKey <<1 mod poly here (for Karatsuba purposes) # u8 shifted_hkey_2_k[16] store XOR HashKey^2 <<1 mod poly here (for Karatsuba purposes) # u8 shifted_hkey_3_k[16] store XOR HashKey^3 <<1 mod poly here (for Karatsuba purposes) # u8 shifted_hkey_4_k[16] store XOR HashKey^4 <<1 mod poly here (for Karatsuba purposes) # u8 shifted_hkey_5_k[16] 
store XOR HashKey^5 <<1 mod poly here (for Karatsuba purposes) # u8 shifted_hkey_6_k[16] store XOR HashKey^6 <<1 mod poly here (for Karatsuba purposes) # u8 shifted_hkey_7_k[16] store XOR HashKey^7 <<1 mod poly here (for Karatsuba purposes) # u8 shifted_hkey_8_k[16] store XOR HashKey^8 <<1 mod poly here (for Karatsuba purposes) #} gcm_ctx# HashKey = 16*11 # store HashKey <<1 mod poly here HashKey_2 = 16*12 # store HashKey^2 <<1 mod poly here HashKey_3 = 16*13 # store HashKey^3 <<1 mod poly here HashKey_4 = 16*14 # store HashKey^4 <<1 mod poly here HashKey_5 = 16*15 # store HashKey^5 <<1 mod poly here HashKey_6 = 16*16 # store HashKey^6 <<1 mod poly here HashKey_7 = 16*17 # store HashKey^7 <<1 mod poly here HashKey_8 = 16*18 # store HashKey^8 <<1 mod poly here HashKey_k = 16*19 # store XOR of HashKey <<1 mod poly here (for Karatsuba purposes) HashKey_2_k = 16*20 # store XOR of HashKey^2 <<1 mod poly here (for Karatsuba purposes) HashKey_3_k = 16*21 # store XOR of HashKey^3 <<1 mod poly here (for Karatsuba purposes) HashKey_4_k = 16*22 # store XOR of HashKey^4 <<1 mod poly here (for Karatsuba purposes) HashKey_5_k = 16*23 # store XOR of HashKey^5 <<1 mod poly here (for Karatsuba purposes) HashKey_6_k = 16*24 # store XOR of HashKey^6 <<1 mod poly here (for Karatsuba purposes) HashKey_7_k = 16*25 # store XOR of HashKey^7 <<1 mod poly here (for Karatsuba purposes) HashKey_8_k = 16*26 # store XOR of HashKey^8 <<1 mod poly here (for Karatsuba purposes) #define arg1 %rdi #define arg2 %rsi #define arg3 %rdx #define arg4 %rcx #define arg5 %r8 #define arg6 %r9 #define arg7 STACK_OFFSET+8*1(%r14) #define arg8 STACK_OFFSET+8*2(%r14) #define arg9 STACK_OFFSET+8*3(%r14) i = 0 j = 0 out_order = 0 in_order = 1 DEC = 0 ENC = 1 .macro define_reg r n reg_\r = %xmm\n .endm .macro setreg .altmacro define_reg i %i define_reg j %j .noaltmacro .endm # need to push 4 registers into stack to maintain STACK_OFFSET = 8*4 TMP1 = 16*0 # Temporary storage for AAD TMP2 = 16*1 # Temporary storage 
for AES State 2 (State 1 is stored in an XMM register) TMP3 = 16*2 # Temporary storage for AES State 3 TMP4 = 16*3 # Temporary storage for AES State 4 TMP5 = 16*4 # Temporary storage for AES State 5 TMP6 = 16*5 # Temporary storage for AES State 6 TMP7 = 16*6 # Temporary storage for AES State 7 TMP8 = 16*7 # Temporary storage for AES State 8 VARIABLE_OFFSET = 16*8 ################################ # Utility Macros ################################ # Encryption of a single block .macro ENCRYPT_SINGLE_BLOCK XMM0 vpxor (arg1), \XMM0, \XMM0 i = 1 setreg .rep 9 vaesenc 16*i(arg1), \XMM0, \XMM0 i = (i+1) setreg .endr vaesenclast 16*10(arg1), \XMM0, \XMM0 .endm #ifdef CONFIG_AS_AVX ############################################################################### # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) # Input: A and B (128-bits each, bit-reflected) # Output: C = A*B*x mod poly, (i.e. >>1 ) # To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input # GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly. 
############################################################################### .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5 vpshufd $0b01001110, \GH, \T2 vpshufd $0b01001110, \HK, \T3 vpxor \GH , \T2, \T2 # T2 = (a1+a0) vpxor \HK , \T3, \T3 # T3 = (b1+b0) vpclmulqdq $0x11, \HK, \GH, \T1 # T1 = a1*b1 vpclmulqdq $0x00, \HK, \GH, \GH # GH = a0*b0 vpclmulqdq $0x00, \T3, \T2, \T2 # T2 = (a1+a0)*(b1+b0) vpxor \GH, \T2,\T2 vpxor \T1, \T2,\T2 # T2 = a0*b1+a1*b0 vpslldq $8, \T2,\T3 # shift-L T3 2 DWs vpsrldq $8, \T2,\T2 # shift-R T2 2 DWs vpxor \T3, \GH, \GH vpxor \T2, \T1, \T1 # <T1:GH> = GH x HK #first phase of the reduction vpslld $31, \GH, \T2 # packed right shifting << 31 vpslld $30, \GH, \T3 # packed right shifting shift << 30 vpslld $25, \GH, \T4 # packed right shifting shift << 25 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpsrldq $4, \T2, \T5 # shift-R T5 1 DW vpslldq $12, \T2, \T2 # shift-L T2 3 DWs vpxor \T2, \GH, \GH # first phase of the reduction complete #second phase of the reduction vpsrld $1,\GH, \T2 # packed left shifting >> 1 vpsrld $2,\GH, \T3 # packed left shifting >> 2 vpsrld $7,\GH, \T4 # packed left shifting >> 7 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpxor \T5, \T2, \T2 vpxor \T2, \GH, \GH vpxor \T1, \GH, \GH # the result is in GH .endm .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6 # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vmovdqa \HK, \T5 vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqa \T1, HashKey_k(arg1) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly vmovdqa \T5, HashKey_2(arg1) # [HashKey_2] = HashKey^2<<1 mod poly vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqa \T1, HashKey_2_k(arg1) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly vmovdqa \T5, HashKey_3(arg1) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqa \T1, HashKey_3_k(arg1) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # 
T5 = HashKey^4<<1 mod poly vmovdqa \T5, HashKey_4(arg1) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqa \T1, HashKey_4_k(arg1) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly vmovdqa \T5, HashKey_5(arg1) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqa \T1, HashKey_5_k(arg1) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly vmovdqa \T5, HashKey_6(arg1) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqa \T1, HashKey_6_k(arg1) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly vmovdqa \T5, HashKey_7(arg1) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqa \T1, HashKey_7_k(arg1) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly vmovdqa \T5, HashKey_8(arg1) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqa \T1, HashKey_8_k(arg1) .endm ## if a = number of total plaintext bytes ## b = floor(a/16) ## num_initial_blocks = b mod 4# ## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext ## r10, r11, r12, rax are clobbered ## arg1, arg2, arg3, r14 are used as a pointer only, not modified .macro INITIAL_BLOCKS_AVX num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC i = (8-\num_initial_blocks) j = 0 setreg mov arg6, %r10 # r10 = AAD mov arg7, %r12 # r12 = aadLen mov %r12, %r11 vpxor reg_j, reg_j, reg_j vpxor reg_i, reg_i, reg_i cmp $16, %r11 jl _get_AAD_rest8\@ _get_AAD_blocks\@: vmovdqu (%r10), reg_i vpshufb SHUF_MASK(%rip), reg_i, reg_i vpxor reg_i, reg_j, reg_j GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6 add $16, %r10 sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\@ vmovdqu reg_j, reg_i cmp $0, %r11 je _get_AAD_done\@ vpxor reg_i, reg_i, reg_i /* read the last <16B of AAD. 
since we have at least 4B of data right after the AAD (the ICV, and maybe some CT), we can read 4B/8B blocks safely, and then get rid of the extra stuff */ _get_AAD_rest8\@: cmp $4, %r11 jle _get_AAD_rest4\@ movq (%r10), \T1 add $8, %r10 sub $8, %r11 vpslldq $8, \T1, \T1 vpsrldq $8, reg_i, reg_i vpxor \T1, reg_i, reg_i jmp _get_AAD_rest8\@ _get_AAD_rest4\@: cmp $0, %r11 jle _get_AAD_rest0\@ mov (%r10), %eax movq %rax, \T1 add $4, %r10 sub $4, %r11 vpslldq $12, \T1, \T1 vpsrldq $4, reg_i, reg_i vpxor \T1, reg_i, reg_i _get_AAD_rest0\@: /* finalize: shift out the extra bytes we read, and align left. since pslldq can only shift by an immediate, we use vpshufb and an array of shuffle masks */ movq %r12, %r11 salq $4, %r11 movdqu aad_shift_arr(%r11), \T1 vpshufb \T1, reg_i, reg_i _get_AAD_rest_final\@: vpshufb SHUF_MASK(%rip), reg_i, reg_i vpxor reg_j, reg_i, reg_i GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6 _get_AAD_done\@: # initialize the data pointer offset as zero xor %r11d, %r11d # start AES for num_initial_blocks blocks mov arg5, %rax # rax = *Y0 vmovdqu (%rax), \CTR # CTR = Y0 vpshufb SHUF_MASK(%rip), \CTR, \CTR i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, reg_i vpshufb SHUF_MASK(%rip), reg_i, reg_i # perform a 16Byte swap i = (i+1) setreg .endr vmovdqa (arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpxor \T_key, reg_i, reg_i i = (i+1) setreg .endr j = 1 setreg .rep 9 vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vaesenc \T_key, reg_i, reg_i i = (i+1) setreg .endr j = (j+1) setreg .endr vmovdqa 16*10(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vaesenclast \T_key, reg_i, reg_i i = (i+1) setreg .endr i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vmovdqu (arg3, %r11), \T1 vpxor \T1, reg_i, reg_i vmovdqu reg_i, (arg2 , %r11) # write back ciphertext for num_initial_blocks blocks add 
$16, %r11 .if \ENC_DEC == DEC vmovdqa \T1, reg_i .endif vpshufb SHUF_MASK(%rip), reg_i, reg_i # prepare ciphertext for GHASH computations i = (i+1) setreg .endr i = (8-\num_initial_blocks) j = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpxor reg_i, reg_j, reg_j GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks i = (i+1) j = (j+1) setreg .endr # XMM8 has the combined result here vmovdqa \XMM8, TMP1(%rsp) vmovdqa \XMM8, \T3 cmp $128, %r13 jl _initial_blocks_done\@ # no need for precomputed constants ############################################################################### # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM1 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM2 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM3 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM4 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM5 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM6 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM7 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM8 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap vmovdqa (arg1), \T_key vpxor \T_key, \XMM1, \XMM1 vpxor \T_key, \XMM2, \XMM2 vpxor \T_key, \XMM3, \XMM3 vpxor \T_key, \XMM4, \XMM4 vpxor \T_key, \XMM5, \XMM5 vpxor \T_key, \XMM6, \XMM6 vpxor \T_key, \XMM7, \XMM7 vpxor \T_key, \XMM8, \XMM8 i = 1 setreg .rep 9 # do 9 rounds vmovdqa 16*i(arg1), \T_key vaesenc \T_key, 
\XMM1, \XMM1 vaesenc \T_key, \XMM2, \XMM2 vaesenc \T_key, \XMM3, \XMM3 vaesenc \T_key, \XMM4, \XMM4 vaesenc \T_key, \XMM5, \XMM5 vaesenc \T_key, \XMM6, \XMM6 vaesenc \T_key, \XMM7, \XMM7 vaesenc \T_key, \XMM8, \XMM8 i = (i+1) setreg .endr vmovdqa 16*i(arg1), \T_key vaesenclast \T_key, \XMM1, \XMM1 vaesenclast \T_key, \XMM2, \XMM2 vaesenclast \T_key, \XMM3, \XMM3 vaesenclast \T_key, \XMM4, \XMM4 vaesenclast \T_key, \XMM5, \XMM5 vaesenclast \T_key, \XMM6, \XMM6 vaesenclast \T_key, \XMM7, \XMM7 vaesenclast \T_key, \XMM8, \XMM8 vmovdqu (arg3, %r11), \T1 vpxor \T1, \XMM1, \XMM1 vmovdqu \XMM1, (arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM1 .endif vmovdqu 16*1(arg3, %r11), \T1 vpxor \T1, \XMM2, \XMM2 vmovdqu \XMM2, 16*1(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM2 .endif vmovdqu 16*2(arg3, %r11), \T1 vpxor \T1, \XMM3, \XMM3 vmovdqu \XMM3, 16*2(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM3 .endif vmovdqu 16*3(arg3, %r11), \T1 vpxor \T1, \XMM4, \XMM4 vmovdqu \XMM4, 16*3(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM4 .endif vmovdqu 16*4(arg3, %r11), \T1 vpxor \T1, \XMM5, \XMM5 vmovdqu \XMM5, 16*4(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM5 .endif vmovdqu 16*5(arg3, %r11), \T1 vpxor \T1, \XMM6, \XMM6 vmovdqu \XMM6, 16*5(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM6 .endif vmovdqu 16*6(arg3, %r11), \T1 vpxor \T1, \XMM7, \XMM7 vmovdqu \XMM7, 16*6(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM7 .endif vmovdqu 16*7(arg3, %r11), \T1 vpxor \T1, \XMM8, \XMM8 vmovdqu \XMM8, 16*7(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM8 .endif add $128, %r11 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpxor TMP1(%rsp), \XMM1, \XMM1 # combine GHASHed value with the corresponding ciphertext vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap 
vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap ############################################################################### _initial_blocks_done\@: .endm # encrypt 8 blocks at a time # ghash the 8 previously encrypted ciphertext blocks # arg1, arg2, arg3 are used as pointers only, not modified # r11 is the data offset value .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC vmovdqa \XMM1, \T2 vmovdqa \XMM2, TMP2(%rsp) vmovdqa \XMM3, TMP3(%rsp) vmovdqa \XMM4, TMP4(%rsp) vmovdqa \XMM5, TMP5(%rsp) vmovdqa \XMM6, TMP6(%rsp) vmovdqa \XMM7, TMP7(%rsp) vmovdqa \XMM8, TMP8(%rsp) .if \loop_idx == in_order vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT vpaddd ONE(%rip), \XMM1, \XMM2 vpaddd ONE(%rip), \XMM2, \XMM3 vpaddd ONE(%rip), \XMM3, \XMM4 vpaddd ONE(%rip), \XMM4, \XMM5 vpaddd ONE(%rip), \XMM5, \XMM6 vpaddd ONE(%rip), \XMM6, \XMM7 vpaddd ONE(%rip), \XMM7, \XMM8 vmovdqa \XMM8, \CTR vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap .else vpaddd ONEf(%rip), \CTR, \XMM1 # INCR CNT vpaddd ONEf(%rip), \XMM1, \XMM2 vpaddd ONEf(%rip), \XMM2, \XMM3 vpaddd ONEf(%rip), \XMM3, \XMM4 vpaddd ONEf(%rip), \XMM4, \XMM5 vpaddd ONEf(%rip), \XMM5, \XMM6 vpaddd ONEf(%rip), \XMM6, \XMM7 vpaddd ONEf(%rip), \XMM7, \XMM8 vmovdqa \XMM8, \CTR .endif ####################################################################### vmovdqu (arg1), \T1 vpxor \T1, \XMM1, 
\XMM1 vpxor \T1, \XMM2, \XMM2 vpxor \T1, \XMM3, \XMM3 vpxor \T1, \XMM4, \XMM4 vpxor \T1, \XMM5, \XMM5 vpxor \T1, \XMM6, \XMM6 vpxor \T1, \XMM7, \XMM7 vpxor \T1, \XMM8, \XMM8 ####################################################################### vmovdqu 16*1(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqu 16*2(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 ####################################################################### vmovdqa HashKey_8(arg1), \T5 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0 vpshufd $0b01001110, \T2, \T6 vpxor \T2, \T6, \T6 vmovdqa HashKey_8_k(arg1), \T5 vpclmulqdq $0x00, \T5, \T6, \T6 vmovdqu 16*3(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP2(%rsp), \T1 vmovdqa HashKey_7(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqa HashKey_7_k(arg1), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*4(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 ####################################################################### vmovdqa TMP3(%rsp), \T1 vmovdqa HashKey_6(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 
vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqa HashKey_6_k(arg1), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*5(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP4(%rsp), \T1 vmovdqa HashKey_5(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqa HashKey_5_k(arg1), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*6(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP5(%rsp), \T1 vmovdqa HashKey_4(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqa HashKey_4_k(arg1), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*7(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP6(%rsp), \T1 vmovdqa HashKey_3(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqa HashKey_3_k(arg1), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*8(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP7(%rsp), \T1 vmovdqa HashKey_2(arg1), \T5 vpclmulqdq 
$0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqa HashKey_2_k(arg1), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 ####################################################################### vmovdqu 16*9(arg1), \T5 vaesenc \T5, \XMM1, \XMM1 vaesenc \T5, \XMM2, \XMM2 vaesenc \T5, \XMM3, \XMM3 vaesenc \T5, \XMM4, \XMM4 vaesenc \T5, \XMM5, \XMM5 vaesenc \T5, \XMM6, \XMM6 vaesenc \T5, \XMM7, \XMM7 vaesenc \T5, \XMM8, \XMM8 vmovdqa TMP8(%rsp), \T1 vmovdqa HashKey(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqa HashKey_k(arg1), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vpxor \T4, \T6, \T6 vpxor \T7, \T6, \T6 vmovdqu 16*10(arg1), \T5 i = 0 j = 1 setreg .rep 8 vpxor 16*i(arg3, %r11), \T5, \T2 .if \ENC_DEC == ENC vaesenclast \T2, reg_j, reg_j .else vaesenclast \T2, reg_j, \T3 vmovdqu 16*i(arg3, %r11), reg_j vmovdqu \T3, 16*i(arg2, %r11) .endif i = (i+1) j = (j+1) setreg .endr ####################################################################### vpslldq $8, \T6, \T3 # shift-L T3 2 DWs vpsrldq $8, \T6, \T6 # shift-R T2 2 DWs vpxor \T3, \T7, \T7 vpxor \T4, \T6, \T6 # accumulate the results in T6:T7 ####################################################################### #first phase of the reduction ####################################################################### vpslld $31, \T7, \T2 # packed right shifting << 31 vpslld $30, \T7, \T3 # packed right shifting shift << 30 vpslld $25, \T7, \T4 # packed right shifting shift << 25 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpsrldq $4, \T2, \T1 # shift-R T1 1 DW vpslldq $12, \T2, \T2 # shift-L T2 3 DWs vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### .if \ENC_DEC == ENC vmovdqu 
\XMM1, 16*0(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM2, 16*1(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM3, 16*2(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM4, 16*3(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM5, 16*4(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM6, 16*5(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM7, 16*6(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM8, 16*7(arg2,%r11) # Write to the Ciphertext buffer .endif ####################################################################### #second phase of the reduction vpsrld $1, \T7, \T2 # packed left shifting >> 1 vpsrld $2, \T7, \T3 # packed left shifting >> 2 vpsrld $7, \T7, \T4 # packed left shifting >> 7 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpxor \T1, \T2, \T2 vpxor \T2, \T7, \T7 vpxor \T7, \T6, \T6 # the result is in T6 ####################################################################### vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap vpxor \T6, \XMM1, \XMM1 .endm # GHASH the last 4 ciphertext blocks. 
.macro GHASH_LAST_8_AVX T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 ## Karatsuba Method vpshufd $0b01001110, \XMM1, \T2 vpxor \XMM1, \T2, \T2 vmovdqa HashKey_8(arg1), \T5 vpclmulqdq $0x11, \T5, \XMM1, \T6 vpclmulqdq $0x00, \T5, \XMM1, \T7 vmovdqa HashKey_8_k(arg1), \T3 vpclmulqdq $0x00, \T3, \T2, \XMM1 ###################### vpshufd $0b01001110, \XMM2, \T2 vpxor \XMM2, \T2, \T2 vmovdqa HashKey_7(arg1), \T5 vpclmulqdq $0x11, \T5, \XMM2, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM2, \T4 vpxor \T4, \T7, \T7 vmovdqa HashKey_7_k(arg1), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM3, \T2 vpxor \XMM3, \T2, \T2 vmovdqa HashKey_6(arg1), \T5 vpclmulqdq $0x11, \T5, \XMM3, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM3, \T4 vpxor \T4, \T7, \T7 vmovdqa HashKey_6_k(arg1), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM4, \T2 vpxor \XMM4, \T2, \T2 vmovdqa HashKey_5(arg1), \T5 vpclmulqdq $0x11, \T5, \XMM4, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM4, \T4 vpxor \T4, \T7, \T7 vmovdqa HashKey_5_k(arg1), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM5, \T2 vpxor \XMM5, \T2, \T2 vmovdqa HashKey_4(arg1), \T5 vpclmulqdq $0x11, \T5, \XMM5, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM5, \T4 vpxor \T4, \T7, \T7 vmovdqa HashKey_4_k(arg1), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM6, \T2 vpxor \XMM6, \T2, \T2 vmovdqa HashKey_3(arg1), \T5 vpclmulqdq $0x11, \T5, \XMM6, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM6, \T4 vpxor \T4, \T7, \T7 vmovdqa HashKey_3_k(arg1), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM7, \T2 vpxor \XMM7, \T2, \T2 vmovdqa HashKey_2(arg1), \T5 vpclmulqdq $0x11, \T5, \XMM7, \T4 vpxor \T4, \T6, \T6 vpclmulqdq 
$0x00, \T5, \XMM7, \T4 vpxor \T4, \T7, \T7 vmovdqa HashKey_2_k(arg1), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM8, \T2 vpxor \XMM8, \T2, \T2 vmovdqa HashKey(arg1), \T5 vpclmulqdq $0x11, \T5, \XMM8, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM8, \T4 vpxor \T4, \T7, \T7 vmovdqa HashKey_k(arg1), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 vpxor \T6, \XMM1, \XMM1 vpxor \T7, \XMM1, \T2 vpslldq $8, \T2, \T4 vpsrldq $8, \T2, \T2 vpxor \T4, \T7, \T7 vpxor \T2, \T6, \T6 # <T6:T7> holds the result of # the accumulated carry-less multiplications ####################################################################### #first phase of the reduction vpslld $31, \T7, \T2 # packed right shifting << 31 vpslld $30, \T7, \T3 # packed right shifting shift << 30 vpslld $25, \T7, \T4 # packed right shifting shift << 25 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpsrldq $4, \T2, \T1 # shift-R T1 1 DW vpslldq $12, \T2, \T2 # shift-L T2 3 DWs vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### #second phase of the reduction vpsrld $1, \T7, \T2 # packed left shifting >> 1 vpsrld $2, \T7, \T3 # packed left shifting >> 2 vpsrld $7, \T7, \T4 # packed left shifting >> 7 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpxor \T1, \T2, \T2 vpxor \T2, \T7, \T7 vpxor \T7, \T6, \T6 # the result is in T6 .endm # combined for GCM encrypt and decrypt functions # clobbering all xmm registers # clobbering r10, r11, r12, r13, r14, r15 .macro GCM_ENC_DEC_AVX ENC_DEC #the number of pushes must equal STACK_OFFSET push %r12 push %r13 push %r14 push %r15 mov %rsp, %r14 sub $VARIABLE_OFFSET, %rsp and $~63, %rsp # align rsp to 64 bytes vmovdqu HashKey(arg1), %xmm13 # xmm13 = HashKey mov arg4, %r13 # save the number of bytes of plaintext/ciphertext and $-16, %r13 # r13 = r13 - (r13 mod 16) mov %r13, %r12 
shr $4, %r12 and $7, %r12 jz _initial_num_blocks_is_0\@ cmp $7, %r12 je _initial_num_blocks_is_7\@ cmp $6, %r12 je _initial_num_blocks_is_6\@ cmp $5, %r12 je _initial_num_blocks_is_5\@ cmp $4, %r12 je _initial_num_blocks_is_4\@ cmp $3, %r12 je _initial_num_blocks_is_3\@ cmp $2, %r12 je _initial_num_blocks_is_2\@ jmp _initial_num_blocks_is_1\@ _initial_num_blocks_is_7\@: INITIAL_BLOCKS_AVX 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*7, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_6\@: INITIAL_BLOCKS_AVX 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*6, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_5\@: INITIAL_BLOCKS_AVX 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*5, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_4\@: INITIAL_BLOCKS_AVX 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*4, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_3\@: INITIAL_BLOCKS_AVX 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*3, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_2\@: INITIAL_BLOCKS_AVX 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*2, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_1\@: INITIAL_BLOCKS_AVX 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*1, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_0\@: INITIAL_BLOCKS_AVX 0, %xmm12, %xmm13, %xmm14, 
%xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC _initial_blocks_encrypted\@: cmp $0, %r13 je _zero_cipher_left\@ sub $128, %r13 je _eight_cipher_left\@ vmovd %xmm9, %r15d and $255, %r15d vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 _encrypt_by_8_new\@: cmp $(255-8), %r15d jg _encrypt_by_8\@ add $8, %r15b GHASH_8_ENCRYPT_8_PARALLEL_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC add $128, %r11 sub $128, %r13 jne _encrypt_by_8_new\@ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 jmp _eight_cipher_left\@ _encrypt_by_8\@: vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 add $8, %r15b GHASH_8_ENCRYPT_8_PARALLEL_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 add $128, %r11 sub $128, %r13 jne _encrypt_by_8_new\@ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 _eight_cipher_left\@: GHASH_LAST_8_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8 _zero_cipher_left\@: cmp $16, arg4 jl _only_less_than_16\@ mov arg4, %r13 and $15, %r13 # r13 = (arg4 mod 16) je _multiple_of_16_bytes\@ # handle the last <16 Byte block seperately vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn) sub $16, %r11 add %r13, %r11 vmovdqu (arg3, %r11), %xmm1 # receive the last <16 Byte block lea SHIFT_MASK+16(%rip), %r12 sub %r13, %r12 # adjust the shuffle mask pointer to be # able to shift 16-r13 bytes (r13 is the # number of bytes in plaintext mod 16) vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes jmp _final_ghash_mul\@ _only_less_than_16\@: # check for 0 length mov arg4, %r13 and $15, %r13 # r13 = (arg4 mod 16) je _multiple_of_16_bytes\@ # handle the last <16 
Byte block seperately vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn) lea SHIFT_MASK+16(%rip), %r12 sub %r13, %r12 # adjust the shuffle mask pointer to be # able to shift 16-r13 bytes (r13 is the # number of bytes in plaintext mod 16) _get_last_16_byte_loop\@: movb (arg3, %r11), %al movb %al, TMP1 (%rsp , %r11) add $1, %r11 cmp %r13, %r11 jne _get_last_16_byte_loop\@ vmovdqu TMP1(%rsp), %xmm1 sub $16, %r11 _final_ghash_mul\@: .if \ENC_DEC == DEC vmovdqa %xmm1, %xmm2 vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to # mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm2, %xmm2 vpshufb SHUF_MASK(%rip), %xmm2, %xmm2 vpxor %xmm2, %xmm14, %xmm14 #GHASH computation for the last <16 Byte block GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 sub %r13, %r11 add $16, %r11 .else vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to # mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 vpxor %xmm9, %xmm14, %xmm14 #GHASH computation for the last <16 Byte block GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 sub %r13, %r11 add $16, %r11 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext .endif ############################# # output r13 Bytes vmovq %xmm9, %rax cmp $8, %r13 jle _less_than_8_bytes_left\@ mov %rax, (arg2 , %r11) add $8, %r11 vpsrldq $8, %xmm9, %xmm9 vmovq %xmm9, %rax sub $8, %r13 _less_than_8_bytes_left\@: movb %al, (arg2 , %r11) add $1, %r11 shr $8, %rax sub $1, %r13 jne _less_than_8_bytes_left\@ ############################# _multiple_of_16_bytes\@: mov arg7, %r12 # r12 = aadLen (number of bytes) shl $3, %r12 # convert into number of bits vmovd 
%r12d, %xmm15 # len(A) in xmm15 shl $3, arg4 # len(C) in bits (*128) vmovq arg4, %xmm1 vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000 vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C) vpxor %xmm15, %xmm14, %xmm14 GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap mov arg5, %rax # rax = *Y0 vmovdqu (%rax), %xmm9 # xmm9 = Y0 ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Y0) vpxor %xmm14, %xmm9, %xmm9 _return_T\@: mov arg8, %r10 # r10 = authTag mov arg9, %r11 # r11 = auth_tag_len cmp $16, %r11 je _T_16\@ cmp $8, %r11 jl _T_4\@ _T_8\@: vmovq %xmm9, %rax mov %rax, (%r10) add $8, %r10 sub $8, %r11 vpsrldq $8, %xmm9, %xmm9 cmp $0, %r11 je _return_T_done\@ _T_4\@: vmovd %xmm9, %eax mov %eax, (%r10) add $4, %r10 sub $4, %r11 vpsrldq $4, %xmm9, %xmm9 cmp $0, %r11 je _return_T_done\@ _T_123\@: vmovd %xmm9, %eax cmp $2, %r11 jl _T_1\@ mov %ax, (%r10) cmp $2, %r11 je _return_T_done\@ add $2, %r10 sar $16, %eax _T_1\@: mov %al, (%r10) jmp _return_T_done\@ _T_16\@: vmovdqu %xmm9, (%r10) _return_T_done\@: mov %r14, %rsp pop %r15 pop %r14 pop %r13 pop %r12 .endm ############################################################# #void aesni_gcm_precomp_avx_gen2 # (gcm_data *my_ctx_data, # u8 *hash_subkey)# /* H, the Hash sub key input. Data starts on a 16-byte boundary. 
*/
#############################################################
# Derives HashKey<<1 mod poly from the raw hash subkey (arg2) and stores
# it in the context (arg1), then expands PRECOMPUTE_AVX to fill in
# HashKey^2..HashKey^8 and the Karatsuba helper values used by the
# 8-way parallel GHASH code.
# NOTE(review): STACK_OFFSET/VARIABLE_OFFSET are assembler constants
# defined earlier in this file (not visible in this chunk).
ENTRY(aesni_gcm_precomp_avx_gen2)
        #the number of pushes must equal STACK_OFFSET
        push    %r12
        push    %r13
        push    %r14
        push    %r15

        mov     %rsp, %r14              # remember the unaligned rsp; restored before the pops

        sub     $VARIABLE_OFFSET, %rsp
        and     $~63, %rsp              # align rsp to 64 bytes

        vmovdqu  (arg2), %xmm6          # xmm6 = HashKey

        vpshufb  SHUF_MASK(%rip), %xmm6, %xmm6  # byte-swap H into the bit-reflected GHASH domain
        ###############  PRECOMPUTATION of  HashKey<<1  mod poly from the HashKey
        # 128-bit shift-left-by-1 built from two 64-bit lane shifts:
        # xmm6 gets the lanes shifted left, xmm2 catches the bits carried
        # out of each lane, and xmm1 keeps the bit shifted out of the top.
        vmovdqa  %xmm6, %xmm2
        vpsllq   $1, %xmm6, %xmm6
        vpsrlq   $63, %xmm2, %xmm2
        vmovdqa  %xmm2, %xmm1
        vpslldq  $8, %xmm2, %xmm2
        vpsrldq  $8, %xmm1, %xmm1

        vpor     %xmm2, %xmm6, %xmm6
        #reduction: if the bit shifted out of the top was set, xor in POLY
        vpshufd  $0b00100100, %xmm1, %xmm2
        vpcmpeqd TWOONE(%rip), %xmm2, %xmm2     # all-ones mask iff the carried-out bit was set
        vpand    POLY(%rip), %xmm2, %xmm2
        vpxor    %xmm2, %xmm6, %xmm6            # xmm6 holds the HashKey<<1 mod poly
        #######################################################################
        vmovdqa  %xmm6, HashKey(arg1)           # store HashKey<<1 mod poly


        PRECOMPUTE_AVX  %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5

        mov     %r14, %rsp              # deallocate workspace / undo the 64-byte alignment

        pop     %r15
        pop     %r14
        pop     %r13
        pop     %r12
        ret
ENDPROC(aesni_gcm_precomp_avx_gen2)

###############################################################################
#void   aesni_gcm_enc_avx_gen2(
#        gcm_data        *my_ctx_data,     /* aligned to 16 Bytes */
#        u8      *out, /* Ciphertext output. Encrypt in-place is allowed.  */
#        const   u8 *in, /* Plaintext input */
#        u64     plaintext_len, /* Length of data in Bytes for encryption. */
#        u8      *iv, /* Pre-counter block j0: 4 byte salt
#                       (from Security Association) concatenated with 8 byte
#                       Initialisation Vector (from IPSec ESP Payload)
#                       concatenated with 0x00000001. 16-byte aligned pointer. */
#        const   u8 *aad, /* Additional Authentication Data (AAD)*/
#        u64     aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
#        u8      *auth_tag, /* Authenticated Tag output. */
#        u64     auth_tag_len)# /* Authenticated Tag Length in bytes.
#                               Valid values are 16 (most likely), 12 or 8. */
###############################################################################
ENTRY(aesni_gcm_enc_avx_gen2)
        # Thin wrapper: all work happens in the shared GCM_ENC_DEC_AVX
        # macro, expanded here in ENC mode.
        GCM_ENC_DEC_AVX     ENC
        ret
ENDPROC(aesni_gcm_enc_avx_gen2)

###############################################################################
#void   aesni_gcm_dec_avx_gen2(
#        gcm_data        *my_ctx_data,     /* aligned to 16 Bytes */
#        u8      *out, /* Plaintext output. Decrypt in-place is allowed.  */
#        const   u8 *in, /* Ciphertext input */
#        u64     plaintext_len, /* Length of data in Bytes for decryption. */
#        u8      *iv, /* Pre-counter block j0: 4 byte salt
#                       (from Security Association) concatenated with 8 byte
#                       Initialisation Vector (from IPSec ESP Payload)
#                       concatenated with 0x00000001. 16-byte aligned pointer. */
#        const   u8 *aad, /* Additional Authentication Data (AAD)*/
#        u64     aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
#        u8      *auth_tag, /* Authenticated Tag output. */
#        u64     auth_tag_len)# /* Authenticated Tag Length in bytes.
#                               Valid values are 16 (most likely), 12 or 8. */
###############################################################################
ENTRY(aesni_gcm_dec_avx_gen2)
        # Same shared body as the encrypt entry point, expanded in DEC mode.
        GCM_ENC_DEC_AVX     DEC
        ret
ENDPROC(aesni_gcm_dec_avx_gen2)
#endif /* CONFIG_AS_AVX */

#ifdef CONFIG_AS_AVX2
###############################################################################
# GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
# Input: A and B (128-bits each, bit-reflected)
# Output: C = A*B*x mod poly, (i.e. >>1 )
# To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
# GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
############################################################################### .macro GHASH_MUL_AVX2 GH HK T1 T2 T3 T4 T5 vpclmulqdq $0x11,\HK,\GH,\T1 # T1 = a1*b1 vpclmulqdq $0x00,\HK,\GH,\T2 # T2 = a0*b0 vpclmulqdq $0x01,\HK,\GH,\T3 # T3 = a1*b0 vpclmulqdq $0x10,\HK,\GH,\GH # GH = a0*b1 vpxor \T3, \GH, \GH vpsrldq $8 , \GH, \T3 # shift-R GH 2 DWs vpslldq $8 , \GH, \GH # shift-L GH 2 DWs vpxor \T3, \T1, \T1 vpxor \T2, \GH, \GH ####################################################################### #first phase of the reduction vmovdqa POLY2(%rip), \T3 vpclmulqdq $0x01, \GH, \T3, \T2 vpslldq $8, \T2, \T2 # shift-L T2 2 DWs vpxor \T2, \GH, \GH # first phase of the reduction complete ####################################################################### #second phase of the reduction vpclmulqdq $0x00, \GH, \T3, \T2 vpsrldq $4, \T2, \T2 # shift-R T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R) vpclmulqdq $0x10, \GH, \T3, \GH vpslldq $4, \GH, \GH # shift-L GH 1 DW (Shift-L 1-DW to obtain result with no shifts) vpxor \T2, \GH, \GH # second phase of the reduction complete ####################################################################### vpxor \T1, \GH, \GH # the result is in GH .endm .macro PRECOMPUTE_AVX2 HK T1 T2 T3 T4 T5 T6 # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vmovdqa \HK, \T5 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly vmovdqa \T5, HashKey_2(arg1) # [HashKey_2] = HashKey^2<<1 mod poly GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly vmovdqa \T5, HashKey_3(arg1) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly vmovdqa \T5, HashKey_4(arg1) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly vmovdqa \T5, HashKey_5(arg1) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly vmovdqa \T5, HashKey_6(arg1) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly 
vmovdqa \T5, HashKey_7(arg1) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly vmovdqa \T5, HashKey_8(arg1) .endm ## if a = number of total plaintext bytes ## b = floor(a/16) ## num_initial_blocks = b mod 4# ## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext ## r10, r11, r12, rax are clobbered ## arg1, arg2, arg3, r14 are used as a pointer only, not modified .macro INITIAL_BLOCKS_AVX2 num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER i = (8-\num_initial_blocks) j = 0 setreg mov arg6, %r10 # r10 = AAD mov arg7, %r12 # r12 = aadLen mov %r12, %r11 vpxor reg_j, reg_j, reg_j vpxor reg_i, reg_i, reg_i cmp $16, %r11 jl _get_AAD_rest8\@ _get_AAD_blocks\@: vmovdqu (%r10), reg_i vpshufb SHUF_MASK(%rip), reg_i, reg_i vpxor reg_i, reg_j, reg_j GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6 add $16, %r10 sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\@ vmovdqu reg_j, reg_i cmp $0, %r11 je _get_AAD_done\@ vpxor reg_i, reg_i, reg_i /* read the last <16B of AAD. since we have at least 4B of data right after the AAD (the ICV, and maybe some CT), we can read 4B/8B blocks safely, and then get rid of the extra stuff */ _get_AAD_rest8\@: cmp $4, %r11 jle _get_AAD_rest4\@ movq (%r10), \T1 add $8, %r10 sub $8, %r11 vpslldq $8, \T1, \T1 vpsrldq $8, reg_i, reg_i vpxor \T1, reg_i, reg_i jmp _get_AAD_rest8\@ _get_AAD_rest4\@: cmp $0, %r11 jle _get_AAD_rest0\@ mov (%r10), %eax movq %rax, \T1 add $4, %r10 sub $4, %r11 vpslldq $12, \T1, \T1 vpsrldq $4, reg_i, reg_i vpxor \T1, reg_i, reg_i _get_AAD_rest0\@: /* finalize: shift out the extra bytes we read, and align left. 
since pslldq can only shift by an immediate, we use vpshufb and an array of shuffle masks */ movq %r12, %r11 salq $4, %r11 movdqu aad_shift_arr(%r11), \T1 vpshufb \T1, reg_i, reg_i _get_AAD_rest_final\@: vpshufb SHUF_MASK(%rip), reg_i, reg_i vpxor reg_j, reg_i, reg_i GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6 _get_AAD_done\@: # initialize the data pointer offset as zero xor %r11d, %r11d # start AES for num_initial_blocks blocks mov arg5, %rax # rax = *Y0 vmovdqu (%rax), \CTR # CTR = Y0 vpshufb SHUF_MASK(%rip), \CTR, \CTR i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, reg_i vpshufb SHUF_MASK(%rip), reg_i, reg_i # perform a 16Byte swap i = (i+1) setreg .endr vmovdqa (arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpxor \T_key, reg_i, reg_i i = (i+1) setreg .endr j = 1 setreg .rep 9 vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vaesenc \T_key, reg_i, reg_i i = (i+1) setreg .endr j = (j+1) setreg .endr vmovdqa 16*10(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vaesenclast \T_key, reg_i, reg_i i = (i+1) setreg .endr i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vmovdqu (arg3, %r11), \T1 vpxor \T1, reg_i, reg_i vmovdqu reg_i, (arg2 , %r11) # write back ciphertext for # num_initial_blocks blocks add $16, %r11 .if \ENC_DEC == DEC vmovdqa \T1, reg_i .endif vpshufb SHUF_MASK(%rip), reg_i, reg_i # prepare ciphertext for GHASH computations i = (i+1) setreg .endr i = (8-\num_initial_blocks) j = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpxor reg_i, reg_j, reg_j GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks i = (i+1) j = (j+1) setreg .endr # XMM8 has the combined result here vmovdqa \XMM8, TMP1(%rsp) vmovdqa \XMM8, \T3 cmp $128, %r13 jl _initial_blocks_done\@ # no need for precomputed constants 
############################################################################### # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM1 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM2 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM3 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM4 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM5 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM6 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM7 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM8 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap vmovdqa (arg1), \T_key vpxor \T_key, \XMM1, \XMM1 vpxor \T_key, \XMM2, \XMM2 vpxor \T_key, \XMM3, \XMM3 vpxor \T_key, \XMM4, \XMM4 vpxor \T_key, \XMM5, \XMM5 vpxor \T_key, \XMM6, \XMM6 vpxor \T_key, \XMM7, \XMM7 vpxor \T_key, \XMM8, \XMM8 i = 1 setreg .rep 9 # do 9 rounds vmovdqa 16*i(arg1), \T_key vaesenc \T_key, \XMM1, \XMM1 vaesenc \T_key, \XMM2, \XMM2 vaesenc \T_key, \XMM3, \XMM3 vaesenc \T_key, \XMM4, \XMM4 vaesenc \T_key, \XMM5, \XMM5 vaesenc \T_key, \XMM6, \XMM6 vaesenc \T_key, \XMM7, \XMM7 vaesenc \T_key, \XMM8, \XMM8 i = (i+1) setreg .endr vmovdqa 16*i(arg1), \T_key vaesenclast \T_key, \XMM1, \XMM1 vaesenclast \T_key, \XMM2, \XMM2 vaesenclast \T_key, \XMM3, \XMM3 vaesenclast \T_key, \XMM4, \XMM4 vaesenclast \T_key, \XMM5, \XMM5 vaesenclast \T_key, \XMM6, \XMM6 vaesenclast \T_key, \XMM7, \XMM7 vaesenclast \T_key, \XMM8, \XMM8 vmovdqu (arg3, %r11), \T1 vpxor 
\T1, \XMM1, \XMM1 vmovdqu \XMM1, (arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM1 .endif vmovdqu 16*1(arg3, %r11), \T1 vpxor \T1, \XMM2, \XMM2 vmovdqu \XMM2, 16*1(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM2 .endif vmovdqu 16*2(arg3, %r11), \T1 vpxor \T1, \XMM3, \XMM3 vmovdqu \XMM3, 16*2(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM3 .endif vmovdqu 16*3(arg3, %r11), \T1 vpxor \T1, \XMM4, \XMM4 vmovdqu \XMM4, 16*3(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM4 .endif vmovdqu 16*4(arg3, %r11), \T1 vpxor \T1, \XMM5, \XMM5 vmovdqu \XMM5, 16*4(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM5 .endif vmovdqu 16*5(arg3, %r11), \T1 vpxor \T1, \XMM6, \XMM6 vmovdqu \XMM6, 16*5(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM6 .endif vmovdqu 16*6(arg3, %r11), \T1 vpxor \T1, \XMM7, \XMM7 vmovdqu \XMM7, 16*6(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM7 .endif vmovdqu 16*7(arg3, %r11), \T1 vpxor \T1, \XMM8, \XMM8 vmovdqu \XMM8, 16*7(arg2 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM8 .endif add $128, %r11 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpxor TMP1(%rsp), \XMM1, \XMM1 # combine GHASHed value with # the corresponding ciphertext vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap ############################################################################### _initial_blocks_done\@: .endm # encrypt 8 blocks at a time # ghash the 8 previously encrypted ciphertext blocks # arg1, arg2, arg3 are used as pointers only, not modified # r11 is the data offset value .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 
XMM7 XMM8 T7 loop_idx ENC_DEC vmovdqa \XMM1, \T2 vmovdqa \XMM2, TMP2(%rsp) vmovdqa \XMM3, TMP3(%rsp) vmovdqa \XMM4, TMP4(%rsp) vmovdqa \XMM5, TMP5(%rsp) vmovdqa \XMM6, TMP6(%rsp) vmovdqa \XMM7, TMP7(%rsp) vmovdqa \XMM8, TMP8(%rsp) .if \loop_idx == in_order vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT vpaddd ONE(%rip), \XMM1, \XMM2 vpaddd ONE(%rip), \XMM2, \XMM3 vpaddd ONE(%rip), \XMM3, \XMM4 vpaddd ONE(%rip), \XMM4, \XMM5 vpaddd ONE(%rip), \XMM5, \XMM6 vpaddd ONE(%rip), \XMM6, \XMM7 vpaddd ONE(%rip), \XMM7, \XMM8 vmovdqa \XMM8, \CTR vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap .else vpaddd ONEf(%rip), \CTR, \XMM1 # INCR CNT vpaddd ONEf(%rip), \XMM1, \XMM2 vpaddd ONEf(%rip), \XMM2, \XMM3 vpaddd ONEf(%rip), \XMM3, \XMM4 vpaddd ONEf(%rip), \XMM4, \XMM5 vpaddd ONEf(%rip), \XMM5, \XMM6 vpaddd ONEf(%rip), \XMM6, \XMM7 vpaddd ONEf(%rip), \XMM7, \XMM8 vmovdqa \XMM8, \CTR .endif ####################################################################### vmovdqu (arg1), \T1 vpxor \T1, \XMM1, \XMM1 vpxor \T1, \XMM2, \XMM2 vpxor \T1, \XMM3, \XMM3 vpxor \T1, \XMM4, \XMM4 vpxor \T1, \XMM5, \XMM5 vpxor \T1, \XMM6, \XMM6 vpxor \T1, \XMM7, \XMM7 vpxor \T1, \XMM8, \XMM8 ####################################################################### vmovdqu 16*1(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqu 16*2(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 
vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 ####################################################################### vmovdqa HashKey_8(arg1), \T5 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0 vpclmulqdq $0x01, \T5, \T2, \T6 # T6 = a1*b0 vpclmulqdq $0x10, \T5, \T2, \T5 # T5 = a0*b1 vpxor \T5, \T6, \T6 vmovdqu 16*3(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP2(%rsp), \T1 vmovdqa HashKey_7(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*4(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 ####################################################################### vmovdqa TMP3(%rsp), \T1 vmovdqa HashKey_6(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*5(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP4(%rsp), \T1 vmovdqa HashKey_5(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, 
\T6, \T6 vmovdqu 16*6(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP5(%rsp), \T1 vmovdqa HashKey_4(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*7(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP6(%rsp), \T1 vmovdqa HashKey_3(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*8(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP7(%rsp), \T1 vmovdqa HashKey_2(arg1), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 ####################################################################### vmovdqu 16*9(arg1), \T5 vaesenc \T5, \XMM1, \XMM1 vaesenc \T5, \XMM2, \XMM2 vaesenc \T5, \XMM3, \XMM3 vaesenc \T5, \XMM4, \XMM4 vaesenc \T5, \XMM5, \XMM5 vaesenc \T5, \XMM6, \XMM6 vaesenc \T5, \XMM7, \XMM7 vaesenc \T5, \XMM8, \XMM8 vmovdqa TMP8(%rsp), \T1 vmovdqa HashKey(arg1), \T5 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 
vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T1 vmovdqu 16*10(arg1), \T5 i = 0 j = 1 setreg .rep 8 vpxor 16*i(arg3, %r11), \T5, \T2 .if \ENC_DEC == ENC vaesenclast \T2, reg_j, reg_j .else vaesenclast \T2, reg_j, \T3 vmovdqu 16*i(arg3, %r11), reg_j vmovdqu \T3, 16*i(arg2, %r11) .endif i = (i+1) j = (j+1) setreg .endr ####################################################################### vpslldq $8, \T6, \T3 # shift-L T3 2 DWs vpsrldq $8, \T6, \T6 # shift-R T2 2 DWs vpxor \T3, \T7, \T7 vpxor \T6, \T1, \T1 # accumulate the results in T1:T7 ####################################################################### #first phase of the reduction vmovdqa POLY2(%rip), \T3 vpclmulqdq $0x01, \T7, \T3, \T2 vpslldq $8, \T2, \T2 # shift-L xmm2 2 DWs vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### .if \ENC_DEC == ENC vmovdqu \XMM1, 16*0(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM2, 16*1(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM3, 16*2(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM4, 16*3(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM5, 16*4(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM6, 16*5(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM7, 16*6(arg2,%r11) # Write to the Ciphertext buffer vmovdqu \XMM8, 16*7(arg2,%r11) # Write to the Ciphertext buffer .endif ####################################################################### #second phase of the reduction vpclmulqdq $0x00, \T7, \T3, \T2 vpsrldq $4, \T2, \T2 # shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R) vpclmulqdq $0x10, \T7, \T3, \T4 vpslldq $4, \T4, \T4 # shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts) vpxor \T2, \T4, \T4 # second phase of the reduction complete ####################################################################### vpxor \T4, \T1, \T1 # the result is in T1 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap 
vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap vpxor \T1, \XMM1, \XMM1 .endm # GHASH the last 4 ciphertext blocks. .macro GHASH_LAST_8_AVX2 T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 ## Karatsuba Method vmovdqa HashKey_8(arg1), \T5 vpshufd $0b01001110, \XMM1, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM1, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM1, \T6 vpclmulqdq $0x00, \T5, \XMM1, \T7 vpclmulqdq $0x00, \T3, \T2, \XMM1 ###################### vmovdqa HashKey_7(arg1), \T5 vpshufd $0b01001110, \XMM2, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM2, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM2, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM2, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqa HashKey_6(arg1), \T5 vpshufd $0b01001110, \XMM3, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM3, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM3, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM3, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqa HashKey_5(arg1), \T5 vpshufd $0b01001110, \XMM4, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM4, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM4, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM4, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqa HashKey_4(arg1), \T5 vpshufd $0b01001110, \XMM5, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM5, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM5, \T4 vpxor 
\T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM5, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqa HashKey_3(arg1), \T5 vpshufd $0b01001110, \XMM6, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM6, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM6, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM6, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqa HashKey_2(arg1), \T5 vpshufd $0b01001110, \XMM7, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM7, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM7, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM7, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqa HashKey(arg1), \T5 vpshufd $0b01001110, \XMM8, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM8, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM8, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM8, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 vpxor \T6, \XMM1, \XMM1 vpxor \T7, \XMM1, \T2 vpslldq $8, \T2, \T4 vpsrldq $8, \T2, \T2 vpxor \T4, \T7, \T7 vpxor \T2, \T6, \T6 # <T6:T7> holds the result of the # accumulated carry-less multiplications ####################################################################### #first phase of the reduction vmovdqa POLY2(%rip), \T3 vpclmulqdq $0x01, \T7, \T3, \T2 vpslldq $8, \T2, \T2 # shift-L xmm2 2 DWs vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### #second phase of the reduction vpclmulqdq $0x00, \T7, \T3, \T2 vpsrldq $4, \T2, \T2 # shift-R T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R) vpclmulqdq $0x10, \T7, \T3, \T4 vpslldq $4, \T4, \T4 # shift-L T4 1 DW (Shift-L 1-DW to obtain result with no shifts) vpxor \T2, \T4, \T4 # second phase of the reduction complete 
####################################################################### vpxor \T4, \T6, \T6 # the result is in T6 .endm # combined for GCM encrypt and decrypt functions # clobbering all xmm registers # clobbering r10, r11, r12, r13, r14, r15 .macro GCM_ENC_DEC_AVX2 ENC_DEC #the number of pushes must equal STACK_OFFSET push %r12 push %r13 push %r14 push %r15 mov %rsp, %r14 sub $VARIABLE_OFFSET, %rsp and $~63, %rsp # align rsp to 64 bytes vmovdqu HashKey(arg1), %xmm13 # xmm13 = HashKey mov arg4, %r13 # save the number of bytes of plaintext/ciphertext and $-16, %r13 # r13 = r13 - (r13 mod 16) mov %r13, %r12 shr $4, %r12 and $7, %r12 jz _initial_num_blocks_is_0\@ cmp $7, %r12 je _initial_num_blocks_is_7\@ cmp $6, %r12 je _initial_num_blocks_is_6\@ cmp $5, %r12 je _initial_num_blocks_is_5\@ cmp $4, %r12 je _initial_num_blocks_is_4\@ cmp $3, %r12 je _initial_num_blocks_is_3\@ cmp $2, %r12 je _initial_num_blocks_is_2\@ jmp _initial_num_blocks_is_1\@ _initial_num_blocks_is_7\@: INITIAL_BLOCKS_AVX2 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*7, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_6\@: INITIAL_BLOCKS_AVX2 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*6, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_5\@: INITIAL_BLOCKS_AVX2 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*5, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_4\@: INITIAL_BLOCKS_AVX2 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*4, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_3\@: INITIAL_BLOCKS_AVX2 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, 
%xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*3, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_2\@: INITIAL_BLOCKS_AVX2 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*2, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_1\@: INITIAL_BLOCKS_AVX2 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*1, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_0\@: INITIAL_BLOCKS_AVX2 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC _initial_blocks_encrypted\@: cmp $0, %r13 je _zero_cipher_left\@ sub $128, %r13 je _eight_cipher_left\@ vmovd %xmm9, %r15d and $255, %r15d vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 _encrypt_by_8_new\@: cmp $(255-8), %r15d jg _encrypt_by_8\@ add $8, %r15b GHASH_8_ENCRYPT_8_PARALLEL_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC add $128, %r11 sub $128, %r13 jne _encrypt_by_8_new\@ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 jmp _eight_cipher_left\@ _encrypt_by_8\@: vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 add $8, %r15b GHASH_8_ENCRYPT_8_PARALLEL_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 add $128, %r11 sub $128, %r13 jne _encrypt_by_8_new\@ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 _eight_cipher_left\@: GHASH_LAST_8_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8 _zero_cipher_left\@: cmp $16, arg4 jl _only_less_than_16\@ mov arg4, %r13 and $15, %r13 # r13 = (arg4 mod 16) je _multiple_of_16_bytes\@ # handle the last <16 Byte block seperately vpaddd ONE(%rip), %xmm9, %xmm9 # INCR 
CNT to get Yn vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn) sub $16, %r11 add %r13, %r11 vmovdqu (arg3, %r11), %xmm1 # receive the last <16 Byte block lea SHIFT_MASK+16(%rip), %r12 sub %r13, %r12 # adjust the shuffle mask pointer # to be able to shift 16-r13 bytes # (r13 is the number of bytes in plaintext mod 16) vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes jmp _final_ghash_mul\@ _only_less_than_16\@: # check for 0 length mov arg4, %r13 and $15, %r13 # r13 = (arg4 mod 16) je _multiple_of_16_bytes\@ # handle the last <16 Byte block seperately vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn) lea SHIFT_MASK+16(%rip), %r12 sub %r13, %r12 # adjust the shuffle mask pointer to be # able to shift 16-r13 bytes (r13 is the # number of bytes in plaintext mod 16) _get_last_16_byte_loop\@: movb (arg3, %r11), %al movb %al, TMP1 (%rsp , %r11) add $1, %r11 cmp %r13, %r11 jne _get_last_16_byte_loop\@ vmovdqu TMP1(%rsp), %xmm1 sub $16, %r11 _final_ghash_mul\@: .if \ENC_DEC == DEC vmovdqa %xmm1, %xmm2 vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm2, %xmm2 vpshufb SHUF_MASK(%rip), %xmm2, %xmm2 vpxor %xmm2, %xmm14, %xmm14 #GHASH computation for the last <16 Byte block GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 sub %r13, %r11 add $16, %r11 .else vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 vpxor %xmm9, %xmm14, %xmm14 #GHASH computation for the last <16 Byte block GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, 
%xmm10, %xmm11, %xmm5, %xmm6 sub %r13, %r11 add $16, %r11 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext .endif ############################# # output r13 Bytes vmovq %xmm9, %rax cmp $8, %r13 jle _less_than_8_bytes_left\@ mov %rax, (arg2 , %r11) add $8, %r11 vpsrldq $8, %xmm9, %xmm9 vmovq %xmm9, %rax sub $8, %r13 _less_than_8_bytes_left\@: movb %al, (arg2 , %r11) add $1, %r11 shr $8, %rax sub $1, %r13 jne _less_than_8_bytes_left\@ ############################# _multiple_of_16_bytes\@: mov arg7, %r12 # r12 = aadLen (number of bytes) shl $3, %r12 # convert into number of bits vmovd %r12d, %xmm15 # len(A) in xmm15 shl $3, arg4 # len(C) in bits (*128) vmovq arg4, %xmm1 vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000 vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C) vpxor %xmm15, %xmm14, %xmm14 GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap mov arg5, %rax # rax = *Y0 vmovdqu (%rax), %xmm9 # xmm9 = Y0 ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Y0) vpxor %xmm14, %xmm9, %xmm9 _return_T\@: mov arg8, %r10 # r10 = authTag mov arg9, %r11 # r11 = auth_tag_len cmp $16, %r11 je _T_16\@ cmp $8, %r11 jl _T_4\@ _T_8\@: vmovq %xmm9, %rax mov %rax, (%r10) add $8, %r10 sub $8, %r11 vpsrldq $8, %xmm9, %xmm9 cmp $0, %r11 je _return_T_done\@ _T_4\@: vmovd %xmm9, %eax mov %eax, (%r10) add $4, %r10 sub $4, %r11 vpsrldq $4, %xmm9, %xmm9 cmp $0, %r11 je _return_T_done\@ _T_123\@: vmovd %xmm9, %eax cmp $2, %r11 jl _T_1\@ mov %ax, (%r10) cmp $2, %r11 je _return_T_done\@ add $2, %r10 sar $16, %eax _T_1\@: mov %al, (%r10) jmp _return_T_done\@ _T_16\@: vmovdqu %xmm9, (%r10) _return_T_done\@: mov %r14, %rsp pop %r15 pop %r14 pop %r13 pop %r12 .endm ############################################################# #void aesni_gcm_precomp_avx_gen4 # (gcm_data *my_ctx_data, # u8 *hash_subkey)# /* H, the Hash sub key input. 
# Data starts on a 16-byte boundary. */ ############################################################# ENTRY(aesni_gcm_precomp_avx_gen4) #the number of pushes must equal STACK_OFFSET push %r12 push %r13 push %r14 push %r15 mov %rsp, %r14 sub $VARIABLE_OFFSET, %rsp and $~63, %rsp # align rsp to 64 bytes vmovdqu (arg2), %xmm6 # xmm6 = HashKey vpshufb SHUF_MASK(%rip), %xmm6, %xmm6 ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey vmovdqa %xmm6, %xmm2 vpsllq $1, %xmm6, %xmm6 vpsrlq $63, %xmm2, %xmm2 vmovdqa %xmm2, %xmm1 vpslldq $8, %xmm2, %xmm2 vpsrldq $8, %xmm1, %xmm1 vpor %xmm2, %xmm6, %xmm6 #reduction vpshufd $0b00100100, %xmm1, %xmm2 vpcmpeqd TWOONE(%rip), %xmm2, %xmm2 vpand POLY(%rip), %xmm2, %xmm2 vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly ####################################################################### vmovdqa %xmm6, HashKey(arg1) # store HashKey<<1 mod poly PRECOMPUTE_AVX2 %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5 mov %r14, %rsp pop %r15 pop %r14 pop %r13 pop %r12 ret ENDPROC(aesni_gcm_precomp_avx_gen4) ############################################################################### #void aesni_gcm_enc_avx_gen4( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ # u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */ # const u8 *in, /* Plaintext input */ # u64 plaintext_len, /* Length of data in Bytes for encryption. */ # u8 *iv, /* Pre-counter block j0: 4 byte salt # (from Security Association) concatenated with 8 byte # Initialisation Vector (from IPSec ESP Payload) # concatenated with 0x00000001. 16-byte aligned pointer. */ # const u8 *aad, /* Additional Authentication Data (AAD)*/ # u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ # u8 *auth_tag, /* Authenticated Tag output. */ # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. 
*/ ############################################################################### ENTRY(aesni_gcm_enc_avx_gen4) GCM_ENC_DEC_AVX2 ENC ret ENDPROC(aesni_gcm_enc_avx_gen4) ############################################################################### #void aesni_gcm_dec_avx_gen4( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ # u8 *out, /* Plaintext output. Decrypt in-place is allowed. */ # const u8 *in, /* Ciphertext input */ # u64 plaintext_len, /* Length of data in Bytes for encryption. */ # u8 *iv, /* Pre-counter block j0: 4 byte salt # (from Security Association) concatenated with 8 byte # Initialisation Vector (from IPSec ESP Payload) # concatenated with 0x00000001. 16-byte aligned pointer. */ # const u8 *aad, /* Additional Authentication Data (AAD)*/ # u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ # u8 *auth_tag, /* Authenticated Tag output. */ # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. */ ############################################################################### ENTRY(aesni_gcm_dec_avx_gen4) GCM_ENC_DEC_AVX2 DEC ret ENDPROC(aesni_gcm_dec_avx_gen4) #endif /* CONFIG_AS_AVX2 */
AirFortressIlikara/LS2K0300-linux-4.19
10,605
arch/x86/crypto/sha256_ni_asm.S
/* * Intel SHA Extensions optimized implementation of a SHA-256 update function * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Sean Gulley <sean.m.gulley@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2015 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/linkage.h> #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ #define NUM_BLKS %rdx /* 3rd arg */ #define SHA256CONSTANTS %rax #define MSG %xmm0 #define STATE0 %xmm1 #define STATE1 %xmm2 #define MSGTMP0 %xmm3 #define MSGTMP1 %xmm4 #define MSGTMP2 %xmm5 #define MSGTMP3 %xmm6 #define MSGTMP4 %xmm7 #define SHUF_MASK %xmm8 #define ABEF_SAVE %xmm9 #define CDGH_SAVE %xmm10 /* * Intel SHA Extensions optimized implementation of a SHA-256 update function * * The function takes a pointer to the current hash values, a pointer to the * input data, and a number of 64 byte blocks to process. Once all blocks have * been processed, the digest pointer is updated with the resulting hash value. * The function only processes complete blocks, there is no functionality to * store partial blocks. All message padding and hash value initialization must * be done outside the update function. * * The indented lines in the loop are instructions related to rounds processing. * The non-indented lines are instructions related to the message schedule. 
* * void sha256_ni_transform(uint32_t *digest, const void *data, uint32_t numBlocks); * digest : pointer to digest * data: pointer to input data * numBlocks: Number of blocks to process */ .text .align 32 ENTRY(sha256_ni_transform) shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash add DATA_PTR, NUM_BLKS /* pointer to end of data */ /* * load initial hash values * Need to reorder these appropriately * DCBA, HGFE -> ABEF, CDGH */ movdqu 0*16(DIGEST_PTR), STATE0 movdqu 1*16(DIGEST_PTR), STATE1 pshufd $0xB1, STATE0, STATE0 /* CDAB */ pshufd $0x1B, STATE1, STATE1 /* EFGH */ movdqa STATE0, MSGTMP4 palignr $8, STATE1, STATE0 /* ABEF */ pblendw $0xF0, MSGTMP4, STATE1 /* CDGH */ movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), SHUF_MASK lea K256(%rip), SHA256CONSTANTS .Lloop0: /* Save hash values for addition after rounds */ movdqa STATE0, ABEF_SAVE movdqa STATE1, CDGH_SAVE /* Rounds 0-3 */ movdqu 0*16(DATA_PTR), MSG pshufb SHUF_MASK, MSG movdqa MSG, MSGTMP0 paddd 0*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 /* Rounds 4-7 */ movdqu 1*16(DATA_PTR), MSG pshufb SHUF_MASK, MSG movdqa MSG, MSGTMP1 paddd 1*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP1, MSGTMP0 /* Rounds 8-11 */ movdqu 2*16(DATA_PTR), MSG pshufb SHUF_MASK, MSG movdqa MSG, MSGTMP2 paddd 2*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP2, MSGTMP1 /* Rounds 12-15 */ movdqu 3*16(DATA_PTR), MSG pshufb SHUF_MASK, MSG movdqa MSG, MSGTMP3 paddd 3*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP3, MSGTMP4 palignr $4, MSGTMP2, MSGTMP4 paddd MSGTMP4, MSGTMP0 sha256msg2 MSGTMP3, MSGTMP0 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP3, MSGTMP2 /* Rounds 16-19 */ movdqa MSGTMP0, MSG paddd 4*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP0, MSGTMP4 palignr $4, MSGTMP3, MSGTMP4 
paddd MSGTMP4, MSGTMP1 sha256msg2 MSGTMP0, MSGTMP1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP0, MSGTMP3 /* Rounds 20-23 */ movdqa MSGTMP1, MSG paddd 5*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP1, MSGTMP4 palignr $4, MSGTMP0, MSGTMP4 paddd MSGTMP4, MSGTMP2 sha256msg2 MSGTMP1, MSGTMP2 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP1, MSGTMP0 /* Rounds 24-27 */ movdqa MSGTMP2, MSG paddd 6*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP2, MSGTMP4 palignr $4, MSGTMP1, MSGTMP4 paddd MSGTMP4, MSGTMP3 sha256msg2 MSGTMP2, MSGTMP3 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP2, MSGTMP1 /* Rounds 28-31 */ movdqa MSGTMP3, MSG paddd 7*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP3, MSGTMP4 palignr $4, MSGTMP2, MSGTMP4 paddd MSGTMP4, MSGTMP0 sha256msg2 MSGTMP3, MSGTMP0 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP3, MSGTMP2 /* Rounds 32-35 */ movdqa MSGTMP0, MSG paddd 8*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP0, MSGTMP4 palignr $4, MSGTMP3, MSGTMP4 paddd MSGTMP4, MSGTMP1 sha256msg2 MSGTMP0, MSGTMP1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP0, MSGTMP3 /* Rounds 36-39 */ movdqa MSGTMP1, MSG paddd 9*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP1, MSGTMP4 palignr $4, MSGTMP0, MSGTMP4 paddd MSGTMP4, MSGTMP2 sha256msg2 MSGTMP1, MSGTMP2 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP1, MSGTMP0 /* Rounds 40-43 */ movdqa MSGTMP2, MSG paddd 10*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP2, MSGTMP4 palignr $4, MSGTMP1, MSGTMP4 paddd MSGTMP4, MSGTMP3 sha256msg2 MSGTMP2, MSGTMP3 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP2, MSGTMP1 /* Rounds 44-47 */ movdqa MSGTMP3, MSG paddd 11*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP3, MSGTMP4 palignr $4, MSGTMP2, MSGTMP4 paddd MSGTMP4, MSGTMP0 
sha256msg2 MSGTMP3, MSGTMP0 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP3, MSGTMP2 /* Rounds 48-51 */ movdqa MSGTMP0, MSG paddd 12*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP0, MSGTMP4 palignr $4, MSGTMP3, MSGTMP4 paddd MSGTMP4, MSGTMP1 sha256msg2 MSGTMP0, MSGTMP1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP0, MSGTMP3 /* Rounds 52-55 */ movdqa MSGTMP1, MSG paddd 13*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP1, MSGTMP4 palignr $4, MSGTMP0, MSGTMP4 paddd MSGTMP4, MSGTMP2 sha256msg2 MSGTMP1, MSGTMP2 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 /* Rounds 56-59 */ movdqa MSGTMP2, MSG paddd 14*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP2, MSGTMP4 palignr $4, MSGTMP1, MSGTMP4 paddd MSGTMP4, MSGTMP3 sha256msg2 MSGTMP2, MSGTMP3 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 /* Rounds 60-63 */ movdqa MSGTMP3, MSG paddd 15*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 /* Add current hash values with previously saved */ paddd ABEF_SAVE, STATE0 paddd CDGH_SAVE, STATE1 /* Increment data pointer and loop if more to process */ add $64, DATA_PTR cmp NUM_BLKS, DATA_PTR jne .Lloop0 /* Write hash values back in the correct order */ pshufd $0x1B, STATE0, STATE0 /* FEBA */ pshufd $0xB1, STATE1, STATE1 /* DCHG */ movdqa STATE0, MSGTMP4 pblendw $0xF0, STATE1, STATE0 /* DCBA */ palignr $8, MSGTMP4, STATE1 /* HGFE */ movdqu STATE0, 0*16(DIGEST_PTR) movdqu STATE1, 1*16(DIGEST_PTR) .Ldone_hash: ret ENDPROC(sha256_ni_transform) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203
AirFortressIlikara/LS2K0300-linux-4.19
4,834
arch/x86/crypto/aes-x86_64-asm_64.S
/* AES (Rijndael) implementation (FIPS PUB 197) for x86_64 * * Copyright (C) 2005 Andreas Steinmetz, <ast@domdv.de> * * License: * This code can be distributed under the terms of the GNU General Public * License (GPL) Version 2 provided that the above header down to and * including this sentence is retained in full. */ .extern crypto_ft_tab .extern crypto_it_tab .extern crypto_fl_tab .extern crypto_il_tab .text #include <linux/linkage.h> #include <asm/asm-offsets.h> #define R1 %rax #define R1E %eax #define R1X %ax #define R1H %ah #define R1L %al #define R2 %rbx #define R2E %ebx #define R2X %bx #define R2H %bh #define R2L %bl #define R3 %rcx #define R3E %ecx #define R3X %cx #define R3H %ch #define R3L %cl #define R4 %rdx #define R4E %edx #define R4X %dx #define R4H %dh #define R4L %dl #define R5 %rsi #define R5E %esi #define R6 %rdi #define R6E %edi #define R7 %r9 /* don't use %rbp; it breaks stack traces */ #define R7E %r9d #define R8 %r8 #define R10 %r10 #define R11 %r11 #define prologue(FUNC,KEY,B128,B192,r1,r2,r5,r6,r7,r8,r9,r10,r11) \ ENTRY(FUNC); \ movq r1,r2; \ leaq KEY+48(r8),r9; \ movq r10,r11; \ movl (r7),r5 ## E; \ movl 4(r7),r1 ## E; \ movl 8(r7),r6 ## E; \ movl 12(r7),r7 ## E; \ movl 480(r8),r10 ## E; \ xorl -48(r9),r5 ## E; \ xorl -44(r9),r1 ## E; \ xorl -40(r9),r6 ## E; \ xorl -36(r9),r7 ## E; \ cmpl $24,r10 ## E; \ jb B128; \ leaq 32(r9),r9; \ je B192; \ leaq 32(r9),r9; #define epilogue(FUNC,r1,r2,r5,r6,r7,r8,r9) \ movq r1,r2; \ movl r5 ## E,(r9); \ movl r6 ## E,4(r9); \ movl r7 ## E,8(r9); \ movl r8 ## E,12(r9); \ ret; \ ENDPROC(FUNC); #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \ movzbl r2 ## H,r5 ## E; \ movzbl r2 ## L,r6 ## E; \ movl TAB+1024(,r5,4),r5 ## E;\ movw r4 ## X,r2 ## X; \ movl TAB(,r6,4),r6 ## E; \ roll $16,r2 ## E; \ shrl $16,r4 ## E; \ movzbl r4 ## L,r7 ## E; \ movzbl r4 ## H,r4 ## E; \ xorl OFFSET(r8),ra ## E; \ xorl OFFSET+4(r8),rb ## E; \ xorl TAB+3072(,r4,4),r5 ## E;\ xorl TAB+2048(,r7,4),r6 ## E;\ movzbl r1 ## 
L,r7 ## E; \ movzbl r1 ## H,r4 ## E; \ movl TAB+1024(,r4,4),r4 ## E;\ movw r3 ## X,r1 ## X; \ roll $16,r1 ## E; \ shrl $16,r3 ## E; \ xorl TAB(,r7,4),r5 ## E; \ movzbl r3 ## L,r7 ## E; \ movzbl r3 ## H,r3 ## E; \ xorl TAB+3072(,r3,4),r4 ## E;\ xorl TAB+2048(,r7,4),r5 ## E;\ movzbl r1 ## L,r7 ## E; \ movzbl r1 ## H,r3 ## E; \ shrl $16,r1 ## E; \ xorl TAB+3072(,r3,4),r6 ## E;\ movl TAB+2048(,r7,4),r3 ## E;\ movzbl r1 ## L,r7 ## E; \ movzbl r1 ## H,r1 ## E; \ xorl TAB+1024(,r1,4),r6 ## E;\ xorl TAB(,r7,4),r3 ## E; \ movzbl r2 ## H,r1 ## E; \ movzbl r2 ## L,r7 ## E; \ shrl $16,r2 ## E; \ xorl TAB+3072(,r1,4),r3 ## E;\ xorl TAB+2048(,r7,4),r4 ## E;\ movzbl r2 ## H,r1 ## E; \ movzbl r2 ## L,r2 ## E; \ xorl OFFSET+8(r8),rc ## E; \ xorl OFFSET+12(r8),rd ## E; \ xorl TAB+1024(,r1,4),r3 ## E;\ xorl TAB(,r2,4),r4 ## E; #define move_regs(r1,r2,r3,r4) \ movl r3 ## E,r1 ## E; \ movl r4 ## E,r2 ## E; #define entry(FUNC,KEY,B128,B192) \ prologue(FUNC,KEY,B128,B192,R2,R8,R1,R3,R4,R6,R10,R5,R11) #define return(FUNC) epilogue(FUNC,R8,R2,R5,R6,R3,R4,R11) #define encrypt_round(TAB,OFFSET) \ round(TAB,OFFSET,R1,R2,R3,R4,R5,R6,R7,R10,R5,R6,R3,R4) \ move_regs(R1,R2,R5,R6) #define encrypt_final(TAB,OFFSET) \ round(TAB,OFFSET,R1,R2,R3,R4,R5,R6,R7,R10,R5,R6,R3,R4) #define decrypt_round(TAB,OFFSET) \ round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4) \ move_regs(R1,R2,R5,R6) #define decrypt_final(TAB,OFFSET) \ round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4) /* void aes_enc_blk(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */ entry(aes_enc_blk,0,.Le128,.Le192) encrypt_round(crypto_ft_tab,-96) encrypt_round(crypto_ft_tab,-80) .Le192: encrypt_round(crypto_ft_tab,-64) encrypt_round(crypto_ft_tab,-48) .Le128: encrypt_round(crypto_ft_tab,-32) encrypt_round(crypto_ft_tab,-16) encrypt_round(crypto_ft_tab, 0) encrypt_round(crypto_ft_tab, 16) encrypt_round(crypto_ft_tab, 32) encrypt_round(crypto_ft_tab, 48) encrypt_round(crypto_ft_tab, 64) encrypt_round(crypto_ft_tab, 80) 
encrypt_round(crypto_ft_tab, 96) encrypt_final(crypto_fl_tab,112) return(aes_enc_blk) /* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ entry(aes_dec_blk,240,.Ld128,.Ld192) decrypt_round(crypto_it_tab,-96) decrypt_round(crypto_it_tab,-80) .Ld192: decrypt_round(crypto_it_tab,-64) decrypt_round(crypto_it_tab,-48) .Ld128: decrypt_round(crypto_it_tab,-32) decrypt_round(crypto_it_tab,-16) decrypt_round(crypto_it_tab, 0) decrypt_round(crypto_it_tab, 16) decrypt_round(crypto_it_tab, 32) decrypt_round(crypto_it_tab, 48) decrypt_round(crypto_it_tab, 64) decrypt_round(crypto_it_tab, 80) decrypt_round(crypto_it_tab, 96) decrypt_final(crypto_il_tab,112) return(aes_dec_blk)
AirFortressIlikara/LS2K0300-linux-4.19
12,924
arch/x86/crypto/aegis128l-aesni-asm.S
/* * AES-NI + SSE2 implementation of AEGIS-128L * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/frame.h> #define STATE0 %xmm0 #define STATE1 %xmm1 #define STATE2 %xmm2 #define STATE3 %xmm3 #define STATE4 %xmm4 #define STATE5 %xmm5 #define STATE6 %xmm6 #define STATE7 %xmm7 #define MSG0 %xmm8 #define MSG1 %xmm9 #define T0 %xmm10 #define T1 %xmm11 #define T2 %xmm12 #define T3 %xmm13 #define STATEP %rdi #define LEN %rsi #define SRC %rdx #define DST %rcx .section .rodata.cst16.aegis128l_const, "aM", @progbits, 32 .align 16 .Laegis128l_const_0: .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 .Laegis128l_const_1: .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd .section .rodata.cst16.aegis128l_counter, "aM", @progbits, 16 .align 16 .Laegis128l_counter0: .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f .Laegis128l_counter1: .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f .text /* * __load_partial: internal ABI * input: * LEN - bytes * SRC - src * output: * MSG0 - first message block * MSG1 - second message block * changed: * T0 * %r8 * %r9 */ __load_partial: xor %r9d, %r9d pxor MSG0, MSG0 pxor MSG1, MSG1 mov LEN, %r8 and $0x1, %r8 jz .Lld_partial_1 mov LEN, %r8 and $0x1E, %r8 add SRC, %r8 mov (%r8), %r9b .Lld_partial_1: mov LEN, %r8 and $0x2, %r8 jz .Lld_partial_2 mov LEN, %r8 and $0x1C, %r8 add SRC, %r8 shl $0x10, %r9 mov (%r8), %r9w .Lld_partial_2: mov LEN, %r8 and $0x4, %r8 jz .Lld_partial_4 mov LEN, %r8 and $0x18, %r8 add SRC, %r8 shl $32, 
%r9 mov (%r8), %r8d xor %r8, %r9 .Lld_partial_4: movq %r9, MSG0 mov LEN, %r8 and $0x8, %r8 jz .Lld_partial_8 mov LEN, %r8 and $0x10, %r8 add SRC, %r8 pslldq $8, MSG0 movq (%r8), T0 pxor T0, MSG0 .Lld_partial_8: mov LEN, %r8 and $0x10, %r8 jz .Lld_partial_16 movdqa MSG0, MSG1 movdqu (SRC), MSG0 .Lld_partial_16: ret ENDPROC(__load_partial) /* * __store_partial: internal ABI * input: * LEN - bytes * DST - dst * output: * T0 - first message block * T1 - second message block * changed: * %r8 * %r9 * %r10 */ __store_partial: mov LEN, %r8 mov DST, %r9 cmp $16, %r8 jl .Lst_partial_16 movdqu T0, (%r9) movdqa T1, T0 sub $16, %r8 add $16, %r9 .Lst_partial_16: movq T0, %r10 cmp $8, %r8 jl .Lst_partial_8 mov %r10, (%r9) psrldq $8, T0 movq T0, %r10 sub $8, %r8 add $8, %r9 .Lst_partial_8: cmp $4, %r8 jl .Lst_partial_4 mov %r10d, (%r9) shr $32, %r10 sub $4, %r8 add $4, %r9 .Lst_partial_4: cmp $2, %r8 jl .Lst_partial_2 mov %r10w, (%r9) shr $0x10, %r10 sub $2, %r8 add $2, %r9 .Lst_partial_2: cmp $1, %r8 jl .Lst_partial_1 mov %r10b, (%r9) .Lst_partial_1: ret ENDPROC(__store_partial) .macro update movdqa STATE7, T0 aesenc STATE0, STATE7 aesenc STATE1, STATE0 aesenc STATE2, STATE1 aesenc STATE3, STATE2 aesenc STATE4, STATE3 aesenc STATE5, STATE4 aesenc STATE6, STATE5 aesenc T0, STATE6 .endm .macro update0 update pxor MSG0, STATE7 pxor MSG1, STATE3 .endm .macro update1 update pxor MSG0, STATE6 pxor MSG1, STATE2 .endm .macro update2 update pxor MSG0, STATE5 pxor MSG1, STATE1 .endm .macro update3 update pxor MSG0, STATE4 pxor MSG1, STATE0 .endm .macro update4 update pxor MSG0, STATE3 pxor MSG1, STATE7 .endm .macro update5 update pxor MSG0, STATE2 pxor MSG1, STATE6 .endm .macro update6 update pxor MSG0, STATE1 pxor MSG1, STATE5 .endm .macro update7 update pxor MSG0, STATE0 pxor MSG1, STATE4 .endm .macro state_load movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 movdqu 0x50(STATEP), STATE5 movdqu 
0x60(STATEP), STATE6 movdqu 0x70(STATEP), STATE7 .endm .macro state_store s0 s1 s2 s3 s4 s5 s6 s7 movdqu \s7, 0x00(STATEP) movdqu \s0, 0x10(STATEP) movdqu \s1, 0x20(STATEP) movdqu \s2, 0x30(STATEP) movdqu \s3, 0x40(STATEP) movdqu \s4, 0x50(STATEP) movdqu \s5, 0x60(STATEP) movdqu \s6, 0x70(STATEP) .endm .macro state_store0 state_store STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 .endm .macro state_store1 state_store STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 .endm .macro state_store2 state_store STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 .endm .macro state_store3 state_store STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 .endm .macro state_store4 state_store STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 .endm .macro state_store5 state_store STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 .endm .macro state_store6 state_store STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 .endm .macro state_store7 state_store STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 .endm /* * void crypto_aegis128l_aesni_init(void *state, const void *key, const void *iv); */ ENTRY(crypto_aegis128l_aesni_init) FRAME_BEGIN /* load key: */ movdqa (%rsi), MSG1 movdqa MSG1, STATE0 movdqa MSG1, STATE4 movdqa MSG1, STATE5 movdqa MSG1, STATE6 movdqa MSG1, STATE7 /* load IV: */ movdqu (%rdx), MSG0 pxor MSG0, STATE0 pxor MSG0, STATE4 /* load the constants: */ movdqa .Laegis128l_const_0, STATE2 movdqa .Laegis128l_const_1, STATE1 movdqa STATE1, STATE3 pxor STATE2, STATE5 pxor STATE1, STATE6 pxor STATE2, STATE7 /* update 10 times with IV and KEY: */ update0 update1 update2 update3 update4 update5 update6 update7 update0 update1 state_store1 FRAME_END ret ENDPROC(crypto_aegis128l_aesni_init) .macro ad_block a i movdq\a (\i * 0x20 + 0x00)(SRC), MSG0 movdq\a (\i * 0x20 + 0x10)(SRC), MSG1 update\i sub $0x20, LEN cmp $0x20, LEN jl .Lad_out_\i .endm /* * void crypto_aegis128l_aesni_ad(void *state, unsigned int length, * const void 
*data); */ ENTRY(crypto_aegis128l_aesni_ad) FRAME_BEGIN cmp $0x20, LEN jb .Lad_out state_load mov SRC, %r8 and $0xf, %r8 jnz .Lad_u_loop .align 8 .Lad_a_loop: ad_block a 0 ad_block a 1 ad_block a 2 ad_block a 3 ad_block a 4 ad_block a 5 ad_block a 6 ad_block a 7 add $0x100, SRC jmp .Lad_a_loop .align 8 .Lad_u_loop: ad_block u 0 ad_block u 1 ad_block u 2 ad_block u 3 ad_block u 4 ad_block u 5 ad_block u 6 ad_block u 7 add $0x100, SRC jmp .Lad_u_loop .Lad_out_0: state_store0 FRAME_END ret .Lad_out_1: state_store1 FRAME_END ret .Lad_out_2: state_store2 FRAME_END ret .Lad_out_3: state_store3 FRAME_END ret .Lad_out_4: state_store4 FRAME_END ret .Lad_out_5: state_store5 FRAME_END ret .Lad_out_6: state_store6 FRAME_END ret .Lad_out_7: state_store7 FRAME_END ret .Lad_out: FRAME_END ret ENDPROC(crypto_aegis128l_aesni_ad) .macro crypt m0 m1 s0 s1 s2 s3 s4 s5 s6 s7 pxor \s1, \m0 pxor \s6, \m0 movdqa \s2, T3 pand \s3, T3 pxor T3, \m0 pxor \s2, \m1 pxor \s5, \m1 movdqa \s6, T3 pand \s7, T3 pxor T3, \m1 .endm .macro crypt0 m0 m1 crypt \m0 \m1 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 .endm .macro crypt1 m0 m1 crypt \m0 \m1 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 .endm .macro crypt2 m0 m1 crypt \m0 \m1 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 .endm .macro crypt3 m0 m1 crypt \m0 \m1 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 .endm .macro crypt4 m0 m1 crypt \m0 \m1 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 .endm .macro crypt5 m0 m1 crypt \m0 \m1 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 .endm .macro crypt6 m0 m1 crypt \m0 \m1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 .endm .macro crypt7 m0 m1 crypt \m0 \m1 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 .endm .macro encrypt_block a i movdq\a (\i * 0x20 + 0x00)(SRC), MSG0 movdq\a (\i * 0x20 + 0x10)(SRC), MSG1 movdqa MSG0, T0 movdqa MSG1, T1 crypt\i T0, T1 movdq\a T0, (\i * 0x20 + 0x00)(DST) movdq\a T1, (\i * 0x20 + 0x10)(DST) 
update\i sub $0x20, LEN cmp $0x20, LEN jl .Lenc_out_\i .endm .macro decrypt_block a i movdq\a (\i * 0x20 + 0x00)(SRC), MSG0 movdq\a (\i * 0x20 + 0x10)(SRC), MSG1 crypt\i MSG0, MSG1 movdq\a MSG0, (\i * 0x20 + 0x00)(DST) movdq\a MSG1, (\i * 0x20 + 0x10)(DST) update\i sub $0x20, LEN cmp $0x20, LEN jl .Ldec_out_\i .endm /* * void crypto_aegis128l_aesni_enc(void *state, unsigned int length, * const void *src, void *dst); */ ENTRY(crypto_aegis128l_aesni_enc) FRAME_BEGIN cmp $0x20, LEN jb .Lenc_out state_load mov SRC, %r8 or DST, %r8 and $0xf, %r8 jnz .Lenc_u_loop .align 8 .Lenc_a_loop: encrypt_block a 0 encrypt_block a 1 encrypt_block a 2 encrypt_block a 3 encrypt_block a 4 encrypt_block a 5 encrypt_block a 6 encrypt_block a 7 add $0x100, SRC add $0x100, DST jmp .Lenc_a_loop .align 8 .Lenc_u_loop: encrypt_block u 0 encrypt_block u 1 encrypt_block u 2 encrypt_block u 3 encrypt_block u 4 encrypt_block u 5 encrypt_block u 6 encrypt_block u 7 add $0x100, SRC add $0x100, DST jmp .Lenc_u_loop .Lenc_out_0: state_store0 FRAME_END ret .Lenc_out_1: state_store1 FRAME_END ret .Lenc_out_2: state_store2 FRAME_END ret .Lenc_out_3: state_store3 FRAME_END ret .Lenc_out_4: state_store4 FRAME_END ret .Lenc_out_5: state_store5 FRAME_END ret .Lenc_out_6: state_store6 FRAME_END ret .Lenc_out_7: state_store7 FRAME_END ret .Lenc_out: FRAME_END ret ENDPROC(crypto_aegis128l_aesni_enc) /* * void crypto_aegis128l_aesni_enc_tail(void *state, unsigned int length, * const void *src, void *dst); */ ENTRY(crypto_aegis128l_aesni_enc_tail) FRAME_BEGIN state_load /* encrypt message: */ call __load_partial movdqa MSG0, T0 movdqa MSG1, T1 crypt0 T0, T1 call __store_partial update0 state_store0 FRAME_END ret ENDPROC(crypto_aegis128l_aesni_enc_tail) /* * void crypto_aegis128l_aesni_dec(void *state, unsigned int length, * const void *src, void *dst); */ ENTRY(crypto_aegis128l_aesni_dec) FRAME_BEGIN cmp $0x20, LEN jb .Ldec_out state_load mov SRC, %r8 or DST, %r8 and $0xF, %r8 jnz .Ldec_u_loop .align 8 
.Ldec_a_loop: decrypt_block a 0 decrypt_block a 1 decrypt_block a 2 decrypt_block a 3 decrypt_block a 4 decrypt_block a 5 decrypt_block a 6 decrypt_block a 7 add $0x100, SRC add $0x100, DST jmp .Ldec_a_loop .align 8 .Ldec_u_loop: decrypt_block u 0 decrypt_block u 1 decrypt_block u 2 decrypt_block u 3 decrypt_block u 4 decrypt_block u 5 decrypt_block u 6 decrypt_block u 7 add $0x100, SRC add $0x100, DST jmp .Ldec_u_loop .Ldec_out_0: state_store0 FRAME_END ret .Ldec_out_1: state_store1 FRAME_END ret .Ldec_out_2: state_store2 FRAME_END ret .Ldec_out_3: state_store3 FRAME_END ret .Ldec_out_4: state_store4 FRAME_END ret .Ldec_out_5: state_store5 FRAME_END ret .Ldec_out_6: state_store6 FRAME_END ret .Ldec_out_7: state_store7 FRAME_END ret .Ldec_out: FRAME_END ret ENDPROC(crypto_aegis128l_aesni_dec) /* * void crypto_aegis128l_aesni_dec_tail(void *state, unsigned int length, * const void *src, void *dst); */ ENTRY(crypto_aegis128l_aesni_dec_tail) FRAME_BEGIN state_load /* decrypt message: */ call __load_partial crypt0 MSG0, MSG1 movdqa MSG0, T0 movdqa MSG1, T1 call __store_partial /* mask with byte count: */ movq LEN, T0 punpcklbw T0, T0 punpcklbw T0, T0 punpcklbw T0, T0 punpcklbw T0, T0 movdqa T0, T1 movdqa .Laegis128l_counter0, T2 movdqa .Laegis128l_counter1, T3 pcmpgtb T2, T0 pcmpgtb T3, T1 pand T0, MSG0 pand T1, MSG1 update0 state_store0 FRAME_END ret ENDPROC(crypto_aegis128l_aesni_dec_tail) /* * void crypto_aegis128l_aesni_final(void *state, void *tag_xor, * u64 assoclen, u64 cryptlen); */ ENTRY(crypto_aegis128l_aesni_final) FRAME_BEGIN state_load /* prepare length block: */ movq %rdx, MSG0 movq %rcx, T0 pslldq $8, T0 pxor T0, MSG0 psllq $3, MSG0 /* multiply by 8 (to get bit count) */ pxor STATE2, MSG0 movdqa MSG0, MSG1 /* update state: */ update0 update1 update2 update3 update4 update5 update6 /* xor tag: */ movdqu (%rsi), T0 pxor STATE1, T0 pxor STATE2, T0 pxor STATE3, T0 pxor STATE4, T0 pxor STATE5, T0 pxor STATE6, T0 pxor STATE7, T0 movdqu T0, (%rsi) FRAME_END ret 
ENDPROC(crypto_aegis128l_aesni_final)
AirFortressIlikara/LS2K0300-linux-4.19
11,231
arch/x86/crypto/twofish-avx-x86_64-asm_64.S
/* * Twofish Cipher 8-way parallel algorithm (AVX/x86_64) * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/linkage.h> #include <asm/frame.h> #include "glue_helper-asm-avx.S" .file "twofish-avx-x86_64-asm_64.S" .section .rodata.cst16.bswap128_mask, "aM", @progbits, 16 .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16 .align 16 .Lxts_gf128mul_and_shl1_mask: .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 .text /* structure of crypto context */ #define s0 0 #define s1 1024 #define s2 2048 #define s3 3072 #define w 4096 #define k 4128 /********************************************************************** 8-way AVX twofish **********************************************************************/ #define CTX %rdi #define RA1 %xmm0 #define RB1 %xmm1 #define RC1 %xmm2 #define RD1 %xmm3 #define RA2 %xmm4 #define RB2 %xmm5 #define RC2 %xmm6 #define RD2 %xmm7 #define RX0 %xmm8 #define RY0 %xmm9 #define RX1 %xmm10 #define RY1 %xmm11 #define RK1 %xmm12 #define RK2 %xmm13 #define RT %xmm14 #define RR %xmm15 #define RID1 %r13 #define RID1d %r13d 
#define RID2 %rsi #define RID2d %esi #define RGI1 %rdx #define RGI1bl %dl #define RGI1bh %dh #define RGI2 %rcx #define RGI2bl %cl #define RGI2bh %ch #define RGI3 %rax #define RGI3bl %al #define RGI3bh %ah #define RGI4 %rbx #define RGI4bl %bl #define RGI4bh %bh #define RGS1 %r8 #define RGS1d %r8d #define RGS2 %r9 #define RGS2d %r9d #define RGS3 %r10 #define RGS3d %r10d #define lookup_32bit(t0, t1, t2, t3, src, dst, interleave_op, il_reg) \ movzbl src ## bl, RID1d; \ movzbl src ## bh, RID2d; \ shrq $16, src; \ movl t0(CTX, RID1, 4), dst ## d; \ movl t1(CTX, RID2, 4), RID2d; \ movzbl src ## bl, RID1d; \ xorl RID2d, dst ## d; \ movzbl src ## bh, RID2d; \ interleave_op(il_reg); \ xorl t2(CTX, RID1, 4), dst ## d; \ xorl t3(CTX, RID2, 4), dst ## d; #define dummy(d) /* do nothing */ #define shr_next(reg) \ shrq $16, reg; #define G(gi1, gi2, x, t0, t1, t2, t3) \ lookup_32bit(t0, t1, t2, t3, ##gi1, RGS1, shr_next, ##gi1); \ lookup_32bit(t0, t1, t2, t3, ##gi2, RGS3, shr_next, ##gi2); \ \ lookup_32bit(t0, t1, t2, t3, ##gi1, RGS2, dummy, none); \ shlq $32, RGS2; \ orq RGS1, RGS2; \ lookup_32bit(t0, t1, t2, t3, ##gi2, RGS1, dummy, none); \ shlq $32, RGS1; \ orq RGS1, RGS3; #define round_head_2(a, b, x1, y1, x2, y2) \ vmovq b ## 1, RGI3; \ vpextrq $1, b ## 1, RGI4; \ \ G(RGI1, RGI2, x1, s0, s1, s2, s3); \ vmovq a ## 2, RGI1; \ vpextrq $1, a ## 2, RGI2; \ vmovq RGS2, x1; \ vpinsrq $1, RGS3, x1, x1; \ \ G(RGI3, RGI4, y1, s1, s2, s3, s0); \ vmovq b ## 2, RGI3; \ vpextrq $1, b ## 2, RGI4; \ vmovq RGS2, y1; \ vpinsrq $1, RGS3, y1, y1; \ \ G(RGI1, RGI2, x2, s0, s1, s2, s3); \ vmovq RGS2, x2; \ vpinsrq $1, RGS3, x2, x2; \ \ G(RGI3, RGI4, y2, s1, s2, s3, s0); \ vmovq RGS2, y2; \ vpinsrq $1, RGS3, y2, y2; #define encround_tail(a, b, c, d, x, y, prerotate) \ vpaddd x, y, x; \ vpaddd x, RK1, RT;\ prerotate(b); \ vpxor RT, c, c; \ vpaddd y, x, y; \ vpaddd y, RK2, y; \ vpsrld $1, c, RT; \ vpslld $(32 - 1), c, c; \ vpor c, RT, c; \ vpxor d, y, d; \ #define decround_tail(a, b, c, d, x, y, 
prerotate) \ vpaddd x, y, x; \ vpaddd x, RK1, RT;\ prerotate(a); \ vpxor RT, c, c; \ vpaddd y, x, y; \ vpaddd y, RK2, y; \ vpxor d, y, d; \ vpsrld $1, d, y; \ vpslld $(32 - 1), d, d; \ vpor d, y, d; \ #define rotate_1l(x) \ vpslld $1, x, RR; \ vpsrld $(32 - 1), x, x; \ vpor x, RR, x; #define preload_rgi(c) \ vmovq c, RGI1; \ vpextrq $1, c, RGI2; #define encrypt_round(n, a, b, c, d, preload, prerotate) \ vbroadcastss (k+4*(2*(n)))(CTX), RK1; \ vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \ round_head_2(a, b, RX0, RY0, RX1, RY1); \ encround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \ preload(c ## 1); \ encround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate); #define decrypt_round(n, a, b, c, d, preload, prerotate) \ vbroadcastss (k+4*(2*(n)))(CTX), RK1; \ vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \ round_head_2(a, b, RX0, RY0, RX1, RY1); \ decround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \ preload(c ## 1); \ decround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate); #define encrypt_cycle(n) \ encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \ encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); #define encrypt_cycle_last(n) \ encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \ encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy); #define decrypt_cycle(n) \ decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \ decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); #define decrypt_cycle_last(n) \ decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \ decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy); #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t2; \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x3; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; \ vpunpcklqdq x3, t2, x2; \ vpunpckhqdq x3, t2, x3; #define inpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \ vpxor x0, wkey, x0; \ vpxor x1, 
wkey, x1; \ vpxor x2, wkey, x2; \ vpxor x3, wkey, x3; \ \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define outunpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ vpxor x0, wkey, x0; \ vpxor x1, wkey, x1; \ vpxor x2, wkey, x2; \ vpxor x3, wkey, x3; .align 8 __twofish_enc_blk8: /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks * output: * RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks */ vmovdqu w(CTX), RK1; pushq %r13; pushq %rbx; pushq %rcx; inpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); preload_rgi(RA1); rotate_1l(RD1); inpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); rotate_1l(RD2); encrypt_cycle(0); encrypt_cycle(1); encrypt_cycle(2); encrypt_cycle(3); encrypt_cycle(4); encrypt_cycle(5); encrypt_cycle(6); encrypt_cycle_last(7); vmovdqu (w+4*4)(CTX), RK1; popq %rcx; popq %rbx; popq %r13; outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); ret; ENDPROC(__twofish_enc_blk8) .align 8 __twofish_dec_blk8: /* input: * %rdi: ctx, CTX * RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks */ vmovdqu (w+4*4)(CTX), RK1; pushq %r13; pushq %rbx; inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); preload_rgi(RC1); rotate_1l(RA1); inpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); rotate_1l(RA2); decrypt_cycle(7); decrypt_cycle(6); decrypt_cycle(5); decrypt_cycle(4); decrypt_cycle(3); decrypt_cycle(2); decrypt_cycle(1); decrypt_cycle_last(0); vmovdqu (w)(CTX), RK1; popq %rbx; popq %r13; outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); ret; ENDPROC(__twofish_dec_blk8) ENTRY(twofish_ecb_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN movq %rsi, %r11; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __twofish_enc_blk8; store_8way(%r11, 
RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); FRAME_END ret; ENDPROC(twofish_ecb_enc_8way) ENTRY(twofish_ecb_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN movq %rsi, %r11; load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); call __twofish_dec_blk8; store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END ret; ENDPROC(twofish_ecb_dec_8way) ENTRY(twofish_cbc_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r12; movq %rsi, %r11; movq %rdx, %r12; load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); call __twofish_dec_blk8; store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); popq %r12; FRAME_END ret; ENDPROC(twofish_cbc_dec_8way) ENTRY(twofish_ctr_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: iv (little endian, 128bit) */ FRAME_BEGIN pushq %r12; movq %rsi, %r11; movq %rdx, %r12; load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX0, RX1, RY0); call __twofish_enc_blk8; store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); popq %r12; FRAME_END ret; ENDPROC(twofish_ctr_8way) ENTRY(twofish_xts_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN movq %rsi, %r11; /* regs <= src, dst <= IVs, regs <= regs xor IVs */ load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask); call __twofish_enc_blk8; /* dst <= regs xor IVs(in dst) */ store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); FRAME_END ret; ENDPROC(twofish_xts_enc_8way) ENTRY(twofish_xts_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN movq %rsi, %r11; /* regs <= src, dst <= IVs, regs <= regs xor IVs */ load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2, RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask); call __twofish_dec_blk8; /* dst <= regs xor IVs(in dst) */ 
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END ret; ENDPROC(twofish_xts_dec_8way)
AirFortressIlikara/LS2K0300-linux-4.19
38,737
arch/x86/crypto/camellia-aesni-avx2-asm_64.S
/* * x86_64/AVX2/AES-NI assembler implementation of Camellia * * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/linkage.h> #include <asm/frame.h> #include <asm/nospec-branch.h> #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct camellia_ctx: */ #define key_table 0 #define key_length CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %r8 /********************************************************************** helper macros **********************************************************************/ #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; #define ymm0_x xmm0 #define ymm1_x xmm1 #define ymm2_x xmm2 #define ymm3_x xmm3 #define ymm4_x xmm4 #define ymm5_x xmm5 #define ymm6_x xmm6 #define ymm7_x xmm7 #define ymm8_x xmm8 #define ymm9_x xmm9 #define ymm10_x xmm10 #define ymm11_x xmm11 #define ymm12_x xmm12 #define ymm13_x xmm13 #define ymm14_x xmm14 #define ymm15_x xmm15 /********************************************************************** 32-way camellia **********************************************************************/ /* * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vbroadcasti128 .Linv_shift_row, t4; \ vpbroadcastd .L0f0f0f0f, t7; \ vbroadcasti128 .Lpre_tf_lo_s1, t5; \ vbroadcasti128 .Lpre_tf_hi_s1, t6; \ vbroadcasti128 .Lpre_tf_lo_s4, t2; \ vbroadcasti128 .Lpre_tf_hi_s4, t3; 
\ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ \ /* prefilter sboxes 1, 2 and 3 */ \ /* prefilter sbox 4 */ \ filter_8bit(x0, t5, t6, t7, t4); \ filter_8bit(x7, t5, t6, t7, t4); \ vextracti128 $1, x0, t0##_x; \ vextracti128 $1, x7, t1##_x; \ filter_8bit(x3, t2, t3, t7, t4); \ filter_8bit(x6, t2, t3, t7, t4); \ vextracti128 $1, x3, t3##_x; \ vextracti128 $1, x6, t2##_x; \ filter_8bit(x2, t5, t6, t7, t4); \ filter_8bit(x5, t5, t6, t7, t4); \ filter_8bit(x1, t5, t6, t7, t4); \ filter_8bit(x4, t5, t6, t7, t4); \ \ vpxor t4##_x, t4##_x, t4##_x; \ \ /* AES subbytes + AES shift rows */ \ vextracti128 $1, x2, t6##_x; \ vextracti128 $1, x5, t5##_x; \ vaesenclast t4##_x, x0##_x, x0##_x; \ vaesenclast t4##_x, t0##_x, t0##_x; \ vinserti128 $1, t0##_x, x0, x0; \ vaesenclast t4##_x, x7##_x, x7##_x; \ vaesenclast t4##_x, t1##_x, t1##_x; \ vinserti128 $1, t1##_x, x7, x7; \ vaesenclast t4##_x, x3##_x, x3##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vinserti128 $1, t3##_x, x3, x3; \ vaesenclast t4##_x, x6##_x, x6##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t2##_x, x6, x6; \ vextracti128 $1, x1, t3##_x; \ vextracti128 $1, x4, t2##_x; \ vbroadcasti128 .Lpost_tf_lo_s1, t0; \ vbroadcasti128 .Lpost_tf_hi_s1, t1; \ vaesenclast t4##_x, x2##_x, x2##_x; \ vaesenclast t4##_x, t6##_x, t6##_x; \ vinserti128 $1, t6##_x, x2, x2; \ vaesenclast t4##_x, x5##_x, x5##_x; \ vaesenclast t4##_x, t5##_x, t5##_x; \ vinserti128 $1, t5##_x, x5, x5; \ vaesenclast t4##_x, x1##_x, x1##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vinserti128 $1, t3##_x, x1, x1; \ vaesenclast t4##_x, x4##_x, x4##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t2##_x, x4, x4; \ \ /* postfilter sboxes 1 and 4 */ \ vbroadcasti128 .Lpost_tf_lo_s3, t2; \ vbroadcasti128 .Lpost_tf_hi_s3, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, 
t1, t7, t6); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vbroadcasti128 .Lpost_tf_lo_s2, t4; \ vbroadcasti128 .Lpost_tf_hi_s2, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ vpxor t7, t7, t7; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t6, x1, x1; \ vpxor 5 * 32(mem_cd), x1, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 32(mem_cd), x0, x0; \ \ vpxor t5, x2, x2; \ vpxor 6 * 32(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 32(mem_cd), x3, x3; \ \ vpxor t3, x4, x4; \ vpxor 0 * 32(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 32(mem_cd), x5, x5; \ \ vpxor t1, x6, x6; \ vpxor 2 * 32(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 32(mem_cd), x7, x7; /* * Size optimization... with inlined roundsm32 binary would be over 5 times * larger and would only marginally faster. 
*/ .align 8 roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rcx, (%r9)); ret; ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, %rax, (%r9)); ret; ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ leaq (key_table + (i) * 8)(CTX), %r9; \ call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \ \ vmovdqu x0, 4 * 32(mem_cd); \ vmovdqu x1, 5 * 32(mem_cd); \ vmovdqu x2, 6 * 32(mem_cd); \ vmovdqu x3, 7 * 32(mem_cd); \ vmovdqu x4, 0 * 32(mem_cd); \ vmovdqu x5, 1 * 32(mem_cd); \ vmovdqu x6, 2 * 32(mem_cd); \ vmovdqu x7, 3 * 32(mem_cd); \ \ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \ call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, 
store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_32(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls32(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpbroadcastd kll, t0; /* only lowest 32-bit used */ \ vpxor tt0, tt0, tt0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ vmovdqu l4, 4 * 32(l); \ vpxor 
l5, t1, l5; \ vmovdqu l5, 5 * 32(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 32(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 32(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 32(r), t0, t0; \ vpor 5 * 32(r), t1, t1; \ vpor 6 * 32(r), t2, t2; \ vpor 7 * 32(r), t3, t3; \ \ vpxor 0 * 32(r), t0, t0; \ vpxor 1 * 32(r), t1, t1; \ vpxor 2 * 32(r), t2, t2; \ vpxor 3 * 32(r), t3, t3; \ vmovdqu t0, 0 * 32(r); \ vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 1 * 32(r); \ vmovdqu t2, 2 * 32(r); \ vmovdqu t3, 3 * 32(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 32(r), t0, t0; \ vpand 1 * 32(r), t1, t1; \ vpand 2 * 32(r), t2, t2; \ vpand 3 * 32(r), t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 32(r), t0, t0; \ vpxor 5 * 32(r), t1, t1; \ vpxor 6 * 32(r), t2, t2; \ vpxor 7 * 32(r), t3, t3; \ vmovdqu t0, 4 * 32(r); \ vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 5 * 32(r); \ vmovdqu t2, 6 * 32(r); \ vmovdqu t3, 7 * 32(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 32(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 32(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 32(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 32(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq 
t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vbroadcasti128 .Lshufb_16x16b, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap, x0, x0; \ \ vpxor 0 * 32(rio), x0, y7; \ vpxor 1 * 32(rio), x0, y6; \ vpxor 2 * 32(rio), x0, y5; \ vpxor 3 * 32(rio), x0, y4; \ vpxor 4 * 32(rio), x0, y3; \ vpxor 5 * 32(rio), x0, y2; \ vpxor 6 * 32(rio), x0, y1; \ vpxor 7 * 32(rio), x0, y0; \ vpxor 8 * 32(rio), x0, x7; \ vpxor 9 * 32(rio), x0, x6; \ vpxor 10 * 32(rio), x0, x5; \ vpxor 11 * 32(rio), x0, x4; \ vpxor 12 * 32(rio), x0, x3; \ vpxor 13 * 32(rio), x0, x2; \ vpxor 14 * 32(rio), x0, x1; \ vpxor 15 * 32(rio), x0, x0; /* byteslice pre-whitened blocks and 
store to temporary memory */ #define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu y0, 0 * 32(mem_cd); \ vmovdqu y1, 1 * 32(mem_cd); \ vmovdqu y2, 2 * 32(mem_cd); \ vmovdqu y3, 3 * 32(mem_cd); \ vmovdqu y4, 4 * 32(mem_cd); \ vmovdqu y5, 5 * 32(mem_cd); \ vmovdqu y6, 6 * 32(mem_cd); \ vmovdqu y7, 7 * 32(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 32(rio); \ vmovdqu x1, 1 * 32(rio); \ vmovdqu x2, 2 * 32(rio); \ vmovdqu x3, 3 * 32(rio); \ vmovdqu x4, 4 * 32(rio); \ vmovdqu x5, 5 * 32(rio); \ vmovdqu x6, 6 * 32(rio); \ vmovdqu x7, 7 * 32(rio); \ vmovdqu y0, 8 * 32(rio); \ vmovdqu y1, 9 * 32(rio); \ vmovdqu y2, 10 * 32(rio); \ vmovdqu y3, 11 * 32(rio); \ vmovdqu y4, 12 * 32(rio); \ vmovdqu y5, 13 * 32(rio); \ vmovdqu y6, 14 * 32(rio); \ vmovdqu y7, 15 * 32(rio); .section .rodata.cst32.shufb_16x16b, 
"aM", @progbits, 32 .align 32 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) .section .rodata.cst32.pack_bswap, "aM", @progbits, 32 .align 32 .Lpack_bswap: .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 /* NB: section is mergeable, all elements must be aligned 16-byte blocks */ .section .rodata.cst16, "aM", @progbits, 16 .align 16 /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* For XTS mode */ .Lxts_gf128mul_and_shl1_mask_0: .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 .Lxts_gf128mul_and_shl1_mask_1: .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside 
camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f .text .align 8 __camellia_enc_blk32: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %ymm0..%ymm15: 32 plaintext blocks * output: * %ymm0..%ymm15: 32 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ FRAME_BEGIN leaq 8 * 32(%rax), %rcx; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); enc_rounds32(%ymm0, 
%ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX), ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 8); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX), ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 16); movl $24, %r8d; cmpl $16, key_length(CTX); jne .Lenc_max32; .Lenc_done: /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax)); FRAME_END ret; .align 8 .Lenc_max32: movl $32, %r8d; fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX), ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 24); jmp .Lenc_done; 
ENDPROC(__camellia_enc_blk32) .align 8 __camellia_dec_blk32: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %r8d: 24 for 16 byte key, 32 for larger * %ymm0..%ymm15: 16 encrypted blocks * output: * %ymm0..%ymm15: 16 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ FRAME_BEGIN leaq 8 * 32(%rax), %rcx; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); cmpl $32, %r8d; je .Ldec_max32; .Ldec_max24: dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 16); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX), ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX)); dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 8); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX), ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX)); dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); FRAME_END ret; 
.align 8 .Ldec_max32: dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 24); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX), ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX)); jmp .Ldec_max24; ENDPROC(__camellia_dec_blk32) ENTRY(camellia_ecb_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) */ FRAME_BEGIN vzeroupper; inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX)); /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; call __camellia_enc_blk32; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroupper; FRAME_END ret; ENDPROC(camellia_ecb_enc_32way) ENTRY(camellia_ecb_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) */ FRAME_BEGIN vzeroupper; cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX, %r8, 8)); /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; call __camellia_dec_blk32; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroupper; FRAME_END ret; ENDPROC(camellia_ecb_dec_32way) ENTRY(camellia_cbc_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) */ FRAME_BEGIN vzeroupper; cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, 
%r8d; /* max */ inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX, %r8, 8)); movq %rsp, %r10; cmpq %rsi, %rdx; je .Lcbc_dec_use_stack; /* dst can be used as temporary storage, src is not overwritten. */ movq %rsi, %rax; jmp .Lcbc_dec_continue; .Lcbc_dec_use_stack: /* * dst still in-use (because dst == src), so use stack for temporary * storage. */ subq $(16 * 32), %rsp; movq %rsp, %rax; .Lcbc_dec_continue: call __camellia_dec_blk32; vmovdqu %ymm7, (%rax); vpxor %ymm7, %ymm7, %ymm7; vinserti128 $1, (%rdx), %ymm7, %ymm7; vpxor (%rax), %ymm7, %ymm7; movq %r10, %rsp; vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6; vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5; vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4; vpxor (3 * 32 + 16)(%rdx), %ymm3, %ymm3; vpxor (4 * 32 + 16)(%rdx), %ymm2, %ymm2; vpxor (5 * 32 + 16)(%rdx), %ymm1, %ymm1; vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm0; vpxor (7 * 32 + 16)(%rdx), %ymm15, %ymm15; vpxor (8 * 32 + 16)(%rdx), %ymm14, %ymm14; vpxor (9 * 32 + 16)(%rdx), %ymm13, %ymm13; vpxor (10 * 32 + 16)(%rdx), %ymm12, %ymm12; vpxor (11 * 32 + 16)(%rdx), %ymm11, %ymm11; vpxor (12 * 32 + 16)(%rdx), %ymm10, %ymm10; vpxor (13 * 32 + 16)(%rdx), %ymm9, %ymm9; vpxor (14 * 32 + 16)(%rdx), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroupper; FRAME_END ret; ENDPROC(camellia_cbc_dec_32way) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; #define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \ vpcmpeqq minus_one, x, tmp1; \ vpcmpeqq minus_two, x, tmp2; \ vpsubq minus_two, x, x; \ vpor tmp2, tmp1, tmp1; \ vpslldq $8, tmp1, tmp1; \ vpsubq tmp1, x, x; ENTRY(camellia_ctr_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv (little endian, 128bit) */ 
FRAME_BEGIN vzeroupper; movq %rsp, %r10; cmpq %rsi, %rdx; je .Lctr_use_stack; /* dst can be used as temporary storage, src is not overwritten. */ movq %rsi, %rax; jmp .Lctr_continue; .Lctr_use_stack: subq $(16 * 32), %rsp; movq %rsp, %rax; .Lctr_continue: vpcmpeqd %ymm15, %ymm15, %ymm15; vpsrldq $8, %ymm15, %ymm15; /* ab: -1:0 ; cd: -1:0 */ vpaddq %ymm15, %ymm15, %ymm12; /* ab: -2:0 ; cd: -2:0 */ /* load IV and byteswap */ vmovdqu (%rcx), %xmm0; vmovdqa %xmm0, %xmm1; inc_le128(%xmm0, %xmm15, %xmm14); vbroadcasti128 .Lbswap128_mask, %ymm14; vinserti128 $1, %xmm0, %ymm1, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 15 * 32(%rax); /* construct IVs */ add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); /* ab:le2 ; cd:le3 */ vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 14 * 32(%rax); add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 13 * 32(%rax); add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 12 * 32(%rax); add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 11 * 32(%rax); add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm10; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm9; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm8; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm7; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm6; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm5; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm4; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm3; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm2; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); vpshufb %ymm14, %ymm0, %ymm1; add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, 
%ymm13); vextracti128 $1, %ymm0, %xmm13; vpshufb %ymm14, %ymm0, %ymm0; inc_le128(%xmm13, %xmm15, %xmm14); vmovdqu %xmm13, (%rcx); /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor 11 * 32(%rax), %ymm15, %ymm11; vpxor 12 * 32(%rax), %ymm15, %ymm12; vpxor 13 * 32(%rax), %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call __camellia_enc_blk32; movq %r10, %rsp; vpxor 0 * 32(%rdx), %ymm7, %ymm7; vpxor 1 * 32(%rdx), %ymm6, %ymm6; vpxor 2 * 32(%rdx), %ymm5, %ymm5; vpxor 3 * 32(%rdx), %ymm4, %ymm4; vpxor 4 * 32(%rdx), %ymm3, %ymm3; vpxor 5 * 32(%rdx), %ymm2, %ymm2; vpxor 6 * 32(%rdx), %ymm1, %ymm1; vpxor 7 * 32(%rdx), %ymm0, %ymm0; vpxor 8 * 32(%rdx), %ymm15, %ymm15; vpxor 9 * 32(%rdx), %ymm14, %ymm14; vpxor 10 * 32(%rdx), %ymm13, %ymm13; vpxor 11 * 32(%rdx), %ymm12, %ymm12; vpxor 12 * 32(%rdx), %ymm11, %ymm11; vpxor 13 * 32(%rdx), %ymm10, %ymm10; vpxor 14 * 32(%rdx), %ymm9, %ymm9; vpxor 15 * 32(%rdx), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroupper; FRAME_END ret; ENDPROC(camellia_ctr_32way) #define gf128mul_x_ble(iv, mask, tmp) \ vpsrad $31, iv, tmp; \ vpaddq iv, iv, iv; \ vpshufd $0x13, tmp, tmp; \ vpand mask, tmp, tmp; \ vpxor tmp, iv, iv; #define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \ vpsrad $31, iv, tmp0; \ vpaddq iv, iv, tmp1; \ vpsllq $2, iv, iv; \ vpshufd $0x13, tmp0, tmp0; \ vpsrad $31, tmp1, tmp1; \ vpand mask2, tmp0, tmp0; \ vpshufd $0x13, tmp1, tmp1; \ vpxor tmp0, iv, iv; \ vpand mask1, tmp1, tmp1; \ vpxor tmp1, iv, iv; .align 8 
camellia_xts_crypt_32way: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) * %r8: index for input whitening key * %r9: pointer to __camellia_enc_blk32 or __camellia_dec_blk32 */ FRAME_BEGIN vzeroupper; subq $(16 * 32), %rsp; movq %rsp, %rax; vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_0, %ymm12; /* load IV and construct second IV */ vmovdqu (%rcx), %xmm0; vmovdqa %xmm0, %xmm15; gf128mul_x_ble(%xmm0, %xmm12, %xmm13); vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_1, %ymm13; vinserti128 $1, %xmm0, %ymm15, %ymm0; vpxor 0 * 32(%rdx), %ymm0, %ymm15; vmovdqu %ymm15, 15 * 32(%rax); vmovdqu %ymm0, 0 * 32(%rsi); /* construct IVs */ gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 1 * 32(%rdx), %ymm0, %ymm15; vmovdqu %ymm15, 14 * 32(%rax); vmovdqu %ymm0, 1 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 2 * 32(%rdx), %ymm0, %ymm15; vmovdqu %ymm15, 13 * 32(%rax); vmovdqu %ymm0, 2 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 3 * 32(%rdx), %ymm0, %ymm15; vmovdqu %ymm15, 12 * 32(%rax); vmovdqu %ymm0, 3 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 4 * 32(%rdx), %ymm0, %ymm11; vmovdqu %ymm0, 4 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 5 * 32(%rdx), %ymm0, %ymm10; vmovdqu %ymm0, 5 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 6 * 32(%rdx), %ymm0, %ymm9; vmovdqu %ymm0, 6 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 7 * 32(%rdx), %ymm0, %ymm8; vmovdqu %ymm0, 7 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 8 * 32(%rdx), %ymm0, %ymm7; vmovdqu %ymm0, 8 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 9 * 32(%rdx), %ymm0, %ymm6; vmovdqu %ymm0, 9 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 10 * 32(%rdx), %ymm0, %ymm5; vmovdqu %ymm0, 10 * 32(%rsi); gf128mul_x2_ble(%ymm0, 
%ymm12, %ymm13, %ymm14, %ymm15); vpxor 11 * 32(%rdx), %ymm0, %ymm4; vmovdqu %ymm0, 11 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 12 * 32(%rdx), %ymm0, %ymm3; vmovdqu %ymm0, 12 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 13 * 32(%rdx), %ymm0, %ymm2; vmovdqu %ymm0, 13 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 14 * 32(%rdx), %ymm0, %ymm1; vmovdqu %ymm0, 14 * 32(%rsi); gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); vpxor 15 * 32(%rdx), %ymm0, %ymm15; vmovdqu %ymm15, 0 * 32(%rax); vmovdqu %ymm0, 15 * 32(%rsi); vextracti128 $1, %ymm0, %xmm0; gf128mul_x_ble(%xmm0, %xmm12, %xmm15); vmovdqu %xmm0, (%rcx); /* inpack32_pre: */ vpbroadcastq (key_table)(CTX, %r8, 8), %ymm15; vpshufb .Lpack_bswap, %ymm15, %ymm15; vpxor 0 * 32(%rax), %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor 12 * 32(%rax), %ymm15, %ymm12; vpxor 13 * 32(%rax), %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; CALL_NOSPEC %r9; addq $(16 * 32), %rsp; vpxor 0 * 32(%rsi), %ymm7, %ymm7; vpxor 1 * 32(%rsi), %ymm6, %ymm6; vpxor 2 * 32(%rsi), %ymm5, %ymm5; vpxor 3 * 32(%rsi), %ymm4, %ymm4; vpxor 4 * 32(%rsi), %ymm3, %ymm3; vpxor 5 * 32(%rsi), %ymm2, %ymm2; vpxor 6 * 32(%rsi), %ymm1, %ymm1; vpxor 7 * 32(%rsi), %ymm0, %ymm0; vpxor 8 * 32(%rsi), %ymm15, %ymm15; vpxor 9 * 32(%rsi), %ymm14, %ymm14; vpxor 10 * 32(%rsi), %ymm13, %ymm13; vpxor 11 * 32(%rsi), %ymm12, %ymm12; vpxor 12 * 32(%rsi), %ymm11, %ymm11; vpxor 13 * 32(%rsi), %ymm10, %ymm10; vpxor 14 * 32(%rsi), %ymm9, %ymm9; vpxor 15 * 32(%rsi), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, 
%ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroupper; FRAME_END ret; ENDPROC(camellia_xts_crypt_32way) ENTRY(camellia_xts_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ xorl %r8d, %r8d; /* input whitening key, 0 for enc */ leaq __camellia_enc_blk32, %r9; jmp camellia_xts_crypt_32way; ENDPROC(camellia_xts_enc_32way) ENTRY(camellia_xts_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* input whitening key, last for dec */ leaq __camellia_dec_blk32, %r9; jmp camellia_xts_crypt_32way; ENDPROC(camellia_xts_dec_32way)
AirFortressIlikara/LS2K0300-linux-4.19
19,183
arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
/* * Serpent Cipher 8-way parallel algorithm (x86_64/SSE2) * * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Based on crypto/serpent.c by * Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no> * 2003 Herbert Valerio Riedel <hvr@gnu.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/linkage.h> .file "serpent-sse2-x86_64-asm_64.S" .text #define CTX %rdi /********************************************************************** 8-way SSE2 serpent **********************************************************************/ #define RA1 %xmm0 #define RB1 %xmm1 #define RC1 %xmm2 #define RD1 %xmm3 #define RE1 %xmm4 #define RA2 %xmm5 #define RB2 %xmm6 #define RC2 %xmm7 #define RD2 %xmm8 #define RE2 %xmm9 #define RNOT %xmm10 #define RK0 %xmm11 #define RK1 %xmm12 #define RK2 %xmm13 #define RK3 %xmm14 #define S0_1(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ por x0, x3; \ pxor x4, x0; \ pxor x2, x4; \ pxor RNOT, x4; \ pxor x1, x3; \ pand x0, x1; \ pxor x4, x1; \ pxor x0, x2; #define S0_2(x0, x1, x2, x3, x4) \ pxor x3, x0; \ por x0, x4; \ pxor x2, x0; \ pand x1, x2; \ pxor x2, x3; \ pxor RNOT, x1; \ pxor x4, x2; \ pxor x2, x1; #define S1_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x0, x1; \ pxor x3, x0; \ pxor RNOT, x3; \ pand x1, x4; \ por x1, x0; \ pxor x2, x3; \ pxor x3, x0; \ pxor x3, x1; #define 
S1_2(x0, x1, x2, x3, x4) \ pxor x4, x3; \ por x4, x1; \ pxor x2, x4; \ pand x0, x2; \ pxor x1, x2; \ por x0, x1; \ pxor RNOT, x0; \ pxor x2, x0; \ pxor x1, x4; #define S2_1(x0, x1, x2, x3, x4) \ pxor RNOT, x3; \ pxor x0, x1; \ movdqa x0, x4; \ pand x2, x0; \ pxor x3, x0; \ por x4, x3; \ pxor x1, x2; \ pxor x1, x3; \ pand x0, x1; #define S2_2(x0, x1, x2, x3, x4) \ pxor x2, x0; \ pand x3, x2; \ por x1, x3; \ pxor RNOT, x0; \ pxor x0, x3; \ pxor x0, x4; \ pxor x2, x0; \ por x2, x1; #define S3_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x3, x1; \ por x0, x3; \ pand x0, x4; \ pxor x2, x0; \ pxor x1, x2; \ pand x3, x1; \ pxor x3, x2; \ por x4, x0; \ pxor x3, x4; #define S3_2(x0, x1, x2, x3, x4) \ pxor x0, x1; \ pand x3, x0; \ pand x4, x3; \ pxor x2, x3; \ por x1, x4; \ pand x1, x2; \ pxor x3, x4; \ pxor x3, x0; \ pxor x2, x3; #define S4_1(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pand x0, x3; \ pxor x4, x0; \ pxor x2, x3; \ por x4, x2; \ pxor x1, x0; \ pxor x3, x4; \ por x0, x2; \ pxor x1, x2; #define S4_2(x0, x1, x2, x3, x4) \ pand x0, x1; \ pxor x4, x1; \ pand x2, x4; \ pxor x3, x2; \ pxor x0, x4; \ por x1, x3; \ pxor RNOT, x1; \ pxor x0, x3; #define S5_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ por x0, x1; \ pxor x1, x2; \ pxor RNOT, x3; \ pxor x0, x4; \ pxor x2, x0; \ pand x4, x1; \ por x3, x4; \ pxor x0, x4; #define S5_2(x0, x1, x2, x3, x4) \ pand x3, x0; \ pxor x3, x1; \ pxor x2, x3; \ pxor x1, x0; \ pand x4, x2; \ pxor x2, x1; \ pand x0, x2; \ pxor x2, x3; #define S6_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x0, x3; \ pxor x2, x1; \ pxor x0, x2; \ pand x3, x0; \ por x3, x1; \ pxor RNOT, x4; \ pxor x1, x0; \ pxor x2, x1; #define S6_2(x0, x1, x2, x3, x4) \ pxor x4, x3; \ pxor x0, x4; \ pand x0, x2; \ pxor x1, x4; \ pxor x3, x2; \ pand x1, x3; \ pxor x0, x3; \ pxor x2, x1; #define S7_1(x0, x1, x2, x3, x4) \ pxor RNOT, x1; \ movdqa x1, x4; \ pxor RNOT, x0; \ pand x2, x1; \ pxor x3, x1; \ por x4, x3; \ pxor x2, x4; \ pxor x3, x2; \ pxor x0, x3; \ por x1, x0; #define 
S7_2(x0, x1, x2, x3, x4) \ pand x0, x2; \ pxor x4, x0; \ pxor x3, x4; \ pand x0, x3; \ pxor x1, x4; \ pxor x4, x2; \ pxor x1, x3; \ por x0, x4; \ pxor x1, x4; #define SI0_1(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pxor x0, x1; \ por x1, x3; \ pxor x1, x4; \ pxor RNOT, x0; \ pxor x3, x2; \ pxor x0, x3; \ pand x1, x0; \ pxor x2, x0; #define SI0_2(x0, x1, x2, x3, x4) \ pand x3, x2; \ pxor x4, x3; \ pxor x3, x2; \ pxor x3, x1; \ pand x0, x3; \ pxor x0, x1; \ pxor x2, x0; \ pxor x3, x4; #define SI1_1(x0, x1, x2, x3, x4) \ pxor x3, x1; \ movdqa x0, x4; \ pxor x2, x0; \ pxor RNOT, x2; \ por x1, x4; \ pxor x3, x4; \ pand x1, x3; \ pxor x2, x1; \ pand x4, x2; #define SI1_2(x0, x1, x2, x3, x4) \ pxor x1, x4; \ por x3, x1; \ pxor x0, x3; \ pxor x0, x2; \ por x4, x0; \ pxor x4, x2; \ pxor x0, x1; \ pxor x1, x4; #define SI2_1(x0, x1, x2, x3, x4) \ pxor x1, x2; \ movdqa x3, x4; \ pxor RNOT, x3; \ por x2, x3; \ pxor x4, x2; \ pxor x0, x4; \ pxor x1, x3; \ por x2, x1; \ pxor x0, x2; #define SI2_2(x0, x1, x2, x3, x4) \ pxor x4, x1; \ por x3, x4; \ pxor x3, x2; \ pxor x2, x4; \ pand x1, x2; \ pxor x3, x2; \ pxor x4, x3; \ pxor x0, x4; #define SI3_1(x0, x1, x2, x3, x4) \ pxor x1, x2; \ movdqa x1, x4; \ pand x2, x1; \ pxor x0, x1; \ por x4, x0; \ pxor x3, x4; \ pxor x3, x0; \ por x1, x3; \ pxor x2, x1; #define SI3_2(x0, x1, x2, x3, x4) \ pxor x3, x1; \ pxor x2, x0; \ pxor x3, x2; \ pand x1, x3; \ pxor x0, x1; \ pand x2, x0; \ pxor x3, x4; \ pxor x0, x3; \ pxor x1, x0; #define SI4_1(x0, x1, x2, x3, x4) \ pxor x3, x2; \ movdqa x0, x4; \ pand x1, x0; \ pxor x2, x0; \ por x3, x2; \ pxor RNOT, x4; \ pxor x0, x1; \ pxor x2, x0; \ pand x4, x2; #define SI4_2(x0, x1, x2, x3, x4) \ pxor x0, x2; \ por x4, x0; \ pxor x3, x0; \ pand x2, x3; \ pxor x3, x4; \ pxor x1, x3; \ pand x0, x1; \ pxor x1, x4; \ pxor x3, x0; #define SI5_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ por x2, x1; \ pxor x4, x2; \ pxor x3, x1; \ pand x4, x3; \ pxor x3, x2; \ por x0, x3; \ pxor RNOT, x0; \ pxor x2, x3; \ por x0, x2; 
#define SI5_2(x0, x1, x2, x3, x4) \ pxor x1, x4; \ pxor x4, x2; \ pand x0, x4; \ pxor x1, x0; \ pxor x3, x1; \ pand x2, x0; \ pxor x3, x2; \ pxor x2, x0; \ pxor x4, x2; \ pxor x3, x4; #define SI6_1(x0, x1, x2, x3, x4) \ pxor x2, x0; \ movdqa x0, x4; \ pand x3, x0; \ pxor x3, x2; \ pxor x2, x0; \ pxor x1, x3; \ por x4, x2; \ pxor x3, x2; \ pand x0, x3; #define SI6_2(x0, x1, x2, x3, x4) \ pxor RNOT, x0; \ pxor x1, x3; \ pand x2, x1; \ pxor x0, x4; \ pxor x4, x3; \ pxor x2, x4; \ pxor x1, x0; \ pxor x0, x2; #define SI7_1(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pand x0, x3; \ pxor x2, x0; \ por x4, x2; \ pxor x1, x4; \ pxor RNOT, x0; \ por x3, x1; \ pxor x0, x4; \ pand x2, x0; \ pxor x1, x0; #define SI7_2(x0, x1, x2, x3, x4) \ pand x2, x1; \ pxor x2, x3; \ pxor x3, x4; \ pand x3, x2; \ por x0, x3; \ pxor x4, x1; \ pxor x4, x3; \ pand x0, x4; \ pxor x2, x4; #define get_key(i, j, t) \ movd (4*(i)+(j))*4(CTX), t; \ pshufd $0, t, t; #define K2(x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ get_key(i, 1, RK1); \ get_key(i, 2, RK2); \ get_key(i, 3, RK3); \ pxor RK0, x0 ## 1; \ pxor RK1, x1 ## 1; \ pxor RK2, x2 ## 1; \ pxor RK3, x3 ## 1; \ pxor RK0, x0 ## 2; \ pxor RK1, x1 ## 2; \ pxor RK2, x2 ## 2; \ pxor RK3, x3 ## 2; #define LK2(x0, x1, x2, x3, x4, i) \ movdqa x0 ## 1, x4 ## 1; \ pslld $13, x0 ## 1; \ psrld $(32 - 13), x4 ## 1; \ por x4 ## 1, x0 ## 1; \ pxor x0 ## 1, x1 ## 1; \ movdqa x2 ## 1, x4 ## 1; \ pslld $3, x2 ## 1; \ psrld $(32 - 3), x4 ## 1; \ por x4 ## 1, x2 ## 1; \ pxor x2 ## 1, x1 ## 1; \ movdqa x0 ## 2, x4 ## 2; \ pslld $13, x0 ## 2; \ psrld $(32 - 13), x4 ## 2; \ por x4 ## 2, x0 ## 2; \ pxor x0 ## 2, x1 ## 2; \ movdqa x2 ## 2, x4 ## 2; \ pslld $3, x2 ## 2; \ psrld $(32 - 3), x4 ## 2; \ por x4 ## 2, x2 ## 2; \ pxor x2 ## 2, x1 ## 2; \ movdqa x1 ## 1, x4 ## 1; \ pslld $1, x1 ## 1; \ psrld $(32 - 1), x4 ## 1; \ por x4 ## 1, x1 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ pslld $3, x4 ## 1; \ pxor x2 ## 1, x3 ## 1; \ pxor x4 ## 1, x3 ## 1; \ movdqa x3 ## 1, x4 ## 1; \ 
get_key(i, 1, RK1); \ movdqa x1 ## 2, x4 ## 2; \ pslld $1, x1 ## 2; \ psrld $(32 - 1), x4 ## 2; \ por x4 ## 2, x1 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ pslld $3, x4 ## 2; \ pxor x2 ## 2, x3 ## 2; \ pxor x4 ## 2, x3 ## 2; \ movdqa x3 ## 2, x4 ## 2; \ get_key(i, 3, RK3); \ pslld $7, x3 ## 1; \ psrld $(32 - 7), x4 ## 1; \ por x4 ## 1, x3 ## 1; \ movdqa x1 ## 1, x4 ## 1; \ pslld $7, x4 ## 1; \ pxor x1 ## 1, x0 ## 1; \ pxor x3 ## 1, x0 ## 1; \ pxor x3 ## 1, x2 ## 1; \ pxor x4 ## 1, x2 ## 1; \ get_key(i, 0, RK0); \ pslld $7, x3 ## 2; \ psrld $(32 - 7), x4 ## 2; \ por x4 ## 2, x3 ## 2; \ movdqa x1 ## 2, x4 ## 2; \ pslld $7, x4 ## 2; \ pxor x1 ## 2, x0 ## 2; \ pxor x3 ## 2, x0 ## 2; \ pxor x3 ## 2, x2 ## 2; \ pxor x4 ## 2, x2 ## 2; \ get_key(i, 2, RK2); \ pxor RK1, x1 ## 1; \ pxor RK3, x3 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ pslld $5, x0 ## 1; \ psrld $(32 - 5), x4 ## 1; \ por x4 ## 1, x0 ## 1; \ movdqa x2 ## 1, x4 ## 1; \ pslld $22, x2 ## 1; \ psrld $(32 - 22), x4 ## 1; \ por x4 ## 1, x2 ## 1; \ pxor RK0, x0 ## 1; \ pxor RK2, x2 ## 1; \ pxor RK1, x1 ## 2; \ pxor RK3, x3 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ pslld $5, x0 ## 2; \ psrld $(32 - 5), x4 ## 2; \ por x4 ## 2, x0 ## 2; \ movdqa x2 ## 2, x4 ## 2; \ pslld $22, x2 ## 2; \ psrld $(32 - 22), x4 ## 2; \ por x4 ## 2, x2 ## 2; \ pxor RK0, x0 ## 2; \ pxor RK2, x2 ## 2; #define KL2(x0, x1, x2, x3, x4, i) \ pxor RK0, x0 ## 1; \ pxor RK2, x2 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ psrld $5, x0 ## 1; \ pslld $(32 - 5), x4 ## 1; \ por x4 ## 1, x0 ## 1; \ pxor RK3, x3 ## 1; \ pxor RK1, x1 ## 1; \ movdqa x2 ## 1, x4 ## 1; \ psrld $22, x2 ## 1; \ pslld $(32 - 22), x4 ## 1; \ por x4 ## 1, x2 ## 1; \ pxor x3 ## 1, x2 ## 1; \ pxor RK0, x0 ## 2; \ pxor RK2, x2 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ psrld $5, x0 ## 2; \ pslld $(32 - 5), x4 ## 2; \ por x4 ## 2, x0 ## 2; \ pxor RK3, x3 ## 2; \ pxor RK1, x1 ## 2; \ movdqa x2 ## 2, x4 ## 2; \ psrld $22, x2 ## 2; \ pslld $(32 - 22), x4 ## 2; \ por x4 ## 2, x2 ## 2; \ pxor x3 ## 2, x2 ## 2; \ pxor x3 ## 1, 
x0 ## 1; \ movdqa x1 ## 1, x4 ## 1; \ pslld $7, x4 ## 1; \ pxor x1 ## 1, x0 ## 1; \ pxor x4 ## 1, x2 ## 1; \ movdqa x1 ## 1, x4 ## 1; \ psrld $1, x1 ## 1; \ pslld $(32 - 1), x4 ## 1; \ por x4 ## 1, x1 ## 1; \ pxor x3 ## 2, x0 ## 2; \ movdqa x1 ## 2, x4 ## 2; \ pslld $7, x4 ## 2; \ pxor x1 ## 2, x0 ## 2; \ pxor x4 ## 2, x2 ## 2; \ movdqa x1 ## 2, x4 ## 2; \ psrld $1, x1 ## 2; \ pslld $(32 - 1), x4 ## 2; \ por x4 ## 2, x1 ## 2; \ movdqa x3 ## 1, x4 ## 1; \ psrld $7, x3 ## 1; \ pslld $(32 - 7), x4 ## 1; \ por x4 ## 1, x3 ## 1; \ pxor x0 ## 1, x1 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ pslld $3, x4 ## 1; \ pxor x4 ## 1, x3 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ movdqa x3 ## 2, x4 ## 2; \ psrld $7, x3 ## 2; \ pslld $(32 - 7), x4 ## 2; \ por x4 ## 2, x3 ## 2; \ pxor x0 ## 2, x1 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ pslld $3, x4 ## 2; \ pxor x4 ## 2, x3 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ psrld $13, x0 ## 1; \ pslld $(32 - 13), x4 ## 1; \ por x4 ## 1, x0 ## 1; \ pxor x2 ## 1, x1 ## 1; \ pxor x2 ## 1, x3 ## 1; \ movdqa x2 ## 1, x4 ## 1; \ psrld $3, x2 ## 1; \ pslld $(32 - 3), x4 ## 1; \ por x4 ## 1, x2 ## 1; \ psrld $13, x0 ## 2; \ pslld $(32 - 13), x4 ## 2; \ por x4 ## 2, x0 ## 2; \ pxor x2 ## 2, x1 ## 2; \ pxor x2 ## 2, x3 ## 2; \ movdqa x2 ## 2, x4 ## 2; \ psrld $3, x2 ## 2; \ pslld $(32 - 3), x4 ## 2; \ por x4 ## 2, x2 ## 2; #define S(SBOX, x0, x1, x2, x3, x4) \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); #define SP(SBOX, x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 2, RK2); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ get_key(i, 3, RK3); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 1, RK1); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ #define transpose_4x4(x0, x1, 
x2, x3, t0, t1, t2) \ movdqa x0, t2; \ punpckldq x1, x0; \ punpckhdq x1, t2; \ movdqa x2, t1; \ punpckhdq x3, x2; \ punpckldq x3, t1; \ movdqa x0, x1; \ punpcklqdq t1, x0; \ punpckhqdq t1, x1; \ movdqa t2, x3; \ punpcklqdq x2, t2; \ punpckhqdq x2, x3; \ movdqa t2, x2; #define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \ movdqu (0*4*4)(in), x0; \ movdqu (1*4*4)(in), x1; \ movdqu (2*4*4)(in), x2; \ movdqu (3*4*4)(in), x3; \ \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ movdqu x0, (0*4*4)(out); \ movdqu x1, (1*4*4)(out); \ movdqu x2, (2*4*4)(out); \ movdqu x3, (3*4*4)(out); #define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ movdqu (0*4*4)(out), t0; \ pxor t0, x0; \ movdqu x0, (0*4*4)(out); \ movdqu (1*4*4)(out), t0; \ pxor t0, x1; \ movdqu x1, (1*4*4)(out); \ movdqu (2*4*4)(out), t0; \ pxor t0, x2; \ movdqu x2, (2*4*4)(out); \ movdqu (3*4*4)(out), t0; \ pxor t0, x3; \ movdqu x3, (3*4*4)(out); ENTRY(__serpent_enc_blk_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ pcmpeqd RNOT, RNOT; leaq (4*4*4)(%rdx), %rax; read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 0); S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1); S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2); S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3); S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4); S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5); S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6); S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7); S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8); S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9); S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10); S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11); S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, 
RD, RA, RB, 12); S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13); S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14); S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15); S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16); S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17); S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18); S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19); S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20); S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21); S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22); S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23); S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24); S(S0, RB, RE, RA, RD, RC); LK2(RA, RE, RD, RB, RC, 25); S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26); S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27); S(S3, RE, RD, RC, RA, RB); LK2(RA, RB, RD, RE, RC, 28); S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29); S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30); S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31); S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32); leaq (4*4*4)(%rsi), %rax; testb %cl, %cl; jnz .L__enc_xor8; write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); ret; .L__enc_xor8: xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); ret; ENDPROC(__serpent_enc_blk_8way) ENTRY(serpent_dec_blk_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ pcmpeqd RNOT, RNOT; leaq (4*4*4)(%rdx), %rax; read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 32); SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31); SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30); SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29); SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28); SP(SI3, RC, RA, RB, 
RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27); SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26); SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25); SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24); SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23); SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22); SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21); SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20); SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19); SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18); SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17); SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16); SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15); SP(SI6, RA, RC, RD, RB, RE, 14); KL2(RD, RE, RB, RA, RC, 14); SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13); SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, RA, RB, RC, 12); SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11); SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10); SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9); SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8); SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7); SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6); SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5); SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4); SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3); SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2); SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1); S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0); leaq (4*4*4)(%rsi), %rax; write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); ret; ENDPROC(serpent_dec_blk_8way)
AirFortressIlikara/LS2K0300-linux-4.19
7,233
arch/x86/crypto/twofish-x86_64-asm_64-3way.S
/* * Twofish Cipher 3-way parallel algorithm (x86_64) * * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/linkage.h> .file "twofish-x86_64-asm-3way.S" .text /* structure of crypto context */ #define s0 0 #define s1 1024 #define s2 2048 #define s3 3072 #define w 4096 #define k 4128 /********************************************************************** 3-way twofish **********************************************************************/ #define CTX %rdi #define RIO %rdx #define RAB0 %rax #define RAB1 %rbx #define RAB2 %rcx #define RAB0d %eax #define RAB1d %ebx #define RAB2d %ecx #define RAB0bh %ah #define RAB1bh %bh #define RAB2bh %ch #define RAB0bl %al #define RAB1bl %bl #define RAB2bl %cl #define CD0 0x0(%rsp) #define CD1 0x8(%rsp) #define CD2 0x10(%rsp) # used only before/after all rounds #define RCD0 %r8 #define RCD1 %r9 #define RCD2 %r10 # used only during rounds #define RX0 %r8 #define RX1 %r9 #define RX2 %r10 #define RX0d %r8d #define RX1d %r9d #define RX2d %r10d #define RY0 %r11 #define RY1 %r12 #define RY2 %r13 #define RY0d %r11d #define RY1d %r12d #define RY2d %r13d #define RT0 %rdx #define RT1 %rsi #define RT0d %edx #define RT1d %esi #define RT1bl %sil #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ movzbl ab ## bl, 
tmp2 ## d; \ movzbl ab ## bh, tmp1 ## d; \ rorq $(rot), ab; \ op1##l T0(CTX, tmp2, 4), dst ## d; \ op2##l T1(CTX, tmp1, 4), dst ## d; #define swap_ab_with_cd(ab, cd, tmp) \ movq cd, tmp; \ movq ab, cd; \ movq tmp, ab; /* * Combined G1 & G2 function. Reordered with help of rotates to have moves * at begining. */ #define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \ /* G1,1 && G2,1 */ \ do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 0, ab ## 0, x ## 0); \ do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 0, ab ## 0, y ## 0); \ \ do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 1, ab ## 1, x ## 1); \ do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 1, ab ## 1, y ## 1); \ \ do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 2, ab ## 2, x ## 2); \ do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 2, ab ## 2, y ## 2); \ \ /* G1,2 && G2,2 */ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ swap_ab_with_cd(ab ## 2, cd ## 2, RT0); #define enc_round_end(ab, x, y, n) \ addl y ## d, x ## d; \ addl x ## d, y ## d; \ addl k+4*(2*(n))(CTX), x ## d; \ xorl ab ## d, x ## d; \ addl k+4*(2*(n)+1)(CTX), y ## d; \ shrq $32, ab; \ roll $1, ab ## d; \ xorl y ## d, ab ## d; \ shlq $32, ab; \ rorl $1, x ## d; \ orq x, ab; #define dec_round_end(ba, x, y, n) \ addl y ## d, x ## d; \ addl x ## d, y ## d; \ addl k+4*(2*(n))(CTX), x ## d; \ addl k+4*(2*(n)+1)(CTX), y ## d; \ xorl ba ## d, y ## d; \ shrq $32, ba; \ roll $1, ba ## d; \ xorl x ## d, ba ## d; \ shlq $32, ba; \ rorl $1, y ## d; \ orq y, ba; #define encrypt_round3(ab, cd, n) \ g1g2_3(ab, cd, s0, s1, s2, s3, 
s0, s1, s2, s3, RX, RY); \ \ enc_round_end(ab ## 0, RX0, RY0, n); \ enc_round_end(ab ## 1, RX1, RY1, n); \ enc_round_end(ab ## 2, RX2, RY2, n); #define decrypt_round3(ba, dc, n) \ g1g2_3(ba, dc, s1, s2, s3, s0, s3, s0, s1, s2, RY, RX); \ \ dec_round_end(ba ## 0, RX0, RY0, n); \ dec_round_end(ba ## 1, RX1, RY1, n); \ dec_round_end(ba ## 2, RX2, RY2, n); #define encrypt_cycle3(ab, cd, n) \ encrypt_round3(ab, cd, n*2); \ encrypt_round3(ab, cd, (n*2)+1); #define decrypt_cycle3(ba, dc, n) \ decrypt_round3(ba, dc, (n*2)+1); \ decrypt_round3(ba, dc, (n*2)); #define push_cd() \ pushq RCD2; \ pushq RCD1; \ pushq RCD0; #define pop_cd() \ popq RCD0; \ popq RCD1; \ popq RCD2; #define inpack3(in, n, xy, m) \ movq 4*(n)(in), xy ## 0; \ xorq w+4*m(CTX), xy ## 0; \ \ movq 4*(4+(n))(in), xy ## 1; \ xorq w+4*m(CTX), xy ## 1; \ \ movq 4*(8+(n))(in), xy ## 2; \ xorq w+4*m(CTX), xy ## 2; #define outunpack3(op, out, n, xy, m) \ xorq w+4*m(CTX), xy ## 0; \ op ## q xy ## 0, 4*(n)(out); \ \ xorq w+4*m(CTX), xy ## 1; \ op ## q xy ## 1, 4*(4+(n))(out); \ \ xorq w+4*m(CTX), xy ## 2; \ op ## q xy ## 2, 4*(8+(n))(out); #define inpack_enc3() \ inpack3(RIO, 0, RAB, 0); \ inpack3(RIO, 2, RCD, 2); #define outunpack_enc3(op) \ outunpack3(op, RIO, 2, RAB, 6); \ outunpack3(op, RIO, 0, RCD, 4); #define inpack_dec3() \ inpack3(RIO, 0, RAB, 4); \ rorq $32, RAB0; \ rorq $32, RAB1; \ rorq $32, RAB2; \ inpack3(RIO, 2, RCD, 6); \ rorq $32, RCD0; \ rorq $32, RCD1; \ rorq $32, RCD2; #define outunpack_dec3() \ rorq $32, RCD0; \ rorq $32, RCD1; \ rorq $32, RCD2; \ outunpack3(mov, RIO, 0, RCD, 0); \ rorq $32, RAB0; \ rorq $32, RAB1; \ rorq $32, RAB2; \ outunpack3(mov, RIO, 2, RAB, 2); ENTRY(__twofish_enc_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src, RIO * %rcx: bool, if true: xor output */ pushq %r13; pushq %r12; pushq %rbx; pushq %rcx; /* bool xor */ pushq %rsi; /* dst */ inpack_enc3(); push_cd(); encrypt_cycle3(RAB, CD, 0); encrypt_cycle3(RAB, CD, 1); encrypt_cycle3(RAB, CD, 2); 
encrypt_cycle3(RAB, CD, 3); encrypt_cycle3(RAB, CD, 4); encrypt_cycle3(RAB, CD, 5); encrypt_cycle3(RAB, CD, 6); encrypt_cycle3(RAB, CD, 7); pop_cd(); popq RIO; /* dst */ popq RT1; /* bool xor */ testb RT1bl, RT1bl; jnz .L__enc_xor3; outunpack_enc3(mov); popq %rbx; popq %r12; popq %r13; ret; .L__enc_xor3: outunpack_enc3(xor); popq %rbx; popq %r12; popq %r13; ret; ENDPROC(__twofish_enc_blk_3way) ENTRY(twofish_dec_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src, RIO */ pushq %r13; pushq %r12; pushq %rbx; pushq %rsi; /* dst */ inpack_dec3(); push_cd(); decrypt_cycle3(RAB, CD, 7); decrypt_cycle3(RAB, CD, 6); decrypt_cycle3(RAB, CD, 5); decrypt_cycle3(RAB, CD, 4); decrypt_cycle3(RAB, CD, 3); decrypt_cycle3(RAB, CD, 2); decrypt_cycle3(RAB, CD, 1); decrypt_cycle3(RAB, CD, 0); pop_cd(); popq RIO; /* dst */ outunpack_dec3(); popq %rbx; popq %r12; popq %r13; ret; ENDPROC(twofish_dec_blk_3way)
AirFortressIlikara/LS2K0300-linux-4.19
10,495
arch/x86/crypto/aegis256-aesni-asm.S
/*
 * AES-NI + SSE2 implementation of AEGIS-256
 * (all symbols below are aegis256_*; an earlier header said "AEGIS-128L")
 *
 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/frame.h>

/* The six 128-bit words of the AEGIS-256 state live in %xmm0-%xmm5. */
#define STATE0	%xmm0
#define STATE1	%xmm1
#define STATE2	%xmm2
#define STATE3	%xmm3
#define STATE4	%xmm4
#define STATE5	%xmm5
#define MSG	%xmm6	/* current message block */
#define T0	%xmm7	/* scratch */
#define T1	%xmm8	/* scratch */
#define T2	%xmm9	/* scratch */
#define T3	%xmm10	/* scratch */

/* SysV argument registers, named after their role. */
#define STATEP	%rdi
#define LEN	%rsi
#define SRC	%rdx
#define DST	%rcx

.section .rodata.cst16.aegis256_const, "aM", @progbits, 32
.align 16
.Laegis256_const_0:
	.byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d
	.byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62
.Laegis256_const_1:
	.byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1
	.byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd

/* Byte indices 0..15, used by dec_tail to build a lane mask from LEN. */
.section .rodata.cst16.aegis256_counter, "aM", @progbits, 16
.align 16
.Laegis256_counter:
	.byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
	.byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f

.text

/*
 * __load_partial: internal ABI
 * Loads LEN (< 16) bytes from SRC into MSG, zero-padded, without ever
 * reading past SRC + LEN: the length is decomposed bit by bit (1, 2, 4,
 * then 8 bytes) and each piece is read at its natural offset.
 * input:
 *   LEN - bytes
 *   SRC - src
 * output:
 *   MSG  - message block
 * changed:
 *   T0
 *   %r8
 *   %r9
 */
__load_partial:
	xor %r9d, %r9d
	pxor MSG, MSG

	/* bit 0 of LEN: trailing single byte at offset LEN & ~1 */
	mov LEN, %r8
	and $0x1, %r8
	jz .Lld_partial_1

	mov LEN, %r8
	and $0x1E, %r8
	add SRC, %r8
	mov (%r8), %r9b

.Lld_partial_1:
	/* bit 1 of LEN: 2 bytes at offset LEN & ~3 */
	mov LEN, %r8
	and $0x2, %r8
	jz .Lld_partial_2

	mov LEN, %r8
	and $0x1C, %r8
	add SRC, %r8
	shl $0x10, %r9
	mov (%r8), %r9w

.Lld_partial_2:
	/* bit 2 of LEN: 4 bytes at offset LEN & ~7 */
	mov LEN, %r8
	and $0x4, %r8
	jz .Lld_partial_4

	mov LEN, %r8
	and $0x18, %r8
	add SRC, %r8
	shl $32, %r9
	mov (%r8), %r8d
	xor %r8, %r9

.Lld_partial_4:
	/* %r9 now holds the high fragment; move it into the vector reg */
	movq %r9, MSG

	/* bit 3 of LEN: a full leading 8 bytes at offset LEN & 0x10 (== 0) */
	mov LEN, %r8
	and $0x8, %r8
	jz .Lld_partial_8

	mov LEN, %r8
	and $0x10, %r8
	add SRC, %r8
	pslldq $8, MSG
	movq (%r8), T0
	pxor T0, MSG

.Lld_partial_8:
	ret
ENDPROC(__load_partial)

/*
 * __store_partial: internal ABI
 * Stores the low LEN (< 16) bytes of T0 to DST without writing past
 * DST + LEN, emitting 8-, 4-, 2- and 1-byte pieces as needed.
 * input:
 *   LEN - bytes
 *   DST - dst
 * output:
 *   T0   - message block
 * changed:
 *   %r8
 *   %r9
 *   %r10
 */
__store_partial:
	mov LEN, %r8
	mov DST, %r9
	movq T0, %r10

	cmp $8, %r8
	jl .Lst_partial_8

	/* write the low quadword, shift the high one down for the rest */
	mov %r10, (%r9)
	psrldq $8, T0
	movq T0, %r10

	sub $8, %r8
	add $8, %r9

.Lst_partial_8:
	cmp $4, %r8
	jl .Lst_partial_4

	mov %r10d, (%r9)
	shr $32, %r10

	sub $4, %r8
	add $4, %r9

.Lst_partial_4:
	cmp $2, %r8
	jl .Lst_partial_2

	mov %r10w, (%r9)
	shr $0x10, %r10

	sub $2, %r8
	add $2, %r9

.Lst_partial_2:
	cmp $1, %r8
	jl .Lst_partial_1

	mov %r10b, (%r9)

.Lst_partial_1:
	ret
ENDPROC(__store_partial)

/*
 * One AEGIS state update: each word is AES-round-encrypted with the
 * previous word as the round key; registers are used in a rotated
 * fashion so no data actually moves between registers.
 */
.macro update
	movdqa STATE5, T0
	aesenc STATE0, STATE5
	aesenc STATE1, STATE0
	aesenc STATE2, STATE1
	aesenc STATE3, STATE2
	aesenc STATE4, STATE3
	aesenc T0, STATE4
.endm

/*
 * updateN: state update followed by XOR of the message block into the
 * word that is "state word 0" for rotation offset N.
 */
.macro update0 m
	update
	pxor \m, STATE5
.endm

.macro update1 m
	update
	pxor \m, STATE4
.endm

.macro update2 m
	update
	pxor \m, STATE3
.endm

.macro update3 m
	update
	pxor \m, STATE2
.endm

.macro update4 m
	update
	pxor \m, STATE1
.endm

.macro update5 m
	update
	pxor \m, STATE0
.endm

/* Load the six state words from the state buffer (unaligned-safe). */
.macro state_load
	movdqu 0x00(STATEP), STATE0
	movdqu 0x10(STATEP), STATE1
	movdqu 0x20(STATEP), STATE2
	movdqu 0x30(STATEP), STATE3
	movdqu 0x40(STATEP), STATE4
	movdqu 0x50(STATEP), STATE5
.endm

/*
 * Store the state back; note \s5 goes to slot 0, undoing one step of
 * the register rotation accumulated by the updates.
 */
.macro state_store s0 s1 s2 s3 s4 s5
	movdqu \s5, 0x00(STATEP)
	movdqu \s0, 0x10(STATEP)
	movdqu \s1, 0x20(STATEP)
	movdqu \s2, 0x30(STATEP)
	movdqu \s3, 0x40(STATEP)
	movdqu \s4, 0x50(STATEP)
.endm

/* state_storeN: store for rotation offset N (N updates mod 6 done). */
.macro state_store0
	state_store STATE0 STATE1 STATE2 STATE3 STATE4 STATE5
.endm

.macro state_store1
	state_store STATE5 STATE0 STATE1 STATE2 STATE3 STATE4
.endm

.macro state_store2
	state_store STATE4 STATE5 STATE0 STATE1 STATE2 STATE3
.endm

.macro state_store3
	state_store STATE3 STATE4 STATE5 STATE0 STATE1 STATE2
.endm

.macro state_store4
	state_store STATE2 STATE3 STATE4 STATE5 STATE0 STATE1
.endm

.macro state_store5
	state_store STATE1 STATE2 STATE3 STATE4 STATE5 STATE0
.endm

/*
 * void crypto_aegis256_aesni_init(void *state, const void *key, const void *iv);
 */
ENTRY(crypto_aegis256_aesni_init)
	FRAME_BEGIN

	/* load key: */
	movdqa 0x00(%rsi), MSG
	movdqa 0x10(%rsi), T1
	movdqa MSG, STATE4
	movdqa T1, STATE5

	/* load IV: */
	movdqu 0x00(%rdx), T2
	movdqu 0x10(%rdx), T3
	pxor MSG, T2
	pxor T1, T3
	movdqa T2, STATE0
	movdqa T3, STATE1

	/* load the constants: */
	movdqa .Laegis256_const_0, STATE3
	movdqa .Laegis256_const_1, STATE2
	pxor STATE3, STATE4
	pxor STATE2, STATE5

	/* update 16 times with the IV and KEY material: */
	update0 MSG
	update1 T1
	update2 T2
	update3 T3
	update4 MSG
	update5 T1
	update0 T2
	update1 T3
	update2 MSG
	update3 T1
	update4 T2
	update5 T3
	update0 MSG
	update1 T1
	update2 T2
	update3 T3

	/* 16 mod 6 == 4, i.e. the state is rotated as after update3 */
	state_store3

	FRAME_END
	ret
ENDPROC(crypto_aegis256_aesni_init)

/*
 * Absorb one 16-byte block of associated data.
 * \a selects aligned (a) vs unaligned (u) load; \i is the rotation index.
 * Falls out to .Lad_out_\i when fewer than 16 bytes remain.
 */
.macro ad_block a i
	movdq\a (\i * 0x10)(SRC), MSG
	update\i MSG
	sub $0x10, LEN
	cmp $0x10, LEN
	jl .Lad_out_\i
.endm

/*
 * void crypto_aegis256_aesni_ad(void *state, unsigned int length,
 *                               const void *data);
 */
ENTRY(crypto_aegis256_aesni_ad)
	FRAME_BEGIN

	cmp $0x10, LEN
	jb .Lad_out

	state_load

	/* pick the aligned loop when the source is 16-byte aligned */
	mov SRC, %r8
	and $0xf, %r8
	jnz .Lad_u_loop

.align 8
.Lad_a_loop:
	ad_block a 0
	ad_block a 1
	ad_block a 2
	ad_block a 3
	ad_block a 4
	ad_block a 5

	add $0x60, SRC
	jmp .Lad_a_loop

.align 8
.Lad_u_loop:
	ad_block u 0
	ad_block u 1
	ad_block u 2
	ad_block u 3
	ad_block u 4
	ad_block u 5

	add $0x60, SRC
	jmp .Lad_u_loop

	/* one exit per rotation offset, storing the matching permutation */
.Lad_out_0:
	state_store0
	FRAME_END
	ret

.Lad_out_1:
	state_store1
	FRAME_END
	ret

.Lad_out_2:
	state_store2
	FRAME_END
	ret

.Lad_out_3:
	state_store3
	FRAME_END
	ret

.Lad_out_4:
	state_store4
	FRAME_END
	ret

.Lad_out_5:
	state_store5
	FRAME_END
	ret

.Lad_out:
	FRAME_END
	ret
ENDPROC(crypto_aegis256_aesni_ad)

/*
 * Keystream mix: \m ^= s1 ^ s4 ^ s5 ^ (s2 & s3), with sN given in the
 * rotated order of the current offset.  Clobbers T3.
 */
.macro crypt m s0 s1 s2 s3 s4 s5
	pxor \s1, \m
	pxor \s4, \m
	pxor \s5, \m
	movdqa \s2, T3
	pand \s3, T3
	pxor T3, \m
.endm

/* cryptN: keystream mix for rotation offset N. */
.macro crypt0 m
	crypt \m STATE0 STATE1 STATE2 STATE3 STATE4 STATE5
.endm

.macro crypt1 m
	crypt \m STATE5 STATE0 STATE1 STATE2 STATE3 STATE4
.endm

.macro crypt2 m
	crypt \m STATE4 STATE5 STATE0 STATE1 STATE2 STATE3
.endm

.macro crypt3 m
	crypt \m STATE3 STATE4 STATE5 STATE0 STATE1 STATE2
.endm

.macro crypt4 m
	crypt \m STATE2 STATE3 STATE4 STATE5 STATE0 STATE1
.endm

.macro crypt5 m
	crypt \m STATE1 STATE2 STATE3 STATE4 STATE5 STATE0
.endm

/* Encrypt one block: ciphertext out, plaintext absorbed into the state. */
.macro encrypt_block a i
	movdq\a (\i * 0x10)(SRC), MSG
	movdqa MSG, T0
	crypt\i T0
	movdq\a T0, (\i * 0x10)(DST)
	update\i MSG
	sub $0x10, LEN
	cmp $0x10, LEN
	jl .Lenc_out_\i
.endm

/* Decrypt one block: recovered plaintext is absorbed into the state. */
.macro decrypt_block a i
	movdq\a (\i * 0x10)(SRC), MSG
	crypt\i MSG
	movdq\a MSG, (\i * 0x10)(DST)
	update\i MSG
	sub $0x10, LEN
	cmp $0x10, LEN
	jl .Ldec_out_\i
.endm

/*
 * void crypto_aegis256_aesni_enc(void *state, unsigned int length,
 *                                const void *src, void *dst);
 */
ENTRY(crypto_aegis256_aesni_enc)
	FRAME_BEGIN

	cmp $0x10, LEN
	jb .Lenc_out

	state_load

	/* aligned loop only if both src and dst are 16-byte aligned */
	mov SRC, %r8
	or DST, %r8
	and $0xf, %r8
	jnz .Lenc_u_loop

.align 8
.Lenc_a_loop:
	encrypt_block a 0
	encrypt_block a 1
	encrypt_block a 2
	encrypt_block a 3
	encrypt_block a 4
	encrypt_block a 5

	add $0x60, SRC
	add $0x60, DST
	jmp .Lenc_a_loop

.align 8
.Lenc_u_loop:
	encrypt_block u 0
	encrypt_block u 1
	encrypt_block u 2
	encrypt_block u 3
	encrypt_block u 4
	encrypt_block u 5

	add $0x60, SRC
	add $0x60, DST
	jmp .Lenc_u_loop

.Lenc_out_0:
	state_store0
	FRAME_END
	ret

.Lenc_out_1:
	state_store1
	FRAME_END
	ret

.Lenc_out_2:
	state_store2
	FRAME_END
	ret

.Lenc_out_3:
	state_store3
	FRAME_END
	ret

.Lenc_out_4:
	state_store4
	FRAME_END
	ret

.Lenc_out_5:
	state_store5
	FRAME_END
	ret

.Lenc_out:
	FRAME_END
	ret
ENDPROC(crypto_aegis256_aesni_enc)

/*
 * void crypto_aegis256_aesni_enc_tail(void *state, unsigned int length,
 *                                     const void *src, void *dst);
 * Encrypts the final partial (< 16 byte) block.
 */
ENTRY(crypto_aegis256_aesni_enc_tail)
	FRAME_BEGIN

	state_load

	/* encrypt message: */
	call __load_partial

	movdqa MSG, T0
	crypt0 T0
	call __store_partial

	/* the zero-padded plaintext block is absorbed as-is */
	update0 MSG

	state_store0

	FRAME_END
	ret
ENDPROC(crypto_aegis256_aesni_enc_tail)

/*
 * void crypto_aegis256_aesni_dec(void *state, unsigned int length,
 *                                const void *src, void *dst);
 */
ENTRY(crypto_aegis256_aesni_dec)
	FRAME_BEGIN

	cmp $0x10, LEN
	jb .Ldec_out

	state_load

	/* aligned loop only if both src and dst are 16-byte aligned */
	mov SRC, %r8
	or DST, %r8
	and $0xF, %r8
	jnz .Ldec_u_loop

.align 8
.Ldec_a_loop:
	decrypt_block a 0
	decrypt_block a 1
	decrypt_block a 2
	decrypt_block a 3
	decrypt_block a 4
	decrypt_block a 5

	add $0x60, SRC
	add $0x60, DST
	jmp .Ldec_a_loop

.align 8
.Ldec_u_loop:
	decrypt_block u 0
	decrypt_block u 1
	decrypt_block u 2
	decrypt_block u 3
	decrypt_block u 4
	decrypt_block u 5

	add $0x60, SRC
	add $0x60, DST
	jmp .Ldec_u_loop

.Ldec_out_0:
	state_store0
	FRAME_END
	ret

.Ldec_out_1:
	state_store1
	FRAME_END
	ret

.Ldec_out_2:
	state_store2
	FRAME_END
	ret

.Ldec_out_3:
	state_store3
	FRAME_END
	ret

.Ldec_out_4:
	state_store4
	FRAME_END
	ret

.Ldec_out_5:
	state_store5
	FRAME_END
	ret

.Ldec_out:
	FRAME_END
	ret
ENDPROC(crypto_aegis256_aesni_dec)

/*
 * void crypto_aegis256_aesni_dec_tail(void *state, unsigned int length,
 *                                     const void *src, void *dst);
 * Decrypts the final partial (< 16 byte) block.  Unlike encryption, the
 * decrypted block must be masked down to LEN bytes before being absorbed,
 * since the keystream garbage beyond LEN must not enter the state.
 */
ENTRY(crypto_aegis256_aesni_dec_tail)
	FRAME_BEGIN

	state_load

	/* decrypt message: */
	call __load_partial

	crypt0 MSG

	movdqa MSG, T0
	call __store_partial

	/* mask with byte count: broadcast LEN to all 16 lanes, then keep
	 * only lanes whose index (from .Laegis256_counter) is < LEN */
	movq LEN, T0
	punpcklbw T0, T0
	punpcklbw T0, T0
	punpcklbw T0, T0
	punpcklbw T0, T0
	movdqa .Laegis256_counter, T1
	pcmpgtb T1, T0
	pand T0, MSG

	update0 MSG

	state_store0

	FRAME_END
	ret
ENDPROC(crypto_aegis256_aesni_dec_tail)

/*
 * void crypto_aegis256_aesni_final(void *state, void *tag_xor,
 *                                  u64 assoclen, u64 cryptlen);
 */
ENTRY(crypto_aegis256_aesni_final)
	FRAME_BEGIN

	state_load

	/* prepare length block: low qword = assoclen, high qword = cryptlen */
	movq %rdx, MSG
	movq %rcx, T0
	pslldq $8, T0
	pxor T0, MSG
	psllq $3, MSG /* multiply by 8 (to get bit count) */

	pxor STATE3, MSG

	/* update state: */
	update0 MSG
	update1 MSG
	update2 MSG
	update3 MSG
	update4 MSG
	update5 MSG
	update0 MSG

	/* xor tag: fold all six state words into the caller's tag buffer */
	movdqu (%rsi), MSG

	pxor STATE0, MSG
	pxor STATE1, MSG
	pxor STATE2, MSG
	pxor STATE3, MSG
	pxor STATE4, MSG
	pxor STATE5, MSG

	movdqu MSG, (%rsi)

	FRAME_END
	ret
ENDPROC(crypto_aegis256_aesni_final)
AirFortressIlikara/LS2K0300-linux-4.19
14,884
arch/x86/crypto/chacha20-ssse3-x86_64.S
/*
 * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/linkage.h>

/* pshufb masks implementing rotate-left by 8 and 16 bits per 32-bit word */
.section .rodata.cst16.ROT8, "aM", @progbits, 16
.align 16
ROT8:	.octa 0x0e0d0c0f0a09080b0605040702010003
.section .rodata.cst16.ROT16, "aM", @progbits, 16
.align 16
ROT16:	.octa 0x0d0c0f0e09080b0a0504070601000302
/* per-lane counter increments 0,1,2,3 for the 4-block variant */
.section .rodata.cst16.CTRINC, "aM", @progbits, 16
.align 16
CTRINC:	.octa 0x00000003000000020000000100000000

.text

ENTRY(chacha20_block_xor_ssse3)
	# %rdi: Input state matrix, s
	# %rsi: 1 data block output, o
	# %rdx: 1 data block input, i

	# This function encrypts one ChaCha20 block by loading the state matrix
	# in four SSE registers. It performs matrix operations on four words in
	# parallel, but requires shuffling to rearrange the words after each
	# round. 8/16-bit word rotation is done with the slightly better
	# performing SSSE3 byte shuffling, 7/12-bit word rotation uses
	# traditional shift+OR.

	# x0..3 = s0..3  (xmm8-11 keep the original state for the final add)
	movdqa		0x00(%rdi),%xmm0
	movdqa		0x10(%rdi),%xmm1
	movdqa		0x20(%rdi),%xmm2
	movdqa		0x30(%rdi),%xmm3
	movdqa		%xmm0,%xmm8
	movdqa		%xmm1,%xmm9
	movdqa		%xmm2,%xmm10
	movdqa		%xmm3,%xmm11

	movdqa		ROT8(%rip),%xmm4
	movdqa		ROT16(%rip),%xmm5

	mov	$10,%ecx		# 10 double rounds = 20 rounds

.Ldoubleround:

	# --- column round ---
	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
	paddd		%xmm1,%xmm0
	pxor		%xmm0,%xmm3
	pshufb		%xmm5,%xmm3

	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
	paddd		%xmm3,%xmm2
	pxor		%xmm2,%xmm1
	movdqa		%xmm1,%xmm6
	pslld		$12,%xmm6
	psrld		$20,%xmm1
	por		%xmm6,%xmm1

	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
	paddd		%xmm1,%xmm0
	pxor		%xmm0,%xmm3
	pshufb		%xmm4,%xmm3

	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
	paddd		%xmm3,%xmm2
	pxor		%xmm2,%xmm1
	movdqa		%xmm1,%xmm7
	pslld		$7,%xmm7
	psrld		$25,%xmm1
	por		%xmm7,%xmm1

	# rotate rows so the diagonal round can reuse the column-round code
	# x1 = shuffle32(x1, MASK(0, 3, 2, 1))
	pshufd		$0x39,%xmm1,%xmm1
	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
	pshufd		$0x4e,%xmm2,%xmm2
	# x3 = shuffle32(x3, MASK(2, 1, 0, 3))
	pshufd		$0x93,%xmm3,%xmm3

	# --- diagonal round (on the rotated rows) ---
	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
	paddd		%xmm1,%xmm0
	pxor		%xmm0,%xmm3
	pshufb		%xmm5,%xmm3

	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
	paddd		%xmm3,%xmm2
	pxor		%xmm2,%xmm1
	movdqa		%xmm1,%xmm6
	pslld		$12,%xmm6
	psrld		$20,%xmm1
	por		%xmm6,%xmm1

	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
	paddd		%xmm1,%xmm0
	pxor		%xmm0,%xmm3
	pshufb		%xmm4,%xmm3

	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
	paddd		%xmm3,%xmm2
	pxor		%xmm2,%xmm1
	movdqa		%xmm1,%xmm7
	pslld		$7,%xmm7
	psrld		$25,%xmm1
	por		%xmm7,%xmm1

	# undo the row rotation
	# x1 = shuffle32(x1, MASK(2, 1, 0, 3))
	pshufd		$0x93,%xmm1,%xmm1
	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
	pshufd		$0x4e,%xmm2,%xmm2
	# x3 = shuffle32(x3, MASK(0, 3, 2, 1))
	pshufd		$0x39,%xmm3,%xmm3

	dec		%ecx
	jnz		.Ldoubleround

	# feed-forward add of the saved input state, then XOR with the input
	# o0 = i0 ^ (x0 + s0)
	movdqu		0x00(%rdx),%xmm4
	paddd		%xmm8,%xmm0
	pxor		%xmm4,%xmm0
	movdqu		%xmm0,0x00(%rsi)
	# o1 = i1 ^ (x1 + s1)
	movdqu		0x10(%rdx),%xmm5
	paddd		%xmm9,%xmm1
	pxor		%xmm5,%xmm1
	movdqu		%xmm1,0x10(%rsi)
	# o2 = i2 ^ (x2 + s2)
	movdqu		0x20(%rdx),%xmm6
	paddd		%xmm10,%xmm2
	pxor		%xmm6,%xmm2
	movdqu		%xmm2,0x20(%rsi)
	# o3 = i3 ^ (x3 + s3)
	movdqu		0x30(%rdx),%xmm7
	paddd		%xmm11,%xmm3
	pxor		%xmm7,%xmm3
	movdqu		%xmm3,0x30(%rsi)

	ret
ENDPROC(chacha20_block_xor_ssse3)

ENTRY(chacha20_4block_xor_ssse3)
	# %rdi: Input state matrix, s
	# %rsi: 4 data blocks output, o
	# %rdx: 4 data blocks input, i

	# This function encrypts four consecutive ChaCha20 blocks by loading
	# the state matrix in SSE registers four times. As we need some scratch
	# registers, we save the first four registers on the stack. The
	# algorithm performs each operation on the corresponding word of each
	# state matrix, hence requires no word shuffling. For final XORing step
	# we transpose the matrix by interleaving 32- and then 64-bit words,
	# which allows us to do XOR in SSE registers. 8/16-bit word rotation is
	# done with the slightly better performing SSSE3 byte shuffling,
	# 7/12-bit word rotation uses traditional shift+OR.

	lea		8(%rsp),%r10	# remember caller rsp for the epilogue
	sub		$0x80,%rsp	# 128-byte scratch area
	and		$~63,%rsp	# 64-byte align (movdqa-safe slots)

	# x0..15[0-3] = s0..3[0..3]  (each state word splat across 4 lanes)
	movq		0x00(%rdi),%xmm1
	pshufd		$0x00,%xmm1,%xmm0
	pshufd		$0x55,%xmm1,%xmm1
	movq		0x08(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	movq		0x10(%rdi),%xmm5
	pshufd		$0x00,%xmm5,%xmm4
	pshufd		$0x55,%xmm5,%xmm5
	movq		0x18(%rdi),%xmm7
	pshufd		$0x00,%xmm7,%xmm6
	pshufd		$0x55,%xmm7,%xmm7
	movq		0x20(%rdi),%xmm9
	pshufd		$0x00,%xmm9,%xmm8
	pshufd		$0x55,%xmm9,%xmm9
	movq		0x28(%rdi),%xmm11
	pshufd		$0x00,%xmm11,%xmm10
	pshufd		$0x55,%xmm11,%xmm11
	movq		0x30(%rdi),%xmm13
	pshufd		$0x00,%xmm13,%xmm12
	pshufd		$0x55,%xmm13,%xmm13
	movq		0x38(%rdi),%xmm15
	pshufd		$0x00,%xmm15,%xmm14
	pshufd		$0x55,%xmm15,%xmm15

	# x0..3 on stack (frees xmm0-3 for scratch and constants)
	movdqa		%xmm0,0x00(%rsp)
	movdqa		%xmm1,0x10(%rsp)
	movdqa		%xmm2,0x20(%rsp)
	movdqa		%xmm3,0x30(%rsp)

	# constants held for the whole loop: xmm1=CTRINC, xmm2=ROT8, xmm3=ROT16
	movdqa		CTRINC(%rip),%xmm1
	movdqa		ROT8(%rip),%xmm2
	movdqa		ROT16(%rip),%xmm3

	# x12 += counter values 0-3
	paddd		%xmm1,%xmm12

	mov		$10,%ecx	# 10 double rounds = 20 rounds

.Ldoubleround4:
	# --- column round ---
	# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
	movdqa		0x00(%rsp),%xmm0
	paddd		%xmm4,%xmm0
	movdqa		%xmm0,0x00(%rsp)
	pxor		%xmm0,%xmm12
	pshufb		%xmm3,%xmm12
	# x1 += x5, x13 = rotl32(x13 ^ x1, 16)
	movdqa		0x10(%rsp),%xmm0
	paddd		%xmm5,%xmm0
	movdqa		%xmm0,0x10(%rsp)
	pxor		%xmm0,%xmm13
	pshufb		%xmm3,%xmm13
	# x2 += x6, x14 = rotl32(x14 ^ x2, 16)
	movdqa		0x20(%rsp),%xmm0
	paddd		%xmm6,%xmm0
	movdqa		%xmm0,0x20(%rsp)
	pxor		%xmm0,%xmm14
	pshufb		%xmm3,%xmm14
	# x3 += x7, x15 = rotl32(x15 ^ x3, 16)
	movdqa		0x30(%rsp),%xmm0
	paddd		%xmm7,%xmm0
	movdqa		%xmm0,0x30(%rsp)
	pxor		%xmm0,%xmm15
	pshufb		%xmm3,%xmm15

	# x8 += x12, x4 = rotl32(x4 ^ x8, 12)
	paddd		%xmm12,%xmm8
	pxor		%xmm8,%xmm4
	movdqa		%xmm4,%xmm0
	pslld		$12,%xmm0
	psrld		$20,%xmm4
	por		%xmm0,%xmm4
	# x9 += x13, x5 = rotl32(x5 ^ x9, 12)
	paddd		%xmm13,%xmm9
	pxor		%xmm9,%xmm5
	movdqa		%xmm5,%xmm0
	pslld		$12,%xmm0
	psrld		$20,%xmm5
	por		%xmm0,%xmm5
	# x10 += x14, x6 = rotl32(x6 ^ x10, 12)
	paddd		%xmm14,%xmm10
	pxor		%xmm10,%xmm6
	movdqa		%xmm6,%xmm0
	pslld		$12,%xmm0
	psrld		$20,%xmm6
	por		%xmm0,%xmm6
	# x11 += x15, x7 = rotl32(x7 ^ x11, 12)
	paddd		%xmm15,%xmm11
	pxor		%xmm11,%xmm7
	movdqa		%xmm7,%xmm0
	pslld		$12,%xmm0
	psrld		$20,%xmm7
	por		%xmm0,%xmm7

	# x0 += x4, x12 = rotl32(x12 ^ x0, 8)
	movdqa		0x00(%rsp),%xmm0
	paddd		%xmm4,%xmm0
	movdqa		%xmm0,0x00(%rsp)
	pxor		%xmm0,%xmm12
	pshufb		%xmm2,%xmm12
	# x1 += x5, x13 = rotl32(x13 ^ x1, 8)
	movdqa		0x10(%rsp),%xmm0
	paddd		%xmm5,%xmm0
	movdqa		%xmm0,0x10(%rsp)
	pxor		%xmm0,%xmm13
	pshufb		%xmm2,%xmm13
	# x2 += x6, x14 = rotl32(x14 ^ x2, 8)
	movdqa		0x20(%rsp),%xmm0
	paddd		%xmm6,%xmm0
	movdqa		%xmm0,0x20(%rsp)
	pxor		%xmm0,%xmm14
	pshufb		%xmm2,%xmm14
	# x3 += x7, x15 = rotl32(x15 ^ x3, 8)
	movdqa		0x30(%rsp),%xmm0
	paddd		%xmm7,%xmm0
	movdqa		%xmm0,0x30(%rsp)
	pxor		%xmm0,%xmm15
	pshufb		%xmm2,%xmm15

	# x8 += x12, x4 = rotl32(x4 ^ x8, 7)
	paddd		%xmm12,%xmm8
	pxor		%xmm8,%xmm4
	movdqa		%xmm4,%xmm0
	pslld		$7,%xmm0
	psrld		$25,%xmm4
	por		%xmm0,%xmm4
	# x9 += x13, x5 = rotl32(x5 ^ x9, 7)
	paddd		%xmm13,%xmm9
	pxor		%xmm9,%xmm5
	movdqa		%xmm5,%xmm0
	pslld		$7,%xmm0
	psrld		$25,%xmm5
	por		%xmm0,%xmm5
	# x10 += x14, x6 = rotl32(x6 ^ x10, 7)
	paddd		%xmm14,%xmm10
	pxor		%xmm10,%xmm6
	movdqa		%xmm6,%xmm0
	pslld		$7,%xmm0
	psrld		$25,%xmm6
	por		%xmm0,%xmm6
	# x11 += x15, x7 = rotl32(x7 ^ x11, 7)
	paddd		%xmm15,%xmm11
	pxor		%xmm11,%xmm7
	movdqa		%xmm7,%xmm0
	pslld		$7,%xmm0
	psrld		$25,%xmm7
	por		%xmm0,%xmm7

	# --- diagonal round ---
	# x0 += x5, x15 = rotl32(x15 ^ x0, 16)
	movdqa		0x00(%rsp),%xmm0
	paddd		%xmm5,%xmm0
	movdqa		%xmm0,0x00(%rsp)
	pxor		%xmm0,%xmm15
	pshufb		%xmm3,%xmm15
	# x1 += x6, x12 = rotl32(x12 ^ x1, 16)
	movdqa		0x10(%rsp),%xmm0
	paddd		%xmm6,%xmm0
	movdqa		%xmm0,0x10(%rsp)
	pxor		%xmm0,%xmm12
	pshufb		%xmm3,%xmm12
	# x2 += x7, x13 = rotl32(x13 ^ x2, 16)
	movdqa		0x20(%rsp),%xmm0
	paddd		%xmm7,%xmm0
	movdqa		%xmm0,0x20(%rsp)
	pxor		%xmm0,%xmm13
	pshufb		%xmm3,%xmm13
	# x3 += x4, x14 = rotl32(x14 ^ x3, 16)
	movdqa		0x30(%rsp),%xmm0
	paddd		%xmm4,%xmm0
	movdqa		%xmm0,0x30(%rsp)
	pxor		%xmm0,%xmm14
	pshufb		%xmm3,%xmm14

	# x10 += x15, x5 = rotl32(x5 ^ x10, 12)
	paddd		%xmm15,%xmm10
	pxor		%xmm10,%xmm5
	movdqa		%xmm5,%xmm0
	pslld		$12,%xmm0
	psrld		$20,%xmm5
	por		%xmm0,%xmm5
	# x11 += x12, x6 = rotl32(x6 ^ x11, 12)
	paddd		%xmm12,%xmm11
	pxor		%xmm11,%xmm6
	movdqa		%xmm6,%xmm0
	pslld		$12,%xmm0
	psrld		$20,%xmm6
	por		%xmm0,%xmm6
	# x8 += x13, x7 = rotl32(x7 ^ x8, 12)
	paddd		%xmm13,%xmm8
	pxor		%xmm8,%xmm7
	movdqa		%xmm7,%xmm0
	pslld		$12,%xmm0
	psrld		$20,%xmm7
	por		%xmm0,%xmm7
	# x9 += x14, x4 = rotl32(x4 ^ x9, 12)
	paddd		%xmm14,%xmm9
	pxor		%xmm9,%xmm4
	movdqa		%xmm4,%xmm0
	pslld		$12,%xmm0
	psrld		$20,%xmm4
	por		%xmm0,%xmm4

	# x0 += x5, x15 = rotl32(x15 ^ x0, 8)
	movdqa		0x00(%rsp),%xmm0
	paddd		%xmm5,%xmm0
	movdqa		%xmm0,0x00(%rsp)
	pxor		%xmm0,%xmm15
	pshufb		%xmm2,%xmm15
	# x1 += x6, x12 = rotl32(x12 ^ x1, 8)
	movdqa		0x10(%rsp),%xmm0
	paddd		%xmm6,%xmm0
	movdqa		%xmm0,0x10(%rsp)
	pxor		%xmm0,%xmm12
	pshufb		%xmm2,%xmm12
	# x2 += x7, x13 = rotl32(x13 ^ x2, 8)
	movdqa		0x20(%rsp),%xmm0
	paddd		%xmm7,%xmm0
	movdqa		%xmm0,0x20(%rsp)
	pxor		%xmm0,%xmm13
	pshufb		%xmm2,%xmm13
	# x3 += x4, x14 = rotl32(x14 ^ x3, 8)
	movdqa		0x30(%rsp),%xmm0
	paddd		%xmm4,%xmm0
	movdqa		%xmm0,0x30(%rsp)
	pxor		%xmm0,%xmm14
	pshufb		%xmm2,%xmm14

	# x10 += x15, x5 = rotl32(x5 ^ x10, 7)
	paddd		%xmm15,%xmm10
	pxor		%xmm10,%xmm5
	movdqa		%xmm5,%xmm0
	pslld		$7,%xmm0
	psrld		$25,%xmm5
	por		%xmm0,%xmm5
	# x11 += x12, x6 = rotl32(x6 ^ x11, 7)
	paddd		%xmm12,%xmm11
	pxor		%xmm11,%xmm6
	movdqa		%xmm6,%xmm0
	pslld		$7,%xmm0
	psrld		$25,%xmm6
	por		%xmm0,%xmm6
	# x8 += x13, x7 = rotl32(x7 ^ x8, 7)
	paddd		%xmm13,%xmm8
	pxor		%xmm8,%xmm7
	movdqa		%xmm7,%xmm0
	pslld		$7,%xmm0
	psrld		$25,%xmm7
	por		%xmm0,%xmm7
	# x9 += x14, x4 = rotl32(x4 ^ x9, 7)
	paddd		%xmm14,%xmm9
	pxor		%xmm9,%xmm4
	movdqa		%xmm4,%xmm0
	pslld		$7,%xmm0
	psrld		$25,%xmm4
	por		%xmm0,%xmm4

	dec		%ecx
	jnz		.Ldoubleround4

	# feed-forward: re-splat each original state word and add it in
	# x0[0-3] += s0[0]
	# x1[0-3] += s0[1]
	movq		0x00(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	paddd		0x00(%rsp),%xmm2
	movdqa		%xmm2,0x00(%rsp)
	paddd		0x10(%rsp),%xmm3
	movdqa		%xmm3,0x10(%rsp)
	# x2[0-3] += s0[2]
	# x3[0-3] += s0[3]
	movq		0x08(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	paddd		0x20(%rsp),%xmm2
	movdqa		%xmm2,0x20(%rsp)
	paddd		0x30(%rsp),%xmm3
	movdqa		%xmm3,0x30(%rsp)

	# x4[0-3] += s1[0]
	# x5[0-3] += s1[1]
	movq		0x10(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	paddd		%xmm2,%xmm4
	paddd		%xmm3,%xmm5
	# x6[0-3] += s1[2]
	# x7[0-3] += s1[3]
	movq		0x18(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	paddd		%xmm2,%xmm6
	paddd		%xmm3,%xmm7

	# x8[0-3] += s2[0]
	# x9[0-3] += s2[1]
	movq		0x20(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	paddd		%xmm2,%xmm8
	paddd		%xmm3,%xmm9
	# x10[0-3] += s2[2]
	# x11[0-3] += s2[3]
	movq		0x28(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	paddd		%xmm2,%xmm10
	paddd		%xmm3,%xmm11

	# x12[0-3] += s3[0]
	# x13[0-3] += s3[1]
	movq		0x30(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	paddd		%xmm2,%xmm12
	paddd		%xmm3,%xmm13
	# x14[0-3] += s3[2]
	# x15[0-3] += s3[3]
	movq		0x38(%rdi),%xmm3
	pshufd		$0x00,%xmm3,%xmm2
	pshufd		$0x55,%xmm3,%xmm3
	paddd		%xmm2,%xmm14
	paddd		%xmm3,%xmm15

	# x12 += counter values 0-3  (per-lane counters, as at the start)
	paddd		%xmm1,%xmm12

	# transpose step 1: interleave 32-bit words in state n, n+1
	movdqa		0x00(%rsp),%xmm0
	movdqa		0x10(%rsp),%xmm1
	movdqa		%xmm0,%xmm2
	punpckldq	%xmm1,%xmm2
	punpckhdq	%xmm1,%xmm0
	movdqa		%xmm2,0x00(%rsp)
	movdqa		%xmm0,0x10(%rsp)
	movdqa		0x20(%rsp),%xmm0
	movdqa		0x30(%rsp),%xmm1
	movdqa		%xmm0,%xmm2
	punpckldq	%xmm1,%xmm2
	punpckhdq	%xmm1,%xmm0
	movdqa		%xmm2,0x20(%rsp)
	movdqa		%xmm0,0x30(%rsp)
	movdqa		%xmm4,%xmm0
	punpckldq	%xmm5,%xmm4
	punpckhdq	%xmm5,%xmm0
	movdqa		%xmm0,%xmm5
	movdqa		%xmm6,%xmm0
	punpckldq	%xmm7,%xmm6
	punpckhdq	%xmm7,%xmm0
	movdqa		%xmm0,%xmm7
	movdqa		%xmm8,%xmm0
	punpckldq	%xmm9,%xmm8
	punpckhdq	%xmm9,%xmm0
	movdqa		%xmm0,%xmm9
	movdqa		%xmm10,%xmm0
	punpckldq	%xmm11,%xmm10
	punpckhdq	%xmm11,%xmm0
	movdqa		%xmm0,%xmm11
	movdqa		%xmm12,%xmm0
	punpckldq	%xmm13,%xmm12
	punpckhdq	%xmm13,%xmm0
	movdqa		%xmm0,%xmm13
	movdqa		%xmm14,%xmm0
	punpckldq	%xmm15,%xmm14
	punpckhdq	%xmm15,%xmm0
	movdqa		%xmm0,%xmm15

	# transpose step 2: interleave 64-bit words in state n, n+2
	movdqa		0x00(%rsp),%xmm0
	movdqa		0x20(%rsp),%xmm1
	movdqa		%xmm0,%xmm2
	punpcklqdq	%xmm1,%xmm2
	punpckhqdq	%xmm1,%xmm0
	movdqa		%xmm2,0x00(%rsp)
	movdqa		%xmm0,0x20(%rsp)
	movdqa		0x10(%rsp),%xmm0
	movdqa		0x30(%rsp),%xmm1
	movdqa		%xmm0,%xmm2
	punpcklqdq	%xmm1,%xmm2
	punpckhqdq	%xmm1,%xmm0
	movdqa		%xmm2,0x10(%rsp)
	movdqa		%xmm0,0x30(%rsp)
	movdqa		%xmm4,%xmm0
	punpcklqdq	%xmm6,%xmm4
	punpckhqdq	%xmm6,%xmm0
	movdqa		%xmm0,%xmm6
	movdqa		%xmm5,%xmm0
	punpcklqdq	%xmm7,%xmm5
	punpckhqdq	%xmm7,%xmm0
	movdqa		%xmm0,%xmm7
	movdqa		%xmm8,%xmm0
	punpcklqdq	%xmm10,%xmm8
	punpckhqdq	%xmm10,%xmm0
	movdqa		%xmm0,%xmm10
	movdqa		%xmm9,%xmm0
	punpcklqdq	%xmm11,%xmm9
	punpckhqdq	%xmm11,%xmm0
	movdqa		%xmm0,%xmm11
	movdqa		%xmm12,%xmm0
	punpcklqdq	%xmm14,%xmm12
	punpckhqdq	%xmm14,%xmm0
	movdqa		%xmm0,%xmm14
	movdqa		%xmm13,%xmm0
	punpcklqdq	%xmm15,%xmm13
	punpckhqdq	%xmm15,%xmm0
	movdqa		%xmm0,%xmm15

	# xor with corresponding input, write to output; after the transpose
	# each register/slot holds one 16-byte row of one of the four 64-byte
	# blocks, hence the 0x40-strided output offsets
	movdqa		0x00(%rsp),%xmm0
	movdqu		0x00(%rdx),%xmm1
	pxor		%xmm1,%xmm0
	movdqu		%xmm0,0x00(%rsi)
	movdqa		0x10(%rsp),%xmm0
	movdqu		0x80(%rdx),%xmm1
	pxor		%xmm1,%xmm0
	movdqu		%xmm0,0x80(%rsi)
	movdqa		0x20(%rsp),%xmm0
	movdqu		0x40(%rdx),%xmm1
	pxor		%xmm1,%xmm0
	movdqu		%xmm0,0x40(%rsi)
	movdqa		0x30(%rsp),%xmm0
	movdqu		0xc0(%rdx),%xmm1
	pxor		%xmm1,%xmm0
	movdqu		%xmm0,0xc0(%rsi)
	movdqu		0x10(%rdx),%xmm1
	pxor		%xmm1,%xmm4
	movdqu		%xmm4,0x10(%rsi)
	movdqu		0x90(%rdx),%xmm1
	pxor		%xmm1,%xmm5
	movdqu		%xmm5,0x90(%rsi)
	movdqu		0x50(%rdx),%xmm1
	pxor		%xmm1,%xmm6
	movdqu		%xmm6,0x50(%rsi)
	movdqu		0xd0(%rdx),%xmm1
	pxor		%xmm1,%xmm7
	movdqu		%xmm7,0xd0(%rsi)
	movdqu		0x20(%rdx),%xmm1
	pxor		%xmm1,%xmm8
	movdqu		%xmm8,0x20(%rsi)
	movdqu		0xa0(%rdx),%xmm1
	pxor		%xmm1,%xmm9
	movdqu		%xmm9,0xa0(%rsi)
	movdqu		0x60(%rdx),%xmm1
	pxor		%xmm1,%xmm10
	movdqu		%xmm10,0x60(%rsi)
	movdqu		0xe0(%rdx),%xmm1
	pxor		%xmm1,%xmm11
	movdqu		%xmm11,0xe0(%rsi)
	movdqu		0x30(%rdx),%xmm1
	pxor		%xmm1,%xmm12
	movdqu		%xmm12,0x30(%rsi)
	movdqu		0xb0(%rdx),%xmm1
	pxor		%xmm1,%xmm13
	movdqu		%xmm13,0xb0(%rsi)
	movdqu		0x70(%rdx),%xmm1
	pxor		%xmm1,%xmm14
	movdqu		%xmm14,0x70(%rsi)
	movdqu		0xf0(%rdx),%xmm1
	pxor		%xmm1,%xmm15
	movdqu		%xmm15,0xf0(%rsi)

	lea		-8(%r10),%rsp	# restore the pre-alignment stack pointer
	ret
ENDPROC(chacha20_4block_xor_ssse3)
AirFortressIlikara/LS2K0300-linux-4.19
13,461
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
/* * Implement fast CRC32C with PCLMULQDQ instructions. (x86_64) * * The white papers on CRC32C calculations with PCLMULQDQ instruction can be * downloaded from: * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/crc-iscsi-polynomial-crc32-instruction-paper.pdf * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-paper.pdf * * Copyright (C) 2012 Intel Corporation. * * Authors: * Wajdi Feghali <wajdi.k.feghali@intel.com> * James Guilford <james.guilford@intel.com> * David Cote <david.m.cote@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <asm/inst.h> #include <linux/linkage.h> #include <asm/nospec-branch.h> ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction .macro LABEL prefix n \prefix\n\(): .endm .macro JMPTBL_ENTRY i .word crc_\i - crc_array .endm .macro JNC_LESS_THAN j jnc less_than_\j .endm # Define threshold where buffers are considered "small" and routed to more # efficient "by-1" code. This "by-1" code only handles up to 255 bytes, so # SMALL_SIZE can be no larger than 255. #define SMALL_SIZE 200 .if (SMALL_SIZE > 255) .error "SMALL_ SIZE must be < 256" .endif # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init); .text ENTRY(crc_pcl) #define bufp %rdi #define bufp_dw %edi #define bufp_w %di #define bufp_b %dil #define bufptmp %rcx #define block_0 %rcx #define block_1 %rdx #define block_2 %r11 #define len %rsi #define len_dw %esi #define len_w %si #define len_b %sil #define crc_init_arg %rdx #define tmp %rbx #define crc_init %r8 #define crc_init_dw %r8d #define crc1 %r9 #define crc2 %r10 pushq %rbx pushq %rdi pushq %rsi ## Move crc_init for Linux to a different mov crc_init_arg, crc_init ################################################################ ## 1) ALIGN: ################################################################ mov bufp, bufptmp # rdi = *buf neg bufp and $7, bufp # calculate the unalignment amount of # the address je proc_block # Skip if aligned ## If len is less than 8 and we're unaligned, we need to jump ## to special code to avoid reading beyond the end of the buffer cmp $8, len jae do_align # less_than_8 expects length in upper 3 bits of len_dw # less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30] shl $32-3+1, len_dw jmp less_than_8_post_shl1 do_align: #### Calculate CRC of unaligned bytes of the buffer (if any) movq (bufptmp), tmp # load a quadward from the buffer add bufp, bufptmp # align buffer pointer for quadword # processing sub bufp, len # update buffer length align_loop: crc32b %bl, crc_init_dw # compute 
crc32 of 1-byte shr $8, tmp # get next byte dec bufp jne align_loop proc_block: ################################################################ ## 2) PROCESS BLOCKS: ################################################################ ## compute num of bytes to be processed movq len, tmp # save num bytes in tmp cmpq $128*24, len jae full_block continue_block: cmpq $SMALL_SIZE, len jb small ## len < 128*24 movq $2731, %rax # 2731 = ceil(2^16 / 24) mul len_dw shrq $16, %rax ## eax contains floor(bytes / 24) = num 24-byte chunks to do ## process rax 24-byte chunks (128 >= rax >= 0) ## compute end address of each block ## block 0 (base addr + RAX * 8) ## block 1 (base addr + RAX * 16) ## block 2 (base addr + RAX * 24) lea (bufptmp, %rax, 8), block_0 lea (block_0, %rax, 8), block_1 lea (block_1, %rax, 8), block_2 xor crc1, crc1 xor crc2, crc2 ## branch into array lea jump_table(%rip), bufp movzwq (bufp, %rax, 2), len lea crc_array(%rip), bufp lea (bufp, len, 1), bufp JMP_NOSPEC bufp ################################################################ ## 2a) PROCESS FULL BLOCKS: ################################################################ full_block: movl $128,%eax lea 128*8*2(block_0), block_1 lea 128*8*3(block_0), block_2 add $128*8*1, block_0 xor crc1,crc1 xor crc2,crc2 # Fall thruogh into top of crc array (crc_128) ################################################################ ## 3) CRC Array: ################################################################ crc_array: i=128 .rept 128-1 .altmacro LABEL crc_ %i .noaltmacro crc32q -i*8(block_0), crc_init crc32q -i*8(block_1), crc1 crc32q -i*8(block_2), crc2 i=(i-1) .endr .altmacro LABEL crc_ %i .noaltmacro crc32q -i*8(block_0), crc_init crc32q -i*8(block_1), crc1 # SKIP crc32 -i*8(block_2), crc2 ; Don't do this one yet mov block_2, block_0 ################################################################ ## 4) Combine three results: ################################################################ lea (K_table-8)(%rip), 
bufp # first entry is for idx 1 shlq $3, %rax # rax *= 8 pmovzxdq (bufp,%rax), %xmm0 # 2 consts: K1:K2 leal (%eax,%eax,2), %eax # rax *= 3 (total *24) subq %rax, tmp # tmp -= rax*24 movq crc_init, %xmm1 # CRC for block 1 PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2 movq crc1, %xmm2 # CRC for block 2 PCLMULQDQ 0x10, %xmm0, %xmm2 # Multiply by K1 pxor %xmm2,%xmm1 movq %xmm1, %rax xor -i*8(block_2), %rax mov crc2, crc_init crc32 %rax, crc_init ################################################################ ## 5) Check for end: ################################################################ LABEL crc_ 0 mov tmp, len cmp $128*24, tmp jae full_block cmp $24, tmp jae continue_block less_than_24: shl $32-4, len_dw # less_than_16 expects length # in upper 4 bits of len_dw jnc less_than_16 crc32q (bufptmp), crc_init crc32q 8(bufptmp), crc_init jz do_return add $16, bufptmp # len is less than 8 if we got here # less_than_8 expects length in upper 3 bits of len_dw # less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30] shl $2, len_dw jmp less_than_8_post_shl1 ####################################################################### ## 6) LESS THAN 256-bytes REMAIN AT THIS POINT (8-bits of len are full) ####################################################################### small: shl $32-8, len_dw # Prepare len_dw for less_than_256 j=256 .rept 5 # j = {256, 128, 64, 32, 16} .altmacro LABEL less_than_ %j # less_than_j: Length should be in # upper lg(j) bits of len_dw j=(j/2) shl $1, len_dw # Get next MSB JNC_LESS_THAN %j .noaltmacro i=0 .rept (j/8) crc32q i(bufptmp), crc_init # Compute crc32 of 8-byte data i=i+8 .endr jz do_return # Return if remaining length is zero add $j, bufptmp # Advance buf .endr less_than_8: # Length should be stored in # upper 3 bits of len_dw shl $1, len_dw less_than_8_post_shl1: jnc less_than_4 crc32l (bufptmp), crc_init_dw # CRC of 4 bytes jz do_return # return if remaining data is zero add $4, bufptmp less_than_4: # Length should be 
stored in # upper 2 bits of len_dw shl $1, len_dw jnc less_than_2 crc32w (bufptmp), crc_init_dw # CRC of 2 bytes jz do_return # return if remaining data is zero add $2, bufptmp less_than_2: # Length should be stored in the MSB # of len_dw shl $1, len_dw jnc less_than_1 crc32b (bufptmp), crc_init_dw # CRC of 1 byte less_than_1: # Length should be zero do_return: movq crc_init, %rax popq %rsi popq %rdi popq %rbx ret ENDPROC(crc_pcl) .section .rodata, "a", @progbits ################################################################ ## jump table Table is 129 entries x 2 bytes each ################################################################ .align 4 jump_table: i=0 .rept 129 .altmacro JMPTBL_ENTRY %i .noaltmacro i=i+1 .endr ################################################################ ## PCLMULQDQ tables ## Table is 128 entries x 2 words (8 bytes) each ################################################################ .align 8 K_table: .long 0x493c7d27, 0x00000001 .long 0xba4fc28e, 0x493c7d27 .long 0xddc0152b, 0xf20c0dfe .long 0x9e4addf8, 0xba4fc28e .long 0x39d3b296, 0x3da6d0cb .long 0x0715ce53, 0xddc0152b .long 0x47db8317, 0x1c291d04 .long 0x0d3b6092, 0x9e4addf8 .long 0xc96cfdc0, 0x740eef02 .long 0x878a92a7, 0x39d3b296 .long 0xdaece73e, 0x083a6eec .long 0xab7aff2a, 0x0715ce53 .long 0x2162d385, 0xc49f4f67 .long 0x83348832, 0x47db8317 .long 0x299847d5, 0x2ad91c30 .long 0xb9e02b86, 0x0d3b6092 .long 0x18b33a4e, 0x6992cea2 .long 0xb6dd949b, 0xc96cfdc0 .long 0x78d9ccb7, 0x7e908048 .long 0xbac2fd7b, 0x878a92a7 .long 0xa60ce07b, 0x1b3d8f29 .long 0xce7f39f4, 0xdaece73e .long 0x61d82e56, 0xf1d0f55e .long 0xd270f1a2, 0xab7aff2a .long 0xc619809d, 0xa87ab8a8 .long 0x2b3cac5d, 0x2162d385 .long 0x65863b64, 0x8462d800 .long 0x1b03397f, 0x83348832 .long 0xebb883bd, 0x71d111a8 .long 0xb3e32c28, 0x299847d5 .long 0x064f7f26, 0xffd852c6 .long 0xdd7e3b0c, 0xb9e02b86 .long 0xf285651c, 0xdcb17aa4 .long 0x10746f3c, 0x18b33a4e .long 0xc7a68855, 0xf37c5aee .long 0x271d9844, 0xb6dd949b .long 
0x8e766a0c, 0x6051d5a2 .long 0x93a5f730, 0x78d9ccb7 .long 0x6cb08e5c, 0x18b0d4ff .long 0x6b749fb2, 0xbac2fd7b .long 0x1393e203, 0x21f3d99c .long 0xcec3662e, 0xa60ce07b .long 0x96c515bb, 0x8f158014 .long 0xe6fc4e6a, 0xce7f39f4 .long 0x8227bb8a, 0xa00457f7 .long 0xb0cd4768, 0x61d82e56 .long 0x39c7ff35, 0x8d6d2c43 .long 0xd7a4825c, 0xd270f1a2 .long 0x0ab3844b, 0x00ac29cf .long 0x0167d312, 0xc619809d .long 0xf6076544, 0xe9adf796 .long 0x26f6a60a, 0x2b3cac5d .long 0xa741c1bf, 0x96638b34 .long 0x98d8d9cb, 0x65863b64 .long 0x49c3cc9c, 0xe0e9f351 .long 0x68bce87a, 0x1b03397f .long 0x57a3d037, 0x9af01f2d .long 0x6956fc3b, 0xebb883bd .long 0x42d98888, 0x2cff42cf .long 0x3771e98f, 0xb3e32c28 .long 0xb42ae3d9, 0x88f25a3a .long 0x2178513a, 0x064f7f26 .long 0xe0ac139e, 0x4e36f0b0 .long 0x170076fa, 0xdd7e3b0c .long 0x444dd413, 0xbd6f81f8 .long 0x6f345e45, 0xf285651c .long 0x41d17b64, 0x91c9bd4b .long 0xff0dba97, 0x10746f3c .long 0xa2b73df1, 0x885f087b .long 0xf872e54c, 0xc7a68855 .long 0x1e41e9fc, 0x4c144932 .long 0x86d8e4d2, 0x271d9844 .long 0x651bd98b, 0x52148f02 .long 0x5bb8f1bc, 0x8e766a0c .long 0xa90fd27a, 0xa3c6f37a .long 0xb3af077a, 0x93a5f730 .long 0x4984d782, 0xd7c0557f .long 0xca6ef3ac, 0x6cb08e5c .long 0x234e0b26, 0x63ded06a .long 0xdd66cbbb, 0x6b749fb2 .long 0x4597456a, 0x4d56973c .long 0xe9e28eb4, 0x1393e203 .long 0x7b3ff57a, 0x9669c9df .long 0xc9c8b782, 0xcec3662e .long 0x3f70cc6f, 0xe417f38a .long 0x93e106a4, 0x96c515bb .long 0x62ec6c6d, 0x4b9e0f71 .long 0xd813b325, 0xe6fc4e6a .long 0x0df04680, 0xd104b8fc .long 0x2342001e, 0x8227bb8a .long 0x0a2a8d7e, 0x5b397730 .long 0x6d9a4957, 0xb0cd4768 .long 0xe8b6368b, 0xe78eb416 .long 0xd2c3ed1a, 0x39c7ff35 .long 0x995a5724, 0x61ff0e01 .long 0x9ef68d35, 0xd7a4825c .long 0x0c139b31, 0x8d96551c .long 0xf2271e60, 0x0ab3844b .long 0x0b0bf8ca, 0x0bf80dd2 .long 0x2664fd8b, 0x0167d312 .long 0xed64812d, 0x8821abed .long 0x02ee03b2, 0xf6076544 .long 0x8604ae0f, 0x6a45d2b2 .long 0x363bd6b3, 0x26f6a60a .long 0x135c83fd, 0xd8d26619 
.long 0x5fabe670, 0xa741c1bf .long 0x35ec3279, 0xde87806c .long 0x00bcf5f6, 0x98d8d9cb .long 0x8ae00689, 0x14338754 .long 0x17f27698, 0x49c3cc9c .long 0x58ca5f00, 0x5bd2011f .long 0xaa7c7ad5, 0x68bce87a .long 0xb5cfca28, 0xdd07448e .long 0xded288f8, 0x57a3d037 .long 0x59f229bc, 0xdde8f5b9 .long 0x6d390dec, 0x6956fc3b .long 0x37170390, 0xa3e3e02c .long 0x6353c1cc, 0x42d98888 .long 0xc4584f5c, 0xd73c7bea .long 0xf48642e9, 0x3771e98f .long 0x531377e2, 0x80ff0093 .long 0xdd35bc8d, 0xb42ae3d9 .long 0xb25b29f2, 0x8fe4c34d .long 0x9a5ede41, 0x2178513a .long 0xa563905d, 0xdf99fc11 .long 0x45cddf4e, 0xe0ac139e .long 0xacfa3103, 0x6c23e841 .long 0xa51b6135, 0x170076fa
AirFortressIlikara/LS2K0300-linux-4.19
18,792
arch/x86/crypto/morus1280-sse2-asm.S
/* * SSE2 implementation of MORUS-1280 * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/frame.h> #define SHUFFLE_MASK(i0, i1, i2, i3) \ (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6)) #define MASK2 SHUFFLE_MASK(2, 3, 0, 1) #define STATE0_LO %xmm0 #define STATE0_HI %xmm1 #define STATE1_LO %xmm2 #define STATE1_HI %xmm3 #define STATE2_LO %xmm4 #define STATE2_HI %xmm5 #define STATE3_LO %xmm6 #define STATE3_HI %xmm7 #define STATE4_LO %xmm8 #define STATE4_HI %xmm9 #define KEY_LO %xmm10 #define KEY_HI %xmm11 #define MSG_LO %xmm10 #define MSG_HI %xmm11 #define T0_LO %xmm12 #define T0_HI %xmm13 #define T1_LO %xmm14 #define T1_HI %xmm15 .section .rodata.cst16.morus640_const, "aM", @progbits, 16 .align 16 .Lmorus640_const_0: .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 .Lmorus640_const_1: .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd .section .rodata.cst16.morus640_counter, "aM", @progbits, 16 .align 16 .Lmorus640_counter_0: .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f .Lmorus640_counter_1: .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f .text .macro rol1 hi, lo /* * HI_1 | HI_0 || LO_1 | LO_0 * ==> * HI_0 | HI_1 || LO_1 | LO_0 * ==> * HI_0 | LO_1 || LO_0 | HI_1 */ pshufd $MASK2, \hi, \hi movdqa \hi, T0_LO punpcklqdq \lo, T0_LO punpckhqdq \hi, \lo movdqa \lo, \hi movdqa T0_LO, \lo .endm .macro rol2 hi, lo movdqa \lo, T0_LO movdqa \hi, \lo movdqa T0_LO, \hi .endm .macro rol3 hi, lo /* * HI_1 | HI_0 || LO_1 | LO_0 * ==> * HI_0 | HI_1 || LO_1 | 
LO_0 * ==> * LO_0 | HI_1 || HI_0 | LO_1 */ pshufd $MASK2, \hi, \hi movdqa \lo, T0_LO punpckhqdq \hi, T0_LO punpcklqdq \lo, \hi movdqa T0_LO, \lo .endm .macro morus1280_round s0_l, s0_h, s1_l, s1_h, s2_l, s2_h, s3_l, s3_h, s4_l, s4_h, b, w movdqa \s1_l, T0_LO pand \s2_l, T0_LO pxor T0_LO, \s0_l movdqa \s1_h, T0_LO pand \s2_h, T0_LO pxor T0_LO, \s0_h pxor \s3_l, \s0_l pxor \s3_h, \s0_h movdqa \s0_l, T0_LO psllq $\b, T0_LO psrlq $(64 - \b), \s0_l pxor T0_LO, \s0_l movdqa \s0_h, T0_LO psllq $\b, T0_LO psrlq $(64 - \b), \s0_h pxor T0_LO, \s0_h \w \s3_h, \s3_l .endm /* * __morus1280_update: internal ABI * input: * STATE[0-4] - input state * MSG - message block * output: * STATE[0-4] - output state * changed: * T0 */ __morus1280_update: morus1280_round \ STATE0_LO, STATE0_HI, \ STATE1_LO, STATE1_HI, \ STATE2_LO, STATE2_HI, \ STATE3_LO, STATE3_HI, \ STATE4_LO, STATE4_HI, \ 13, rol1 pxor MSG_LO, STATE1_LO pxor MSG_HI, STATE1_HI morus1280_round \ STATE1_LO, STATE1_HI, \ STATE2_LO, STATE2_HI, \ STATE3_LO, STATE3_HI, \ STATE4_LO, STATE4_HI, \ STATE0_LO, STATE0_HI, \ 46, rol2 pxor MSG_LO, STATE2_LO pxor MSG_HI, STATE2_HI morus1280_round \ STATE2_LO, STATE2_HI, \ STATE3_LO, STATE3_HI, \ STATE4_LO, STATE4_HI, \ STATE0_LO, STATE0_HI, \ STATE1_LO, STATE1_HI, \ 38, rol3 pxor MSG_LO, STATE3_LO pxor MSG_HI, STATE3_HI morus1280_round \ STATE3_LO, STATE3_HI, \ STATE4_LO, STATE4_HI, \ STATE0_LO, STATE0_HI, \ STATE1_LO, STATE1_HI, \ STATE2_LO, STATE2_HI, \ 7, rol2 pxor MSG_LO, STATE4_LO pxor MSG_HI, STATE4_HI morus1280_round \ STATE4_LO, STATE4_HI, \ STATE0_LO, STATE0_HI, \ STATE1_LO, STATE1_HI, \ STATE2_LO, STATE2_HI, \ STATE3_LO, STATE3_HI, \ 4, rol1 ret ENDPROC(__morus1280_update) /* * __morus1280_update_zero: internal ABI * input: * STATE[0-4] - input state * output: * STATE[0-4] - output state * changed: * T0 */ __morus1280_update_zero: morus1280_round \ STATE0_LO, STATE0_HI, \ STATE1_LO, STATE1_HI, \ STATE2_LO, STATE2_HI, \ STATE3_LO, STATE3_HI, \ STATE4_LO, STATE4_HI, \ 13, rol1 
morus1280_round \ STATE1_LO, STATE1_HI, \ STATE2_LO, STATE2_HI, \ STATE3_LO, STATE3_HI, \ STATE4_LO, STATE4_HI, \ STATE0_LO, STATE0_HI, \ 46, rol2 morus1280_round \ STATE2_LO, STATE2_HI, \ STATE3_LO, STATE3_HI, \ STATE4_LO, STATE4_HI, \ STATE0_LO, STATE0_HI, \ STATE1_LO, STATE1_HI, \ 38, rol3 morus1280_round \ STATE3_LO, STATE3_HI, \ STATE4_LO, STATE4_HI, \ STATE0_LO, STATE0_HI, \ STATE1_LO, STATE1_HI, \ STATE2_LO, STATE2_HI, \ 7, rol2 morus1280_round \ STATE4_LO, STATE4_HI, \ STATE0_LO, STATE0_HI, \ STATE1_LO, STATE1_HI, \ STATE2_LO, STATE2_HI, \ STATE3_LO, STATE3_HI, \ 4, rol1 ret ENDPROC(__morus1280_update_zero) /* * __load_partial: internal ABI * input: * %rsi - src * %rcx - bytes * output: * MSG - message block * changed: * %r8 * %r9 */ __load_partial: xor %r9d, %r9d pxor MSG_LO, MSG_LO pxor MSG_HI, MSG_HI mov %rcx, %r8 and $0x1, %r8 jz .Lld_partial_1 mov %rcx, %r8 and $0x1E, %r8 add %rsi, %r8 mov (%r8), %r9b .Lld_partial_1: mov %rcx, %r8 and $0x2, %r8 jz .Lld_partial_2 mov %rcx, %r8 and $0x1C, %r8 add %rsi, %r8 shl $16, %r9 mov (%r8), %r9w .Lld_partial_2: mov %rcx, %r8 and $0x4, %r8 jz .Lld_partial_4 mov %rcx, %r8 and $0x18, %r8 add %rsi, %r8 shl $32, %r9 mov (%r8), %r8d xor %r8, %r9 .Lld_partial_4: movq %r9, MSG_LO mov %rcx, %r8 and $0x8, %r8 jz .Lld_partial_8 mov %rcx, %r8 and $0x10, %r8 add %rsi, %r8 pslldq $8, MSG_LO movq (%r8), T0_LO pxor T0_LO, MSG_LO .Lld_partial_8: mov %rcx, %r8 and $0x10, %r8 jz .Lld_partial_16 movdqa MSG_LO, MSG_HI movdqu (%rsi), MSG_LO .Lld_partial_16: ret ENDPROC(__load_partial) /* * __store_partial: internal ABI * input: * %rdx - dst * %rcx - bytes * output: * T0 - message block * changed: * %r8 * %r9 * %r10 */ __store_partial: mov %rcx, %r8 mov %rdx, %r9 cmp $16, %r8 jl .Lst_partial_16 movdqu T0_LO, (%r9) movdqa T0_HI, T0_LO sub $16, %r8 add $16, %r9 .Lst_partial_16: movq T0_LO, %r10 cmp $8, %r8 jl .Lst_partial_8 mov %r10, (%r9) psrldq $8, T0_LO movq T0_LO, %r10 sub $8, %r8 add $8, %r9 .Lst_partial_8: cmp $4, %r8 jl 
.Lst_partial_4 mov %r10d, (%r9) shr $32, %r10 sub $4, %r8 add $4, %r9 .Lst_partial_4: cmp $2, %r8 jl .Lst_partial_2 mov %r10w, (%r9) shr $16, %r10 sub $2, %r8 add $2, %r9 .Lst_partial_2: cmp $1, %r8 jl .Lst_partial_1 mov %r10b, (%r9) .Lst_partial_1: ret ENDPROC(__store_partial) /* * void crypto_morus1280_sse2_init(void *state, const void *key, * const void *iv); */ ENTRY(crypto_morus1280_sse2_init) FRAME_BEGIN /* load IV: */ pxor STATE0_HI, STATE0_HI movdqu (%rdx), STATE0_LO /* load key: */ movdqu 0(%rsi), KEY_LO movdqu 16(%rsi), KEY_HI movdqa KEY_LO, STATE1_LO movdqa KEY_HI, STATE1_HI /* load all ones: */ pcmpeqd STATE2_LO, STATE2_LO pcmpeqd STATE2_HI, STATE2_HI /* load all zeros: */ pxor STATE3_LO, STATE3_LO pxor STATE3_HI, STATE3_HI /* load the constant: */ movdqa .Lmorus640_const_0, STATE4_LO movdqa .Lmorus640_const_1, STATE4_HI /* update 16 times with zero: */ call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero /* xor-in the key again after updates: */ pxor KEY_LO, STATE1_LO pxor KEY_HI, STATE1_HI /* store the state: */ movdqu STATE0_LO, (0 * 16)(%rdi) movdqu STATE0_HI, (1 * 16)(%rdi) movdqu STATE1_LO, (2 * 16)(%rdi) movdqu STATE1_HI, (3 * 16)(%rdi) movdqu STATE2_LO, (4 * 16)(%rdi) movdqu STATE2_HI, (5 * 16)(%rdi) movdqu STATE3_LO, (6 * 16)(%rdi) movdqu STATE3_HI, (7 * 16)(%rdi) movdqu STATE4_LO, (8 * 16)(%rdi) movdqu STATE4_HI, (9 * 16)(%rdi) FRAME_END ret ENDPROC(crypto_morus1280_sse2_init) /* * void crypto_morus1280_sse2_ad(void *state, const void *data, * unsigned int length); */ ENTRY(crypto_morus1280_sse2_ad) FRAME_BEGIN cmp $32, %rdx jb .Lad_out /* 
load the state: */ movdqu (0 * 16)(%rdi), STATE0_LO movdqu (1 * 16)(%rdi), STATE0_HI movdqu (2 * 16)(%rdi), STATE1_LO movdqu (3 * 16)(%rdi), STATE1_HI movdqu (4 * 16)(%rdi), STATE2_LO movdqu (5 * 16)(%rdi), STATE2_HI movdqu (6 * 16)(%rdi), STATE3_LO movdqu (7 * 16)(%rdi), STATE3_HI movdqu (8 * 16)(%rdi), STATE4_LO movdqu (9 * 16)(%rdi), STATE4_HI mov %rsi, %r8 and $0xF, %r8 jnz .Lad_u_loop .align 4 .Lad_a_loop: movdqa 0(%rsi), MSG_LO movdqa 16(%rsi), MSG_HI call __morus1280_update sub $32, %rdx add $32, %rsi cmp $32, %rdx jge .Lad_a_loop jmp .Lad_cont .align 4 .Lad_u_loop: movdqu 0(%rsi), MSG_LO movdqu 16(%rsi), MSG_HI call __morus1280_update sub $32, %rdx add $32, %rsi cmp $32, %rdx jge .Lad_u_loop .Lad_cont: /* store the state: */ movdqu STATE0_LO, (0 * 16)(%rdi) movdqu STATE0_HI, (1 * 16)(%rdi) movdqu STATE1_LO, (2 * 16)(%rdi) movdqu STATE1_HI, (3 * 16)(%rdi) movdqu STATE2_LO, (4 * 16)(%rdi) movdqu STATE2_HI, (5 * 16)(%rdi) movdqu STATE3_LO, (6 * 16)(%rdi) movdqu STATE3_HI, (7 * 16)(%rdi) movdqu STATE4_LO, (8 * 16)(%rdi) movdqu STATE4_HI, (9 * 16)(%rdi) .Lad_out: FRAME_END ret ENDPROC(crypto_morus1280_sse2_ad) /* * void crypto_morus1280_sse2_enc(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus1280_sse2_enc) FRAME_BEGIN cmp $32, %rcx jb .Lenc_out /* load the state: */ movdqu (0 * 16)(%rdi), STATE0_LO movdqu (1 * 16)(%rdi), STATE0_HI movdqu (2 * 16)(%rdi), STATE1_LO movdqu (3 * 16)(%rdi), STATE1_HI movdqu (4 * 16)(%rdi), STATE2_LO movdqu (5 * 16)(%rdi), STATE2_HI movdqu (6 * 16)(%rdi), STATE3_LO movdqu (7 * 16)(%rdi), STATE3_HI movdqu (8 * 16)(%rdi), STATE4_LO movdqu (9 * 16)(%rdi), STATE4_HI mov %rsi, %r8 or %rdx, %r8 and $0xF, %r8 jnz .Lenc_u_loop .align 4 .Lenc_a_loop: movdqa 0(%rsi), MSG_LO movdqa 16(%rsi), MSG_HI movdqa STATE1_LO, T1_LO movdqa STATE1_HI, T1_HI rol3 T1_HI, T1_LO movdqa MSG_LO, T0_LO movdqa MSG_HI, T0_HI pxor T1_LO, T0_LO pxor T1_HI, T0_HI pxor STATE0_LO, T0_LO pxor STATE0_HI, T0_HI movdqa STATE2_LO, T1_LO 
movdqa STATE2_HI, T1_HI pand STATE3_LO, T1_LO pand STATE3_HI, T1_HI pxor T1_LO, T0_LO pxor T1_HI, T0_HI movdqa T0_LO, 0(%rdx) movdqa T0_HI, 16(%rdx) call __morus1280_update sub $32, %rcx add $32, %rsi add $32, %rdx cmp $32, %rcx jge .Lenc_a_loop jmp .Lenc_cont .align 4 .Lenc_u_loop: movdqu 0(%rsi), MSG_LO movdqu 16(%rsi), MSG_HI movdqa STATE1_LO, T1_LO movdqa STATE1_HI, T1_HI rol3 T1_HI, T1_LO movdqa MSG_LO, T0_LO movdqa MSG_HI, T0_HI pxor T1_LO, T0_LO pxor T1_HI, T0_HI pxor STATE0_LO, T0_LO pxor STATE0_HI, T0_HI movdqa STATE2_LO, T1_LO movdqa STATE2_HI, T1_HI pand STATE3_LO, T1_LO pand STATE3_HI, T1_HI pxor T1_LO, T0_LO pxor T1_HI, T0_HI movdqu T0_LO, 0(%rdx) movdqu T0_HI, 16(%rdx) call __morus1280_update sub $32, %rcx add $32, %rsi add $32, %rdx cmp $32, %rcx jge .Lenc_u_loop .Lenc_cont: /* store the state: */ movdqu STATE0_LO, (0 * 16)(%rdi) movdqu STATE0_HI, (1 * 16)(%rdi) movdqu STATE1_LO, (2 * 16)(%rdi) movdqu STATE1_HI, (3 * 16)(%rdi) movdqu STATE2_LO, (4 * 16)(%rdi) movdqu STATE2_HI, (5 * 16)(%rdi) movdqu STATE3_LO, (6 * 16)(%rdi) movdqu STATE3_HI, (7 * 16)(%rdi) movdqu STATE4_LO, (8 * 16)(%rdi) movdqu STATE4_HI, (9 * 16)(%rdi) .Lenc_out: FRAME_END ret ENDPROC(crypto_morus1280_sse2_enc) /* * void crypto_morus1280_sse2_enc_tail(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus1280_sse2_enc_tail) FRAME_BEGIN /* load the state: */ movdqu (0 * 16)(%rdi), STATE0_LO movdqu (1 * 16)(%rdi), STATE0_HI movdqu (2 * 16)(%rdi), STATE1_LO movdqu (3 * 16)(%rdi), STATE1_HI movdqu (4 * 16)(%rdi), STATE2_LO movdqu (5 * 16)(%rdi), STATE2_HI movdqu (6 * 16)(%rdi), STATE3_LO movdqu (7 * 16)(%rdi), STATE3_HI movdqu (8 * 16)(%rdi), STATE4_LO movdqu (9 * 16)(%rdi), STATE4_HI /* encrypt message: */ call __load_partial movdqa STATE1_LO, T1_LO movdqa STATE1_HI, T1_HI rol3 T1_HI, T1_LO movdqa MSG_LO, T0_LO movdqa MSG_HI, T0_HI pxor T1_LO, T0_LO pxor T1_HI, T0_HI pxor STATE0_LO, T0_LO pxor STATE0_HI, T0_HI movdqa STATE2_LO, T1_LO movdqa STATE2_HI, 
T1_HI pand STATE3_LO, T1_LO pand STATE3_HI, T1_HI pxor T1_LO, T0_LO pxor T1_HI, T0_HI call __store_partial call __morus1280_update /* store the state: */ movdqu STATE0_LO, (0 * 16)(%rdi) movdqu STATE0_HI, (1 * 16)(%rdi) movdqu STATE1_LO, (2 * 16)(%rdi) movdqu STATE1_HI, (3 * 16)(%rdi) movdqu STATE2_LO, (4 * 16)(%rdi) movdqu STATE2_HI, (5 * 16)(%rdi) movdqu STATE3_LO, (6 * 16)(%rdi) movdqu STATE3_HI, (7 * 16)(%rdi) movdqu STATE4_LO, (8 * 16)(%rdi) movdqu STATE4_HI, (9 * 16)(%rdi) FRAME_END ret ENDPROC(crypto_morus1280_sse2_enc_tail) /* * void crypto_morus1280_sse2_dec(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus1280_sse2_dec) FRAME_BEGIN cmp $32, %rcx jb .Ldec_out /* load the state: */ movdqu (0 * 16)(%rdi), STATE0_LO movdqu (1 * 16)(%rdi), STATE0_HI movdqu (2 * 16)(%rdi), STATE1_LO movdqu (3 * 16)(%rdi), STATE1_HI movdqu (4 * 16)(%rdi), STATE2_LO movdqu (5 * 16)(%rdi), STATE2_HI movdqu (6 * 16)(%rdi), STATE3_LO movdqu (7 * 16)(%rdi), STATE3_HI movdqu (8 * 16)(%rdi), STATE4_LO movdqu (9 * 16)(%rdi), STATE4_HI mov %rsi, %r8 or %rdx, %r8 and $0xF, %r8 jnz .Ldec_u_loop .align 4 .Ldec_a_loop: movdqa 0(%rsi), MSG_LO movdqa 16(%rsi), MSG_HI pxor STATE0_LO, MSG_LO pxor STATE0_HI, MSG_HI movdqa STATE1_LO, T1_LO movdqa STATE1_HI, T1_HI rol3 T1_HI, T1_LO pxor T1_LO, MSG_LO pxor T1_HI, MSG_HI movdqa STATE2_LO, T1_LO movdqa STATE2_HI, T1_HI pand STATE3_LO, T1_LO pand STATE3_HI, T1_HI pxor T1_LO, MSG_LO pxor T1_HI, MSG_HI movdqa MSG_LO, 0(%rdx) movdqa MSG_HI, 16(%rdx) call __morus1280_update sub $32, %rcx add $32, %rsi add $32, %rdx cmp $32, %rcx jge .Ldec_a_loop jmp .Ldec_cont .align 4 .Ldec_u_loop: movdqu 0(%rsi), MSG_LO movdqu 16(%rsi), MSG_HI pxor STATE0_LO, MSG_LO pxor STATE0_HI, MSG_HI movdqa STATE1_LO, T1_LO movdqa STATE1_HI, T1_HI rol3 T1_HI, T1_LO pxor T1_LO, MSG_LO pxor T1_HI, MSG_HI movdqa STATE2_LO, T1_LO movdqa STATE2_HI, T1_HI pand STATE3_LO, T1_LO pand STATE3_HI, T1_HI pxor T1_LO, MSG_LO pxor T1_HI, MSG_HI movdqu MSG_LO, 
0(%rdx) movdqu MSG_HI, 16(%rdx) call __morus1280_update sub $32, %rcx add $32, %rsi add $32, %rdx cmp $32, %rcx jge .Ldec_u_loop .Ldec_cont: /* store the state: */ movdqu STATE0_LO, (0 * 16)(%rdi) movdqu STATE0_HI, (1 * 16)(%rdi) movdqu STATE1_LO, (2 * 16)(%rdi) movdqu STATE1_HI, (3 * 16)(%rdi) movdqu STATE2_LO, (4 * 16)(%rdi) movdqu STATE2_HI, (5 * 16)(%rdi) movdqu STATE3_LO, (6 * 16)(%rdi) movdqu STATE3_HI, (7 * 16)(%rdi) movdqu STATE4_LO, (8 * 16)(%rdi) movdqu STATE4_HI, (9 * 16)(%rdi) .Ldec_out: FRAME_END ret ENDPROC(crypto_morus1280_sse2_dec) /* * void crypto_morus1280_sse2_dec_tail(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus1280_sse2_dec_tail) FRAME_BEGIN /* load the state: */ movdqu (0 * 16)(%rdi), STATE0_LO movdqu (1 * 16)(%rdi), STATE0_HI movdqu (2 * 16)(%rdi), STATE1_LO movdqu (3 * 16)(%rdi), STATE1_HI movdqu (4 * 16)(%rdi), STATE2_LO movdqu (5 * 16)(%rdi), STATE2_HI movdqu (6 * 16)(%rdi), STATE3_LO movdqu (7 * 16)(%rdi), STATE3_HI movdqu (8 * 16)(%rdi), STATE4_LO movdqu (9 * 16)(%rdi), STATE4_HI /* decrypt message: */ call __load_partial pxor STATE0_LO, MSG_LO pxor STATE0_HI, MSG_HI movdqa STATE1_LO, T1_LO movdqa STATE1_HI, T1_HI rol3 T1_HI, T1_LO pxor T1_LO, MSG_LO pxor T1_HI, MSG_HI movdqa STATE2_LO, T1_LO movdqa STATE2_HI, T1_HI pand STATE3_LO, T1_LO pand STATE3_HI, T1_HI pxor T1_LO, MSG_LO pxor T1_HI, MSG_HI movdqa MSG_LO, T0_LO movdqa MSG_HI, T0_HI call __store_partial /* mask with byte count: */ movq %rcx, T0_LO punpcklbw T0_LO, T0_LO punpcklbw T0_LO, T0_LO punpcklbw T0_LO, T0_LO punpcklbw T0_LO, T0_LO movdqa T0_LO, T0_HI movdqa .Lmorus640_counter_0, T1_LO movdqa .Lmorus640_counter_1, T1_HI pcmpgtb T1_LO, T0_LO pcmpgtb T1_HI, T0_HI pand T0_LO, MSG_LO pand T0_HI, MSG_HI call __morus1280_update /* store the state: */ movdqu STATE0_LO, (0 * 16)(%rdi) movdqu STATE0_HI, (1 * 16)(%rdi) movdqu STATE1_LO, (2 * 16)(%rdi) movdqu STATE1_HI, (3 * 16)(%rdi) movdqu STATE2_LO, (4 * 16)(%rdi) movdqu STATE2_HI, (5 * 
16)(%rdi) movdqu STATE3_LO, (6 * 16)(%rdi) movdqu STATE3_HI, (7 * 16)(%rdi) movdqu STATE4_LO, (8 * 16)(%rdi) movdqu STATE4_HI, (9 * 16)(%rdi) FRAME_END ret ENDPROC(crypto_morus1280_sse2_dec_tail) /* * void crypto_morus1280_sse2_final(void *state, void *tag_xor, * u64 assoclen, u64 cryptlen); */ ENTRY(crypto_morus1280_sse2_final) FRAME_BEGIN /* load the state: */ movdqu (0 * 16)(%rdi), STATE0_LO movdqu (1 * 16)(%rdi), STATE0_HI movdqu (2 * 16)(%rdi), STATE1_LO movdqu (3 * 16)(%rdi), STATE1_HI movdqu (4 * 16)(%rdi), STATE2_LO movdqu (5 * 16)(%rdi), STATE2_HI movdqu (6 * 16)(%rdi), STATE3_LO movdqu (7 * 16)(%rdi), STATE3_HI movdqu (8 * 16)(%rdi), STATE4_LO movdqu (9 * 16)(%rdi), STATE4_HI /* xor state[0] into state[4]: */ pxor STATE0_LO, STATE4_LO pxor STATE0_HI, STATE4_HI /* prepare length block: */ movq %rdx, MSG_LO movq %rcx, T0_LO pslldq $8, T0_LO pxor T0_LO, MSG_LO psllq $3, MSG_LO /* multiply by 8 (to get bit count) */ pxor MSG_HI, MSG_HI /* update state: */ call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update /* xor tag: */ movdqu 0(%rsi), MSG_LO movdqu 16(%rsi), MSG_HI pxor STATE0_LO, MSG_LO pxor STATE0_HI, MSG_HI movdqa STATE1_LO, T0_LO movdqa STATE1_HI, T0_HI rol3 T0_HI, T0_LO pxor T0_LO, MSG_LO pxor T0_HI, MSG_HI movdqa STATE2_LO, T0_LO movdqa STATE2_HI, T0_HI pand STATE3_LO, T0_LO pand STATE3_HI, T0_HI pxor T0_LO, MSG_LO pxor T0_HI, MSG_HI movdqu MSG_LO, 0(%rsi) movdqu MSG_HI, 16(%rsi) FRAME_END ret ENDPROC(crypto_morus1280_sse2_final)
AirFortressIlikara/LS2K0300-linux-4.19
78,453
arch/x86/crypto/aesni-intel_asm.S
/* * Implement AES algorithm in Intel AES-NI instructions. * * The white paper of AES-NI instructions can be downloaded from: * http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf * * Copyright (C) 2008, Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * Vinodh Gopal <vinodh.gopal@intel.com> * Kahraman Akdemir * * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD * interface for 64-bit kernels. * Authors: Erdinc Ozturk (erdinc.ozturk@intel.com) * Aidan O'Mahony (aidan.o.mahony@intel.com) * Adrian Hoban <adrian.hoban@intel.com> * James Guilford (james.guilford@intel.com) * Gabriele Paoloni <gabriele.paoloni@intel.com> * Tadeusz Struk (tadeusz.struk@intel.com) * Wajdi Feghali (wajdi.k.feghali@intel.com) * Copyright (c) 2010, Intel Corporation. * * Ported x86_64 version to x86: * Author: Mathias Krause <minipli@googlemail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> #include <asm/inst.h> #include <asm/frame.h> #include <asm/nospec-branch.h> /* * The following macros are used to move an (un)aligned 16 byte value to/from * an XMM register. This can done for either FP or integer values, for FP use * movaps (move aligned packed single) or integer use movdqa (move double quad * aligned). It doesn't make a performance difference which instruction is used * since Nehalem (original Core i7) was released. However, the movaps is a byte * shorter, so that is the one we'll use for now. (same for unaligned). 
*/ #define MOVADQ movaps #define MOVUDQ movups #ifdef __x86_64__ # constants in mergeable sections, linker can reorder and merge .section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16 .align 16 .Lgf128mul_x_ble_mask: .octa 0x00000000000000010000000000000087 .section .rodata.cst16.POLY, "aM", @progbits, 16 .align 16 POLY: .octa 0xC2000000000000000000000000000001 .section .rodata.cst16.TWOONE, "aM", @progbits, 16 .align 16 TWOONE: .octa 0x00000001000000000000000000000001 .section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16 .align 16 SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F .section .rodata.cst16.MASK1, "aM", @progbits, 16 .align 16 MASK1: .octa 0x0000000000000000ffffffffffffffff .section .rodata.cst16.MASK2, "aM", @progbits, 16 .align 16 MASK2: .octa 0xffffffffffffffff0000000000000000 .section .rodata.cst16.ONE, "aM", @progbits, 16 .align 16 ONE: .octa 0x00000000000000000000000000000001 .section .rodata.cst16.F_MIN_MASK, "aM", @progbits, 16 .align 16 F_MIN_MASK: .octa 0xf1f2f3f4f5f6f7f8f9fafbfcfdfeff0 .section .rodata.cst16.dec, "aM", @progbits, 16 .align 16 dec: .octa 0x1 .section .rodata.cst16.enc, "aM", @progbits, 16 .align 16 enc: .octa 0x2 # order of these constants should not change. 
# more specifically, ALL_F should follow SHIFT_MASK, # and zero should follow ALL_F .section .rodata, "a", @progbits .align 16 SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 ALL_F: .octa 0xffffffffffffffffffffffffffffffff .octa 0x00000000000000000000000000000000 .text #define STACK_OFFSET 8*3 #define AadHash 16*0 #define AadLen 16*1 #define InLen (16*1)+8 #define PBlockEncKey 16*2 #define OrigIV 16*3 #define CurCount 16*4 #define PBlockLen 16*5 #define HashKey 16*6 // store HashKey <<1 mod poly here #define HashKey_2 16*7 // store HashKey^2 <<1 mod poly here #define HashKey_3 16*8 // store HashKey^3 <<1 mod poly here #define HashKey_4 16*9 // store HashKey^4 <<1 mod poly here #define HashKey_k 16*10 // store XOR of High 64 bits and Low 64 // bits of HashKey <<1 mod poly here //(for Karatsuba purposes) #define HashKey_2_k 16*11 // store XOR of High 64 bits and Low 64 // bits of HashKey^2 <<1 mod poly here // (for Karatsuba purposes) #define HashKey_3_k 16*12 // store XOR of High 64 bits and Low 64 // bits of HashKey^3 <<1 mod poly here // (for Karatsuba purposes) #define HashKey_4_k 16*13 // store XOR of High 64 bits and Low 64 // bits of HashKey^4 <<1 mod poly here // (for Karatsuba purposes) #define arg1 rdi #define arg2 rsi #define arg3 rdx #define arg4 rcx #define arg5 r8 #define arg6 r9 #define arg7 STACK_OFFSET+8(%rsp) #define arg8 STACK_OFFSET+16(%rsp) #define arg9 STACK_OFFSET+24(%rsp) #define arg10 STACK_OFFSET+32(%rsp) #define arg11 STACK_OFFSET+40(%rsp) #define keysize 2*15*16(%arg1) #endif #define STATE1 %xmm0 #define STATE2 %xmm4 #define STATE3 %xmm5 #define STATE4 %xmm6 #define STATE STATE1 #define IN1 %xmm1 #define IN2 %xmm7 #define IN3 %xmm8 #define IN4 %xmm9 #define IN IN1 #define KEY %xmm2 #define IV %xmm3 #define BSWAP_MASK %xmm10 #define CTR %xmm11 #define INC %xmm12 #define GF128MUL_MASK %xmm10 #ifdef __x86_64__ #define AREG %rax #define KEYP %rdi #define OUTP %rsi #define UKEYP OUTP #define INP %rdx #define LEN %rcx #define IVP %r8 
#define KLEN	%r9d
#define T1	%r10
#define TKEYP	T1
#define T2	%r11
#define TCTR_LOW T2
#else
# 32-bit (i386) register assignments for the same roles
#define AREG	%eax
#define KEYP	%edi
#define OUTP	AREG
#define UKEYP	OUTP
#define INP	%edx
#define LEN	%esi
#define IVP	%ebp
#define KLEN	%ebx
#define T1	%ecx
#define TKEYP	T1
#endif

# Save the callee-saved GPRs this file's GCM entry points clobber.
.macro FUNC_SAVE
	push	%r12
	push	%r13
	push	%r14
#
# states of %xmm registers %xmm6:%xmm15 not saved
# all %xmm registers are clobbered
#
.endm

# Mirror of FUNC_SAVE: restore in reverse push order.
.macro FUNC_RESTORE
	pop	%r14
	pop	%r13
	pop	%r12
.endm

# Precompute hashkeys.
# Input: Hash subkey.
# Output: HashKeys stored in gcm_context_data.  Only needs to be called
# once per key.
# clobbers r12, and tmp xmm registers.
.macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
	mov	\SUBKEY, %r12
	movdqu	(%r12), \TMP3
	movdqa	SHUF_MASK(%rip), \TMP2
	PSHUFB_XMM \TMP2, \TMP3			# byte-reflect the subkey for GHASH

	# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)

	movdqa	\TMP3, \TMP2
	psllq	$1, \TMP3			# shift each 64-bit lane left by 1
	psrlq	$63, \TMP2			# capture the bit shifted out of each lane
	movdqa	\TMP2, \TMP1
	pslldq	$8, \TMP2			# carry from low lane into high lane
	psrldq	$8, \TMP1			# bit shifted out of the whole 128-bit value
	por	\TMP2, \TMP3			# TMP3 = HashKey<<1 (no reduction yet)

	# reduce HashKey<<1, make TMP2 all-ones iff the carried-out bit was set
	pshufd	$0x24, \TMP1, \TMP2
	pcmpeqd TWOONE(%rip), \TMP2
	pand	POLY(%rip), \TMP2		# conditionally select the poly
	pxor	\TMP2, \TMP3			# TMP3 = HashKey<<1 mod poly
	movdqu	\TMP3, HashKey(%arg2)

	movdqa	\TMP3, \TMP5
	pshufd	$78, \TMP3, \TMP1		# swap 64-bit halves
	pxor	\TMP3, \TMP1			# hi64 ^ lo64 for Karatsuba
	movdqu	\TMP1, HashKey_k(%arg2)

	GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^2<<1 (mod poly)
	movdqu	\TMP5, HashKey_2(%arg2)
# HashKey_2 = HashKey^2<<1 (mod poly)
	pshufd	$78, \TMP5, \TMP1
	pxor	\TMP5, \TMP1
	movdqu	\TMP1, HashKey_2_k(%arg2)

	GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^3<<1 (mod poly)
	movdqu	\TMP5, HashKey_3(%arg2)
	pshufd	$78, \TMP5, \TMP1
	pxor	\TMP5, \TMP1
	movdqu	\TMP1, HashKey_3_k(%arg2)

	GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^4<<1 (mod poly)
	movdqu	\TMP5, HashKey_4(%arg2)
	pshufd	$78, \TMP5, \TMP1
	pxor	\TMP5, \TMP1
	movdqu	\TMP1, HashKey_4_k(%arg2)
.endm

# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
# Clobbers rax, r10-r13 and xmm0-xmm6, %xmm13
.macro GCM_INIT Iv SUBKEY AAD AADLEN
	mov \AADLEN, %r11
	mov %r11, AadLen(%arg2)		# ctx_data.aad_length = aad_length
	xor %r11d, %r11d
	mov %r11, InLen(%arg2)		# ctx_data.in_length = 0
	mov %r11, PBlockLen(%arg2)	# ctx_data.partial_block_length = 0
	mov %r11, PBlockEncKey(%arg2)	# ctx_data.partial_block_enc_key = 0
	mov \Iv, %rax
	movdqu (%rax), %xmm0
	movdqu %xmm0, OrigIV(%arg2)	# ctx_data.orig_IV = iv

	movdqa  SHUF_MASK(%rip), %xmm2
	PSHUFB_XMM %xmm2, %xmm0		# byte-reflect IV for counter arithmetic
	movdqu %xmm0, CurCount(%arg2)	# ctx_data.current_counter = iv

	PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
	movdqu HashKey(%arg2), %xmm13
	CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
	%xmm4, %xmm5, %xmm6
.endm

# GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context
# struct has been initialized by GCM_INIT.
# Requires the input data be at least 1 byte long because of READ_PARTIAL_BLOCK
# Clobbers rax, r10-r13, and xmm0-xmm15
.macro GCM_ENC_DEC operation
	movdqu AadHash(%arg2), %xmm8
	movdqu HashKey(%arg2), %xmm13
	add %arg5, InLen(%arg2)

	xor %r11d, %r11d # initialise the data pointer offset as zero
	PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation

	sub %r11, %arg5		# sub partial block data used
	mov %arg5, %r13		# save the number of bytes
	and $-16, %r13		# %r13 = %r13 - (%r13 mod 16)
	mov %r13, %r12
	# Encrypt/Decrypt first few blocks (num_blocks mod 4, in bit 4..5)

	and	$(3<<4), %r12
	jz	_initial_num_blocks_is_0_\@
	cmp	$(2<<4), %r12
	jb	_initial_num_blocks_is_1_\@
	je	_initial_num_blocks_is_2_\@
_initial_num_blocks_is_3_\@:
	INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, \operation
	sub	$48, %r13
	jmp	_initial_blocks_\@
_initial_num_blocks_is_2_\@:
	INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, \operation
	sub	$32, %r13
	jmp	_initial_blocks_\@
_initial_num_blocks_is_1_\@:
	INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, \operation
	sub	$16, %r13
	jmp	_initial_blocks_\@
_initial_num_blocks_is_0_\@:
	INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, \operation
_initial_blocks_\@:

	# Main loop - Encrypt/Decrypt remaining blocks

	cmp	$0, %r13
	je	_zero_cipher_left_\@
	sub	$64, %r13
	je	_four_cipher_left_\@
_crypt_by_4_\@:
	GHASH_4_ENCRYPT_4_PARALLEL_\operation	%xmm9, %xmm10, %xmm11, %xmm12, \
	%xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, \
	%xmm7, %xmm8, enc
	add	$64, %r11
	sub	$64, %r13
	jne	_crypt_by_4_\@
_four_cipher_left_\@:
	GHASH_LAST_4	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
_zero_cipher_left_\@:
	movdqu %xmm8, AadHash(%arg2)
	movdqu %xmm0, CurCount(%arg2)

	mov	%arg5, %r13
	and	$15, %r13			# %r13 = arg5 (mod 16)
	je	_multiple_of_16_bytes_\@

	mov %r13, PBlockLen(%arg2)

	# Handle the last <16 Byte block separately
	paddd ONE(%rip), %xmm0                # INCR CNT to get Yn
	movdqu %xmm0, CurCount(%arg2)
	movdqa SHUF_MASK(%rip), %xmm10
	PSHUFB_XMM %xmm10, %xmm0

	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm1        # Encrypt(K, Yn)
	movdqu %xmm0, PBlockEncKey(%arg2)

	cmp	$16, %arg5
	jge _large_enough_update_\@

	lea (%arg4,%r11,1), %r10
	mov %r13, %r12
	READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
	jmp _data_read_\@

_large_enough_update_\@:
	sub	$16, %r11
	add	%r13, %r11

	# receive the last <16 Byte block
	movdqu	(%arg4, %r11, 1), %xmm1

	sub	%r13, %r11
	add	$16, %r11

	lea	SHIFT_MASK+16(%rip), %r12
	# adjust the shuffle mask pointer to be able to shift 16-r13 bytes
	# (r13 is the number of bytes in plaintext mod 16)
	sub	%r13, %r12
	# get the appropriate shuffle mask
	movdqu	(%r12), %xmm2
	# shift right 16-r13 bytes
	PSHUFB_XMM  %xmm2, %xmm1

_data_read_\@:
	lea ALL_F+16(%rip), %r12
	sub %r13, %r12

.ifc \operation, dec
	movdqa  %xmm1, %xmm2
.endif
	pxor	%xmm1, %xmm0            # XOR Encrypt(K, Yn)
	movdqu	(%r12), %xmm1
	# get the appropriate mask to mask out top 16-r13 bytes of xmm0
	pand	%xmm1, %xmm0            # mask out top 16-r13 bytes of xmm0
.ifc \operation, dec
	pand    %xmm1, %xmm2
	movdqa SHUF_MASK(%rip), %xmm10
	PSHUFB_XMM %xmm10 ,%xmm2

	pxor %xmm2, %xmm8
.else
	movdqa SHUF_MASK(%rip), %xmm10
	PSHUFB_XMM %xmm10,%xmm0

	pxor	%xmm0, %xmm8
.endif

	movdqu %xmm8, AadHash(%arg2)
.ifc \operation, enc
	# GHASH computation for the last <16 byte block
	movdqa SHUF_MASK(%rip), %xmm10
	# shuffle xmm0 back to output as ciphertext
	PSHUFB_XMM %xmm10, %xmm0
.endif

	# Output %r13 bytes
	MOVQ_R64_XMM %xmm0, %rax
	cmp $8, %r13
	jle _less_than_8_bytes_left_\@
	mov %rax, (%arg3 , %r11, 1)
	add $8, %r11
	psrldq $8, %xmm0
	MOVQ_R64_XMM %xmm0, %rax
	sub $8, %r13
_less_than_8_bytes_left_\@:
	mov %al,  (%arg3, %r11, 1)
	add $1, %r11
	shr $8, %rax
	sub $1, %r13
	jne _less_than_8_bytes_left_\@
_multiple_of_16_bytes_\@:
.endm

# GCM_COMPLETE Finishes update of tag of last partial block
# Output: Authorization Tag (AUTH_TAG)
# Clobbers rax, r10-r12, and xmm0, xmm1, xmm5-xmm15
.macro GCM_COMPLETE AUTHTAG AUTHTAGLEN
	movdqu AadHash(%arg2), %xmm8
	movdqu HashKey(%arg2), %xmm13

	mov PBlockLen(%arg2), %r12

	cmp $0, %r12
	je _partial_done\@

	# fold the pending partial block into the hash before finalizing
	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6

_partial_done\@:
	mov	AadLen(%arg2), %r12	# %r12 = aadLen (number of bytes)
	shl	$3, %r12		# convert into number of bits
	movd	%r12d, %xmm15		# len(A) in %xmm15
	mov	InLen(%arg2), %r12
	shl	$3, %r12		# len(C) in bits (*8)
	MOVQ_R64_XMM	%r12, %xmm1

	pslldq	$8, %xmm15		# %xmm15 = len(A)||0x0000000000000000
	pxor	%xmm1, %xmm15		# %xmm15 = len(A)||len(C)
	pxor	%xmm15, %xmm8
	GHASH_MUL	%xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
	# final GHASH computation
	movdqa SHUF_MASK(%rip), %xmm10
	PSHUFB_XMM %xmm10, %xmm8

	movdqu OrigIV(%arg2), %xmm0	# %xmm0 = Y0
	ENCRYPT_SINGLE_BLOCK	%xmm0,  %xmm1	# E(K, Y0)
	pxor	%xmm8, %xmm0
_return_T_\@:
	mov	\AUTHTAG, %r10                     # %r10 = authTag
	mov	\AUTHTAGLEN, %r11                    # %r11 = auth_tag_len
	cmp	$16, %r11
	je	_T_16_\@
	cmp	$8, %r11
	jl	_T_4_\@
_T_8_\@:
	MOVQ_R64_XMM	%xmm0, %rax
	mov	%rax, (%r10)
	add	$8, %r10
	sub	$8, %r11
	psrldq	$8, %xmm0
	cmp	$0, %r11
	je	_return_T_done_\@
_T_4_\@:
	movd	%xmm0, %eax
	mov	%eax, (%r10)
	add	$4, %r10
	sub	$4, %r11
	psrldq	$4, %xmm0
	cmp	$0, %r11
	je	_return_T_done_\@
_T_123_\@:
	movd	%xmm0, %eax
	cmp	$2, %r11
	jl	_T_1_\@
	mov	%ax, (%r10)
	cmp	$2, %r11
	je	_return_T_done_\@
	add	$2, %r10
	sar	$16, %eax
_T_1_\@:
	mov	%al, (%r10)
	jmp	_return_T_done_\@
_T_16_\@:
	movdqu	%xmm0, (%r10)
_return_T_done_\@:
.endm

#ifdef __x86_64__
/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
*
*
* Input: A and B (128-bits each, bit-reflected)
* Output: C = A*B*x mod poly, (i.e. >>1 )
* To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
* GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
*
*/
.macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
	movdqa	  \GH, \TMP1
	pshufd	  $78, \GH, \TMP2
	pshufd	  $78, \HK, \TMP3
	pxor	  \GH, \TMP2            # TMP2 = a1+a0
	pxor	  \HK, \TMP3            # TMP3 = b1+b0
	PCLMULQDQ 0x11, \HK, \TMP1     # TMP1 = a1*b1
	PCLMULQDQ 0x00, \HK, \GH       # GH = a0*b0
	PCLMULQDQ 0x00, \TMP3, \TMP2   # TMP2 = (a0+a1)*(b1+b0)
	pxor	  \GH, \TMP2
	pxor	  \TMP1, \TMP2          # TMP2 = (a0*b0)+(a1*b0)
	movdqa	  \TMP2, \TMP3
	pslldq	  $8, \TMP3             # left shift TMP3 2 DWs
	psrldq	  $8, \TMP2             # right shift TMP2 2 DWs
	pxor	  \TMP3, \GH
	pxor	  \TMP2, \TMP1          # TMP2:GH holds the result of GH*HK

        # first phase of the reduction

	movdqa    \GH, \TMP2
	movdqa    \GH, \TMP3
	movdqa    \GH, \TMP4            # copy GH into TMP2,TMP3 and TMP4
					# in in order to perform
					# independent shifts
	pslld     $31, \TMP2            # packed left shift <<31
	pslld     $30, \TMP3            # packed left shift <<30
	pslld     $25, \TMP4            # packed left shift <<25
	pxor      \TMP3, \TMP2          # xor the shifted versions
	pxor      \TMP4, \TMP2
	movdqa    \TMP2, \TMP5
	psrldq    $4, \TMP5             # right shift TMP5 1 DW
	pslldq    $12, \TMP2            # left shift TMP2 3 DWs
	pxor      \TMP2, \GH

        # second phase of the reduction

	movdqa    \GH,\TMP2             # copy GH into TMP2,TMP3 and TMP4
					# in in order to perform
					# independent shifts
	movdqa    \GH,\TMP3
	movdqa    \GH,\TMP4
	psrld     $1,\TMP2              # packed right shift >>1
	psrld     $2,\TMP3              # packed right shift >>2
	psrld     $7,\TMP4              # packed right shift >>7
	pxor      \TMP3,\TMP2		# xor the shifted versions
	pxor      \TMP4,\TMP2
	pxor      \TMP5, \TMP2
	pxor      \TMP2, \GH
	pxor      \TMP1, \GH            # result is in GH
.endm

# Reads DLEN bytes starting at DPTR and stores in XMMDst
# where 0 < DLEN < 16
# Clobbers %rax, DLEN and XMM1
.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
	cmp $8, \DLEN
	jl _read_lt8_\@
	mov (\DPTR), %rax
	MOVQ_R64_XMM %rax, \XMMDst
	sub $8, \DLEN
	jz _done_read_partial_block_\@
	xor %eax, %eax
_read_next_byte_\@:
	shl $8, %rax
	mov 7(\DPTR, \DLEN, 1), %al	# assemble remaining bytes big-end first
	dec \DLEN
	jnz _read_next_byte_\@
	MOVQ_R64_XMM %rax, \XMM1
	pslldq $8, \XMM1		# move the tail into the high qword
	por \XMM1, \XMMDst
	jmp _done_read_partial_block_\@
_read_lt8_\@:
	xor %eax, %eax
_read_next_byte_lt8_\@:
	shl $8, %rax
	mov -1(\DPTR, \DLEN, 1), %al
	dec \DLEN
	jnz _read_next_byte_lt8_\@
	MOVQ_R64_XMM %rax, \XMMDst
_done_read_partial_block_\@:
.endm

# CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
# clobbers r10-11, xmm14
.macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
	TMP6 TMP7
	MOVADQ	   SHUF_MASK(%rip), %xmm14
	mov	   \AAD, %r10		# %r10 = AAD
	mov	   \AADLEN, %r11	# %r11 = aadLen
	pxor	   \TMP7, \TMP7
	pxor	   \TMP6, \TMP6

	cmp	   $16, %r11
	jl	   _get_AAD_rest\@
_get_AAD_blocks\@:
	movdqu	   (%r10), \TMP7
	PSHUFB_XMM   %xmm14, \TMP7	# byte-reflect the AAD data
	pxor	   \TMP7, \TMP6
	GHASH_MUL  \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
	add	   $16, %r10
	sub	   $16, %r11
	cmp	   $16, %r11
	jge	   _get_AAD_blocks\@

	movdqu	   \TMP6, \TMP7

	/* read the last <16B of AAD */
_get_AAD_rest\@:
	cmp	   $0, %r11
	je	   _get_AAD_done\@

	READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
	PSHUFB_XMM   %xmm14, \TMP7	# byte-reflect the AAD data
	pxor	   \TMP6, \TMP7
	GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
	movdqu \TMP7, \TMP6

_get_AAD_done\@:
	movdqu \TMP6, AadHash(%arg2)
.endm

# PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks
# between update calls.
# Requires the input data be at least 1 byte long due to READ_PARTIAL_BLOCK
# Outputs encrypted bytes, and updates hash and partial info in gcm_data_context
# Clobbers rax, r10, r12, r13, xmm0-6, xmm9-13
.macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
	AAD_HASH operation
	mov 	PBlockLen(%arg2), %r13
	cmp	$0, %r13
	je	_partial_block_done_\@	# Leave Macro if no partial blocks
	# Read in input data without over reading
	cmp	$16, \PLAIN_CYPH_LEN
	jl	_fewer_than_16_bytes_\@
	movups	(\PLAIN_CYPH_IN), %xmm1	# If more than 16 bytes, just fill xmm
	jmp	_data_read_\@

_fewer_than_16_bytes_\@:
	lea	(\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
	mov	\PLAIN_CYPH_LEN, %r12
	READ_PARTIAL_BLOCK %r10 %r12 %xmm0 %xmm1

	mov PBlockLen(%arg2), %r13

_data_read_\@:				# Finished reading in data

	movdqu	PBlockEncKey(%arg2), %xmm9
	movdqu	HashKey(%arg2), %xmm13

	lea	SHIFT_MASK(%rip), %r12

	# adjust the shuffle mask pointer to be able to shift r13 bytes
	# (16-r13 is the number of bytes in plaintext mod 16)
	add	%r13, %r12
	movdqu	(%r12), %xmm2		# get the appropriate shuffle mask
	PSHUFB_XMM %xmm2, %xmm9		# shift right r13 bytes

.ifc \operation, dec
	movdqa	%xmm1, %xmm3
	pxor	%xmm1, %xmm9		# Cyphertext XOR E(K, Yn)

	mov	\PLAIN_CYPH_LEN, %r10
	add	%r13, %r10
	# Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
	sub	$16, %r10
	# Determine if if partial block is not being filled and
	# shift mask accordingly
	jge	_no_extra_mask_1_\@
	sub	%r10, %r12
_no_extra_mask_1_\@:

	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1
	# get the appropriate mask to mask out bottom r13 bytes of xmm9
	pand	%xmm1, %xmm9		# mask out bottom r13 bytes of xmm9

	pand    %xmm1, %xmm3
	movdqa SHUF_MASK(%rip), %xmm10
	PSHUFB_XMM	%xmm10, %xmm3
	PSHUFB_XMM	%xmm2, %xmm3
	pxor %xmm3, \AAD_HASH

	cmp	$0, %r10
	jl	_partial_incomplete_1_\@

	# GHASH computation for the last <16 Byte block
	GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
	xor	%eax, %eax

	mov	%rax, PBlockLen(%arg2)
	jmp	_dec_done_\@
_partial_incomplete_1_\@:
	add	\PLAIN_CYPH_LEN, PBlockLen(%arg2)
_dec_done_\@:
	movdqu	\AAD_HASH, AadHash(%arg2)
.else
	pxor	%xmm1, %xmm9			# Plaintext XOR E(K, Yn)

	mov	\PLAIN_CYPH_LEN, %r10
	add	%r13, %r10
	# Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
	sub	$16, %r10
	# Determine if if partial block is not being filled and
	# shift mask accordingly
	jge	_no_extra_mask_2_\@
	sub	%r10, %r12
_no_extra_mask_2_\@:

	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1
	# get the appropriate mask to mask out bottom r13 bytes of xmm9
	pand	%xmm1, %xmm9

	movdqa SHUF_MASK(%rip), %xmm1
	PSHUFB_XMM %xmm1, %xmm9
	PSHUFB_XMM %xmm2, %xmm9
	pxor %xmm9, \AAD_HASH

	cmp	$0, %r10
	jl	_partial_incomplete_2_\@

	# GHASH computation for the last <16 Byte block
	GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
	xor	%eax, %eax

	mov	%rax, PBlockLen(%arg2)
	jmp	_encode_done_\@
_partial_incomplete_2_\@:
	add	\PLAIN_CYPH_LEN, PBlockLen(%arg2)
_encode_done_\@:
	movdqu	\AAD_HASH, AadHash(%arg2)

	movdqa SHUF_MASK(%rip), %xmm10
	# shuffle xmm9 back to output as ciphertext
	PSHUFB_XMM %xmm10, %xmm9
	PSHUFB_XMM %xmm2, %xmm9
.endif
	# output encrypted Bytes
	cmp	$0, %r10
	jl	_partial_fill_\@
	mov	%r13, %r12
	mov	$16, %r13
	# Set r13 to be the number of bytes to write out
	sub	%r12, %r13
	jmp	_count_set_\@
_partial_fill_\@:
	mov	\PLAIN_CYPH_LEN, %r13
_count_set_\@:
	movdqa	%xmm9, %xmm0
	MOVQ_R64_XMM	%xmm0, %rax
	cmp	$8, %r13
	jle	_less_than_8_bytes_left_\@

	mov	%rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
	add	$8, \DATA_OFFSET
	psrldq	$8, %xmm0
	MOVQ_R64_XMM	%xmm0, %rax
	sub	$8, %r13
_less_than_8_bytes_left_\@:
	movb	%al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
	add	$1, \DATA_OFFSET
	shr	$8, %rax
	sub	$1, %r13
	jne	_less_than_8_bytes_left_\@
_partial_block_done_\@:
.endm # PARTIAL_BLOCK

/*
* if a = number of total plaintext bytes
* b = floor(a/16)
* num_initial_blocks = b mod 4
* encrypt the initial num_initial_blocks blocks and apply ghash on
* the ciphertext
* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
* are clobbered
* arg1, %arg2, %arg3 are
used as a pointer only, not modified */

.macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
	XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
	MOVADQ		SHUF_MASK(%rip), %xmm14
	movdqu AadHash(%arg2), %xmm\i		# load running GHASH state

	# start AES for num_initial_blocks blocks

	movdqu CurCount(%arg2), \XMM0		# XMM0 = Y0 (current counter)

.if (\i == 5) || (\i == 6) || (\i == 7)

	MOVADQ		ONE(%RIP),\TMP1
	MOVADQ		0(%arg1),\TMP2
.irpc index, \i_seq
	paddd		\TMP1, \XMM0		# INCR Y0
.ifc \operation, dec
	movdqa     \XMM0, %xmm\index
.else
	MOVADQ		\XMM0, %xmm\index
.endif
	PSHUFB_XMM	%xmm14, %xmm\index	# perform a 16 byte swap
	pxor		\TMP2, %xmm\index	# whitening: XOR with round key 0
.endr
	lea	0x10(%arg1),%r10
	mov	keysize,%eax
	shr	$2,%eax				# 128->4, 192->6, 256->8
	add	$5,%eax				# 128->9, 192->11, 256->13

aes_loop_initial_\@:
	MOVADQ	(%r10),\TMP1
.irpc	index, \i_seq
	AESENC	\TMP1, %xmm\index
.endr
	add	$16,%r10
	sub	$1,%eax
	jnz	aes_loop_initial_\@

	MOVADQ	(%r10), \TMP1
.irpc index, \i_seq
	AESENCLAST \TMP1, %xmm\index         # Last Round
.endr
.irpc index, \i_seq
	movdqu	   (%arg4 , %r11, 1), \TMP1
	pxor	   \TMP1, %xmm\index
	movdqu	   %xmm\index, (%arg3 , %r11, 1)
	# write back plaintext/ciphertext for num_initial_blocks
	add	   $16, %r11
.ifc \operation, dec
	movdqa     \TMP1, %xmm\index		# keep ciphertext for GHASH
.endif
	PSHUFB_XMM	   %xmm14, %xmm\index

	# prepare plaintext/ciphertext for GHASH computation
.endr
.endif

	# apply GHASH on num_initial_blocks blocks

.if \i == 5
	pxor %xmm5, %xmm6
	GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
	pxor %xmm6, %xmm7
	GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
	pxor %xmm7, %xmm8
	GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
.elseif \i == 6
	pxor %xmm6, %xmm7
	GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
	pxor %xmm7, %xmm8
	GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
.elseif \i == 7
	pxor %xmm7, %xmm8
	GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
.endif
	cmp	   $64, %r13
	jl	_initial_blocks_done\@
	# no need for precomputed values
/*
*
* Precomputations for HashKey parallel with encryption of first 4 blocks.
* Haskey_i_k holds XORed values of the low and high parts of the Haskey_i
*/
	MOVADQ	   ONE(%RIP),\TMP1
	paddd	   \TMP1, \XMM0              # INCR Y0
	MOVADQ	   \XMM0, \XMM1
	PSHUFB_XMM  %xmm14, \XMM1        # perform a 16 byte swap

	paddd	   \TMP1, \XMM0              # INCR Y0
	MOVADQ	   \XMM0, \XMM2
	PSHUFB_XMM  %xmm14, \XMM2        # perform a 16 byte swap

	paddd	   \TMP1, \XMM0              # INCR Y0
	MOVADQ	   \XMM0, \XMM3
	PSHUFB_XMM %xmm14, \XMM3        # perform a 16 byte swap

	paddd	   \TMP1, \XMM0              # INCR Y0
	MOVADQ	   \XMM0, \XMM4
	PSHUFB_XMM %xmm14, \XMM4        # perform a 16 byte swap

	MOVADQ	   0(%arg1),\TMP1
	pxor	   \TMP1, \XMM1
	pxor	   \TMP1, \XMM2
	pxor	   \TMP1, \XMM3
	pxor	   \TMP1, \XMM4
.irpc index, 1234 # do 4 rounds
	movaps 0x10*\index(%arg1), \TMP1
	AESENC	   \TMP1, \XMM1
	AESENC	   \TMP1, \XMM2
	AESENC	   \TMP1, \XMM3
	AESENC	   \TMP1, \XMM4
.endr
.irpc index, 56789 # do next 5 rounds
	movaps 0x10*\index(%arg1), \TMP1
	AESENC	   \TMP1, \XMM1
	AESENC	   \TMP1, \XMM2
	AESENC	   \TMP1, \XMM3
	AESENC	   \TMP1, \XMM4
.endr
	lea	   0xa0(%arg1),%r10
	mov	   keysize,%eax
	shr	   $2,%eax			# 128->4, 192->6, 256->8
	sub	   $4,%eax			# 128->0, 192->2, 256->4
	jz	   aes_loop_pre_done\@

aes_loop_pre_\@:
	MOVADQ	   (%r10),\TMP2
.irpc	index, 1234
	AESENC	   \TMP2, %xmm\index
.endr
	add	   $16,%r10
	sub	   $1,%eax
	jnz	   aes_loop_pre_\@

aes_loop_pre_done\@:
	MOVADQ	   (%r10), \TMP2
	AESENCLAST \TMP2, \XMM1
	AESENCLAST \TMP2, \XMM2
	AESENCLAST \TMP2, \XMM3
	AESENCLAST \TMP2, \XMM4
	movdqu	   16*0(%arg4 , %r11 , 1), \TMP1
	pxor	   \TMP1, \XMM1
.ifc \operation, dec
	movdqu     \XMM1, 16*0(%arg3 , %r11 , 1)
	movdqa     \TMP1, \XMM1
.endif
	movdqu	   16*1(%arg4 , %r11 , 1), \TMP1
	pxor	   \TMP1, \XMM2
.ifc \operation, dec
	movdqu     \XMM2, 16*1(%arg3 , %r11 , 1)
	movdqa     \TMP1, \XMM2
.endif
	movdqu	   16*2(%arg4 , %r11 , 1), \TMP1
	pxor	   \TMP1, \XMM3
.ifc \operation, dec
	movdqu     \XMM3, 16*2(%arg3 , %r11 , 1)
	movdqa     \TMP1, \XMM3
.endif
	movdqu	   16*3(%arg4 , %r11 , 1), \TMP1
	pxor	   \TMP1, \XMM4
.ifc \operation, dec
	movdqu     \XMM4, 16*3(%arg3 , %r11 , 1)
	movdqa     \TMP1, \XMM4
.else
	movdqu    \XMM1, 16*0(%arg3 , %r11 , 1)
	movdqu    \XMM2, 16*1(%arg3 , %r11 , 1)
	movdqu    \XMM3, 16*2(%arg3 , %r11 , 1)
	movdqu    \XMM4, 16*3(%arg3 , %r11 , 1)
.endif
	add	   $64, %r11
	PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
	pxor	   \XMMDst, \XMM1
	# combine GHASHed value with the corresponding ciphertext
	PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
	PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
	PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap

_initial_blocks_done\@:

.endm

/*
* encrypt 4 blocks at a time
* ghash the 4 previously encrypted ciphertext blocks
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation

	movdqa	  \XMM1, \XMM5
	movdqa	  \XMM2, \XMM6
	movdqa	  \XMM3, \XMM7
	movdqa	  \XMM4, \XMM8

	movdqa	  SHUF_MASK(%rip), %xmm15
        # multiply TMP5 * HashKey using karatsuba

	movdqa	  \XMM5, \TMP4
	pshufd	  $78, \XMM5, \TMP6
	pxor	  \XMM5, \TMP6
	paddd     ONE(%rip), \XMM0		# INCR CNT
	movdqu	  HashKey_4(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
	movdqa    \XMM0, \XMM1
	paddd     ONE(%rip), \XMM0		# INCR CNT
	movdqa    \XMM0, \XMM2
	paddd     ONE(%rip), \XMM0		# INCR CNT
	movdqa    \XMM0, \XMM3
	paddd     ONE(%rip), \XMM0		# INCR CNT
	movdqa    \XMM0, \XMM4
	PSHUFB_XMM %xmm15, \XMM1	# perform a 16 byte swap
	PCLMULQDQ 0x00, \TMP5, \XMM5           # XMM5 = a0*b0
	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap

	pxor	  (%arg1), \XMM1
	pxor	  (%arg1), \XMM2
	pxor	  (%arg1), \XMM3
	pxor	  (%arg1), \XMM4
	movdqu	  HashKey_4_k(%arg2), \TMP5
	PCLMULQDQ 0x00, \TMP5, \TMP6       # TMP6 = (a1+a0)*(b1+b0)
	movaps 0x10(%arg1), \TMP1
	AESENC	  \TMP1, \XMM1              # Round 1
	AESENC	  \TMP1, \XMM2
	AESENC	  \TMP1, \XMM3
	AESENC	  \TMP1, \XMM4
	movaps 0x20(%arg1), \TMP1
	AESENC	  \TMP1, \XMM1              # Round 2
	AESENC	  \TMP1, \XMM2
	AESENC	  \TMP1, \XMM3
	AESENC	  \TMP1, \XMM4
	movdqa	  \XMM6, \TMP1
	pshufd	  $78, \XMM6, \TMP2
	pxor	  \XMM6, \TMP2
	movdqu	  HashKey_3(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
	movaps 0x30(%arg1), \TMP3
	AESENC    \TMP3, \XMM1              # Round 3
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	PCLMULQDQ 0x00, \TMP5, \XMM6           # XMM6 = a0*b0
	movaps 0x40(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 4
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	movdqu	  HashKey_3_k(%arg2), \TMP5
	PCLMULQDQ 0x00, \TMP5, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
	movaps 0x50(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 5
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	pxor	  \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
	pxor	  \XMM6, \XMM5
	pxor	  \TMP2, \TMP6
	movdqa	  \XMM7, \TMP1
	pshufd	  $78, \XMM7, \TMP2
	pxor	  \XMM7, \TMP2
	movdqu	  HashKey_2(%arg2), \TMP5

        # Multiply TMP5 * HashKey using karatsuba

	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1*b1
	movaps 0x60(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 6
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	PCLMULQDQ 0x00, \TMP5, \XMM7           # XMM7 = a0*b0
	movaps 0x70(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 7
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	movdqu	  HashKey_2_k(%arg2), \TMP5
	PCLMULQDQ 0x00, \TMP5, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
	movaps 0x80(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 8
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	pxor	  \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
	pxor	  \XMM7, \XMM5
	pxor	  \TMP2, \TMP6

        # Multiply XMM8 * HashKey
        # XMM8 and TMP5 hold the values for the two operands

	movdqa	  \XMM8, \TMP1
	pshufd	  $78, \XMM8, \TMP2
	pxor	  \XMM8, \TMP2
	movdqu	  HashKey(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
	movaps 0x90(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1             # Round 9
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	PCLMULQDQ 0x00, \TMP5, \XMM8          # XMM8 = a0*b0
	lea	  0xa0(%arg1),%r10
	mov	  keysize,%eax
	shr	  $2,%eax			# 128->4, 192->6, 256->8
	sub	  $4,%eax			# 128->0, 192->2, 256->4
	jz	  aes_loop_par_enc_done\@

aes_loop_par_enc\@:
	MOVADQ	  (%r10),\TMP3
.irpc	index, 1234
	AESENC	  \TMP3, %xmm\index
.endr
	add	  $16,%r10
	sub	  $1,%eax
	jnz	  aes_loop_par_enc\@

aes_loop_par_enc_done\@:
	MOVADQ	  (%r10), \TMP3
	AESENCLAST \TMP3, \XMM1           # Round 10
	AESENCLAST \TMP3, \XMM2
	AESENCLAST \TMP3, \XMM3
	AESENCLAST \TMP3, \XMM4
	movdqu    HashKey_k(%arg2), \TMP5
	PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
	movdqu	  (%arg4,%r11,1), \TMP3
	pxor	  \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
	movdqu	  16(%arg4,%r11,1), \TMP3
	pxor	  \TMP3, \XMM2                 # Ciphertext/Plaintext XOR EK
	movdqu	  32(%arg4,%r11,1), \TMP3
	pxor	  \TMP3, \XMM3                 # Ciphertext/Plaintext XOR EK
	movdqu	  48(%arg4,%r11,1), \TMP3
	pxor	  \TMP3, \XMM4                 # Ciphertext/Plaintext XOR EK
	movdqu    \XMM1, (%arg3,%r11,1)        # Write to the ciphertext buffer
	movdqu    \XMM2, 16(%arg3,%r11,1)      # Write to the ciphertext buffer
	movdqu    \XMM3, 32(%arg3,%r11,1)      # Write to the ciphertext buffer
	movdqu    \XMM4, 48(%arg3,%r11,1)      # Write to the ciphertext buffer
	PSHUFB_XMM %xmm15, \XMM1        # perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap

	pxor	  \TMP4, \TMP1
	pxor	  \XMM8, \XMM5
	pxor	  \TMP6, \TMP2
	pxor	  \TMP1, \TMP2
	pxor	  \XMM5, \TMP2
	movdqa	  \TMP2, \TMP3
	pslldq	  $8, \TMP3                    # left shift TMP3 2 DWs
	psrldq	  $8, \TMP2                    # right shift TMP2 2 DWs
	pxor	  \TMP3, \XMM5
	pxor	  \TMP2, \TMP1	  # accumulate the results in TMP1:XMM5

        # first phase of reduction

	movdqa    \XMM5, \TMP2
	movdqa    \XMM5, \TMP3
	movdqa    \XMM5, \TMP4
# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
	pslld     $31, \TMP2                   # packed left shift << 31
	pslld     $30, \TMP3                   # packed left shift << 30
	pslld     $25, \TMP4                   # packed left shift << 25
	pxor      \TMP3, \TMP2	               # xor the shifted versions
	pxor      \TMP4, \TMP2
	movdqa    \TMP2, \TMP5
	psrldq    $4, \TMP5                    # right shift T5 1 DW
	pslldq    $12, \TMP2                   # left shift T2 3 DWs
	pxor      \TMP2, \XMM5

        # second phase of reduction

	movdqa    \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
	movdqa    \XMM5,\TMP3
	movdqa    \XMM5,\TMP4
	psrld     $1, \TMP2                    # packed right shift >>1
	psrld     $2, \TMP3                    # packed right shift >>2
	psrld     $7, \TMP4                    # packed right shift >>7
	pxor      \TMP3,\TMP2		       # xor the shifted versions
	pxor      \TMP4,\TMP2
	pxor      \TMP5, \TMP2
	pxor      \TMP2, \XMM5
	pxor      \TMP1, \XMM5                 # result is in XMM5

	pxor	  \XMM5, \XMM1		# fold into running GHASH state
.endm

/*
* decrypt 4 blocks at a time
* ghash the 4 previously decrypted ciphertext blocks
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation

	movdqa	  \XMM1, \XMM5
	movdqa	  \XMM2, \XMM6
	movdqa	  \XMM3, \XMM7
	movdqa	  \XMM4, \XMM8

	movdqa	  SHUF_MASK(%rip), %xmm15
        # multiply TMP5 * HashKey using karatsuba

	movdqa	  \XMM5, \TMP4
	pshufd	  $78, \XMM5, \TMP6
	pxor	  \XMM5, \TMP6
	paddd     ONE(%rip), \XMM0		# INCR CNT
	movdqu	  HashKey_4(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
	movdqa    \XMM0, \XMM1
	paddd     ONE(%rip), \XMM0		# INCR CNT
	movdqa    \XMM0, \XMM2
	paddd     ONE(%rip), \XMM0		# INCR CNT
	movdqa    \XMM0, \XMM3
	paddd     ONE(%rip), \XMM0		# INCR CNT
	movdqa    \XMM0, \XMM4
	PSHUFB_XMM %xmm15, \XMM1	# perform a 16 byte swap
	PCLMULQDQ 0x00, \TMP5, \XMM5           # XMM5 = a0*b0
	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap

	pxor	  (%arg1), \XMM1
	pxor	  (%arg1), \XMM2
	pxor	  (%arg1), \XMM3
	pxor	  (%arg1), \XMM4
	movdqu	  HashKey_4_k(%arg2), \TMP5
	PCLMULQDQ 0x00, \TMP5, \TMP6       # TMP6 = (a1+a0)*(b1+b0)
	movaps 0x10(%arg1), \TMP1
	AESENC	  \TMP1, \XMM1              # Round 1
	AESENC	  \TMP1, \XMM2
	AESENC	  \TMP1, \XMM3
	AESENC	  \TMP1, \XMM4
	movaps 0x20(%arg1), \TMP1
	AESENC	  \TMP1, \XMM1              # Round 2
	AESENC	  \TMP1, \XMM2
	AESENC	  \TMP1, \XMM3
	AESENC	  \TMP1, \XMM4
	movdqa	  \XMM6, \TMP1
	pshufd	  $78, \XMM6, \TMP2
	pxor	  \XMM6, \TMP2
	movdqu	  HashKey_3(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
	movaps 0x30(%arg1), \TMP3
	AESENC    \TMP3, \XMM1              # Round 3
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	PCLMULQDQ 0x00, \TMP5, \XMM6           # XMM6 = a0*b0
	movaps 0x40(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 4
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	movdqu	  HashKey_3_k(%arg2), \TMP5
	PCLMULQDQ 0x00, \TMP5, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
	movaps 0x50(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 5
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	pxor	  \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
	pxor	  \XMM6, \XMM5
	pxor	  \TMP2, \TMP6
	movdqa	  \XMM7, \TMP1
	pshufd	  $78, \XMM7, \TMP2
	pxor	  \XMM7, \TMP2
	movdqu	  HashKey_2(%arg2), \TMP5

        # Multiply TMP5 * HashKey using karatsuba

	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1*b1
	movaps 0x60(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 6
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	PCLMULQDQ 0x00, \TMP5, \XMM7           # XMM7 = a0*b0
	movaps 0x70(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 7
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	movdqu	  HashKey_2_k(%arg2), \TMP5
	PCLMULQDQ 0x00, \TMP5, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
	movaps 0x80(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1              # Round 8
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	pxor	  \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
	pxor	  \XMM7, \XMM5
	pxor	  \TMP2, \TMP6

        # Multiply XMM8 * HashKey
        # XMM8 and TMP5 hold the values for the two operands

	movdqa	  \XMM8, \TMP1
	pshufd	  $78, \XMM8, \TMP2
	pxor	  \XMM8, \TMP2
	movdqu	  HashKey(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
	movaps 0x90(%arg1), \TMP3
	AESENC	  \TMP3, \XMM1             # Round 9
	AESENC	  \TMP3, \XMM2
	AESENC	  \TMP3, \XMM3
	AESENC	  \TMP3, \XMM4
	PCLMULQDQ 0x00, \TMP5, \XMM8          # XMM8 = a0*b0
	lea	  0xa0(%arg1),%r10
	mov	  keysize,%eax
	shr	  $2,%eax		        # 128->4, 192->6, 256->8
	sub	  $4,%eax			# 128->0, 192->2, 256->4
	jz	  aes_loop_par_dec_done\@

aes_loop_par_dec\@:
	MOVADQ	  (%r10),\TMP3
.irpc	index, 1234
	AESENC	  \TMP3, %xmm\index
.endr
	add	  $16,%r10
	sub	  $1,%eax
	jnz	  aes_loop_par_dec\@

aes_loop_par_dec_done\@:
	MOVADQ	  (%r10), \TMP3
	AESENCLAST \TMP3, \XMM1           # last round
	AESENCLAST \TMP3, \XMM2
	AESENCLAST \TMP3, \XMM3
	AESENCLAST \TMP3, \XMM4
	movdqu    HashKey_k(%arg2), \TMP5
	PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
	movdqu	  (%arg4,%r11,1), \TMP3
	pxor	  \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
	movdqu	  \XMM1, (%arg3,%r11,1)        # Write to plaintext buffer
	movdqa    \TMP3, \XMM1		# keep ciphertext for the GHASH below
	movdqu	  16(%arg4,%r11,1), \TMP3
	pxor	  \TMP3, \XMM2                 # Ciphertext/Plaintext XOR EK
	movdqu	  \XMM2, 16(%arg3,%r11,1)      # Write to plaintext buffer
	movdqa    \TMP3, \XMM2
	movdqu	  32(%arg4,%r11,1), \TMP3
	pxor	  \TMP3, \XMM3                 # Ciphertext/Plaintext XOR EK
	movdqu	  \XMM3, 32(%arg3,%r11,1)      # Write to plaintext buffer
	movdqa    \TMP3, \XMM3
	movdqu	  48(%arg4,%r11,1), \TMP3
	pxor	  \TMP3, \XMM4                 # Ciphertext/Plaintext XOR EK
	movdqu	  \XMM4, 48(%arg3,%r11,1)      # Write to plaintext buffer
	movdqa    \TMP3, \XMM4
	PSHUFB_XMM %xmm15, \XMM1        # perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap

	pxor	  \TMP4, \TMP1
	pxor	  \XMM8, \XMM5
	pxor	  \TMP6, \TMP2
	pxor	  \TMP1, \TMP2
	pxor	  \XMM5, \TMP2
	movdqa	  \TMP2, \TMP3
	pslldq	  $8, \TMP3                    # left shift TMP3 2 DWs
	psrldq	  $8, \TMP2                    # right shift TMP2 2 DWs
	pxor	  \TMP3, \XMM5
	pxor	  \TMP2, \TMP1	  # accumulate the results in TMP1:XMM5

        # first phase of reduction

	movdqa    \XMM5, \TMP2
	movdqa    \XMM5, \TMP3
	movdqa    \XMM5, \TMP4
# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
	pslld     $31, \TMP2                   # packed left shift << 31
	pslld     $30, \TMP3                   # packed left shift << 30
	pslld     $25, \TMP4                   # packed left shift << 25
	pxor      \TMP3, \TMP2	               # xor the shifted versions
	pxor      \TMP4, \TMP2
	movdqa    \TMP2, \TMP5
	psrldq    $4, \TMP5                    # right shift T5 1 DW
	pslldq    $12, \TMP2                   # left shift T2 3 DWs
	pxor      \TMP2, \XMM5

        # second phase of reduction

	movdqa    \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
	movdqa    \XMM5,\TMP3
	movdqa    \XMM5,\TMP4
	psrld     $1, \TMP2                    # packed right shift >>1
	psrld     $2, \TMP3                    # packed right shift >>2
	psrld     $7, \TMP4                    # packed right shift >>7
	pxor      \TMP3,\TMP2		       # xor the shifted versions
	pxor      \TMP4,\TMP2
	pxor      \TMP5, \TMP2
	pxor      \TMP2, \XMM5
	pxor      \TMP1, \XMM5                 # result is in XMM5

	pxor	  \XMM5, \XMM1		# fold into running GHASH state
.endm

/* GHASH the last 4 ciphertext blocks. */
.macro	GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst

        # Multiply TMP6 * HashKey (using Karatsuba)

	movdqa	  \XMM1, \TMP6
	pshufd	  $78, \XMM1, \TMP2
	pxor	  \XMM1, \TMP2
	movdqu	  HashKey_4(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP6       # TMP6 = a1*b1
	PCLMULQDQ 0x00, \TMP5, \XMM1       # XMM1 = a0*b0
	movdqu	  HashKey_4_k(%arg2), \TMP4
	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
	movdqa	  \XMM1, \XMMDst
	movdqa	  \TMP2, \XMM1             # result in TMP6, XMMDst, XMM1

        # Multiply TMP1 * HashKey (using Karatsuba)

	movdqa	  \XMM2, \TMP1
	pshufd	  $78, \XMM2, \TMP2
	pxor	  \XMM2, \TMP2
	movdqu	  HashKey_3(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
	PCLMULQDQ 0x00, \TMP5, \XMM2       # XMM2 = a0*b0
	movdqu	  HashKey_3_k(%arg2), \TMP4
	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
	pxor	  \TMP1, \TMP6
	pxor	  \XMM2, \XMMDst
	pxor	  \TMP2, \XMM1
# results accumulated in TMP6, XMMDst, XMM1

        # Multiply TMP1 * HashKey (using Karatsuba)

	movdqa	  \XMM3, \TMP1
	pshufd	  $78, \XMM3, \TMP2
	pxor	  \XMM3, \TMP2
	movdqu	  HashKey_2(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
	PCLMULQDQ 0x00, \TMP5, \XMM3       # XMM3 = a0*b0
	movdqu	  HashKey_2_k(%arg2), \TMP4
	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
	pxor	  \TMP1, \TMP6
	pxor	  \XMM3, \XMMDst
	pxor	  \TMP2, \XMM1   # results accumulated in TMP6, XMMDst, XMM1

        # Multiply TMP1 * HashKey (using Karatsuba)
	movdqa	  \XMM4, \TMP1
	pshufd	  $78, \XMM4, \TMP2
	pxor	  \XMM4, \TMP2
	movdqu	  HashKey(%arg2), \TMP5
	PCLMULQDQ 0x11, \TMP5, \TMP1	    # TMP1 = a1*b1
	PCLMULQDQ 0x00, \TMP5, \XMM4       # XMM4 = a0*b0
	movdqu	  HashKey_k(%arg2), \TMP4
	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
	pxor	  \TMP1, \TMP6
	pxor	  \XMM4, \XMMDst
	pxor	  \XMM1, \TMP2
	pxor	  \TMP6, \TMP2
	pxor	  \XMMDst, \TMP2
	# middle section of the temp results combined as in karatsuba algorithm
	movdqa	  \TMP2, \TMP4
	pslldq	  $8, \TMP4                 # left shift TMP4 2 DWs
	psrldq	  $8, \TMP2                 # right shift TMP2 2 DWs
	pxor	  \TMP4, \XMMDst
	pxor	  \TMP2, \TMP6
# TMP6:XMMDst holds the result of the accumulated carry-less multiplications

	# first phase of the reduction
	movdqa    \XMMDst, \TMP2
	movdqa    \XMMDst, \TMP3
	movdqa    \XMMDst, \TMP4
# move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
	pslld     $31, \TMP2                # packed left shifting << 31
	pslld     $30, \TMP3                # packed left shifting << 30
	pslld     $25, \TMP4                # packed left shifting << 25
	pxor      \TMP3, \TMP2              # xor the shifted versions
	pxor      \TMP4, \TMP2
	movdqa    \TMP2, \TMP7
	psrldq    $4, \TMP7                 # right shift TMP7 1 DW
	pslldq    $12, \TMP2                # left shift TMP2 3 DWs
	pxor      \TMP2, \XMMDst

        # second phase of the reduction
	movdqa    \XMMDst, \TMP2
	# make 3 copies of XMMDst for doing 3 shift operations
	movdqa    \XMMDst, \TMP3
	movdqa    \XMMDst, \TMP4
	psrld     $1, \TMP2                 # packed right shift >> 1
	psrld     $2, \TMP3                 # packed right shift >> 2
	psrld     $7, \TMP4                 # packed right shift >> 7
	pxor      \TMP3, \TMP2              # xor the shifted versions
	pxor      \TMP4, \TMP2
	pxor      \TMP7, \TMP2
	pxor      \TMP2, \XMMDst
	pxor      \TMP6, \XMMDst            # reduced result is in XMMDst
.endm

/* Encryption of a single block
* uses eax & r10
*/

.macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1

	pxor		(%arg1), \XMM0		# whitening: round key 0
	mov		keysize,%eax
	shr		$2,%eax			# 128->4, 192->6, 256->8
	add		$5,%eax			# 128->9, 192->11, 256->13
	lea		16(%arg1), %r10	  # get first expanded key address

_esb_loop_\@:
	MOVADQ		(%r10),\TMP1
	AESENC		\TMP1,\XMM0
	add		$16,%r10
	sub		$1,%eax
	jnz		_esb_loop_\@

	MOVADQ		(%r10),\TMP1
	AESENCLAST	\TMP1,\XMM0
.endm
/*****************************************************************************
* void aesni_gcm_dec(void *aes_ctx,      // AES Key schedule. Starts on a 16 byte boundary.
*                   struct gcm_context_data *data
*                                      // Context data
*                   u8 *out,           // Plaintext output. Encrypt in-place is allowed.
*                   const u8 *in,      // Ciphertext input
*                   u64 plaintext_len, // Length of data in bytes for decryption.
*                   u8 *iv,            // Pre-counter block j0: 4 byte salt (from Security Association)
*                                      // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
*                                      // concatenated with 0x00000001. 16-byte aligned pointer.
*                   u8 *hash_subkey,   // H, the Hash sub key input.
Data starts on a 16-byte boundary. * const u8 *aad, // Additional Authentication Data (AAD) * u64 aad_len, // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes * u8 *auth_tag, // Authenticated Tag output. The driver will compare this to the * // given authentication tag and only return the plaintext if they match. * u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 * // (most likely), 12 or 8. * * Assumptions: * * keys: * keys are pre-expanded and aligned to 16 bytes. we are using the first * set of 11 keys in the data structure void *aes_ctx * * iv: * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Salt (From the SA) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initialization Vector | * | (This is the sequence number from IPSec header) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * * * AAD: * AAD padded to 128 bits with 0 * for example, assume AAD is a u32 vector * * if AAD is 8 bytes: * AAD[3] = {A0, A1}; * padded AAD in xmm register = {A1 A0 0 0} * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | SPI (A1) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 32-bit Sequence Number (A0) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x0 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * AAD Format with 32-bit Sequence Number * * if AAD is 12 bytes: * AAD[3] = {A0, A1, A2}; * padded AAD in xmm register = {A2 A1 A0 0} * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 
1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | SPI (A2) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 64-bit Extended Sequence Number {A1,A0} | * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x0 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * AAD Format with 64-bit Extended Sequence Number * * poly = x^128 + x^127 + x^126 + x^121 + 1 * *****************************************************************************/ ENTRY(aesni_gcm_dec) FUNC_SAVE GCM_INIT %arg6, arg7, arg8, arg9 GCM_ENC_DEC dec GCM_COMPLETE arg10, arg11 FUNC_RESTORE ret ENDPROC(aesni_gcm_dec) /***************************************************************************** * void aesni_gcm_enc(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data * // Context data * u8 *out, // Ciphertext output. Encrypt in-place is allowed. * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. * u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association) * // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload) * // concatenated with 0x00000001. 16-byte aligned pointer. * u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary. * const u8 *aad, // Additional Authentication Data (AAD) * u64 aad_len, // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes * u8 *auth_tag, // Authenticated Tag output. * u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely), * // 12 or 8. * * Assumptions: * * keys: * keys are pre-expanded and aligned to 16 bytes. 
we are using the * first set of 11 keys in the data structure void *aes_ctx * * * iv: * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Salt (From the SA) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initialization Vector | * | (This is the sequence number from IPSec header) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * * * AAD: * AAD padded to 128 bits with 0 * for example, assume AAD is a u32 vector * * if AAD is 8 bytes: * AAD[3] = {A0, A1}; * padded AAD in xmm register = {A1 A0 0 0} * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | SPI (A1) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 32-bit Sequence Number (A0) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x0 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * AAD Format with 32-bit Sequence Number * * if AAD is 12 bytes: * AAD[3] = {A0, A1, A2}; * padded AAD in xmm register = {A2 A1 A0 0} * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | SPI (A2) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 64-bit Extended Sequence Number {A1,A0} | * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x0 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * AAD Format with 64-bit Extended Sequence Number * * poly = x^128 + x^127 + x^126 + x^121 + 1 ***************************************************************************/ ENTRY(aesni_gcm_enc) FUNC_SAVE GCM_INIT %arg6, arg7, arg8, arg9 GCM_ENC_DEC enc GCM_COMPLETE arg10, arg11 FUNC_RESTORE ret 
ENDPROC(aesni_gcm_enc) /***************************************************************************** * void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data, * // context data * u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association) * // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload) * // concatenated with 0x00000001. 16-byte aligned pointer. * u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary. * const u8 *aad, // Additional Authentication Data (AAD) * u64 aad_len) // Length of AAD in bytes. */ ENTRY(aesni_gcm_init) FUNC_SAVE GCM_INIT %arg3, %arg4,%arg5, %arg6 FUNC_RESTORE ret ENDPROC(aesni_gcm_init) /***************************************************************************** * void aesni_gcm_enc_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data, * // context data * u8 *out, // Ciphertext output. Encrypt in-place is allowed. * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. */ ENTRY(aesni_gcm_enc_update) FUNC_SAVE GCM_ENC_DEC enc FUNC_RESTORE ret ENDPROC(aesni_gcm_enc_update) /***************************************************************************** * void aesni_gcm_dec_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data, * // context data * u8 *out, // Ciphertext output. Encrypt in-place is allowed. * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. */ ENTRY(aesni_gcm_dec_update) FUNC_SAVE GCM_ENC_DEC dec FUNC_RESTORE ret ENDPROC(aesni_gcm_dec_update) /***************************************************************************** * void aesni_gcm_finalize(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data, * // context data * u8 *auth_tag, // Authenticated Tag output. 
* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely), * // 12 or 8. */ ENTRY(aesni_gcm_finalize) FUNC_SAVE GCM_COMPLETE %arg3 %arg4 FUNC_RESTORE ret ENDPROC(aesni_gcm_finalize) #endif .align 4 _key_expansion_128: _key_expansion_256a: pshufd $0b11111111, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 shufps $0b10001100, %xmm0, %xmm4 pxor %xmm4, %xmm0 pxor %xmm1, %xmm0 movaps %xmm0, (TKEYP) add $0x10, TKEYP ret ENDPROC(_key_expansion_128) ENDPROC(_key_expansion_256a) .align 4 _key_expansion_192a: pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 shufps $0b10001100, %xmm0, %xmm4 pxor %xmm4, %xmm0 pxor %xmm1, %xmm0 movaps %xmm2, %xmm5 movaps %xmm2, %xmm6 pslldq $4, %xmm5 pshufd $0b11111111, %xmm0, %xmm3 pxor %xmm3, %xmm2 pxor %xmm5, %xmm2 movaps %xmm0, %xmm1 shufps $0b01000100, %xmm0, %xmm6 movaps %xmm6, (TKEYP) shufps $0b01001110, %xmm2, %xmm1 movaps %xmm1, 0x10(TKEYP) add $0x20, TKEYP ret ENDPROC(_key_expansion_192a) .align 4 _key_expansion_192b: pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 shufps $0b10001100, %xmm0, %xmm4 pxor %xmm4, %xmm0 pxor %xmm1, %xmm0 movaps %xmm2, %xmm5 pslldq $4, %xmm5 pshufd $0b11111111, %xmm0, %xmm3 pxor %xmm3, %xmm2 pxor %xmm5, %xmm2 movaps %xmm0, (TKEYP) add $0x10, TKEYP ret ENDPROC(_key_expansion_192b) .align 4 _key_expansion_256b: pshufd $0b10101010, %xmm1, %xmm1 shufps $0b00010000, %xmm2, %xmm4 pxor %xmm4, %xmm2 shufps $0b10001100, %xmm2, %xmm4 pxor %xmm4, %xmm2 pxor %xmm1, %xmm2 movaps %xmm2, (TKEYP) add $0x10, TKEYP ret ENDPROC(_key_expansion_256b) /* * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, * unsigned int key_len) */ ENTRY(aesni_set_key) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP movl (FRAME_OFFSET+8)(%esp), KEYP # ctx movl (FRAME_OFFSET+12)(%esp), UKEYP # in_key movl (FRAME_OFFSET+16)(%esp), %edx # key_len #endif movups (UKEYP), %xmm0 # user key (first 16 bytes) movaps %xmm0, 
(KEYP) lea 0x10(KEYP), TKEYP # key addr movl %edx, 480(KEYP) pxor %xmm4, %xmm4 # xmm4 is assumed 0 in _key_expansion_x cmp $24, %dl jb .Lenc_key128 je .Lenc_key192 movups 0x10(UKEYP), %xmm2 # other user key movaps %xmm2, (TKEYP) add $0x10, TKEYP AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 call _key_expansion_256a AESKEYGENASSIST 0x1 %xmm0 %xmm1 call _key_expansion_256b AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 call _key_expansion_256a AESKEYGENASSIST 0x2 %xmm0 %xmm1 call _key_expansion_256b AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3 call _key_expansion_256a AESKEYGENASSIST 0x4 %xmm0 %xmm1 call _key_expansion_256b AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4 call _key_expansion_256a AESKEYGENASSIST 0x8 %xmm0 %xmm1 call _key_expansion_256b AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5 call _key_expansion_256a AESKEYGENASSIST 0x10 %xmm0 %xmm1 call _key_expansion_256b AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6 call _key_expansion_256a AESKEYGENASSIST 0x20 %xmm0 %xmm1 call _key_expansion_256b AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7 call _key_expansion_256a jmp .Ldec_key .Lenc_key192: movq 0x10(UKEYP), %xmm2 # other user key AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 call _key_expansion_192a AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 call _key_expansion_192b AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3 call _key_expansion_192a AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4 call _key_expansion_192b AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5 call _key_expansion_192a AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6 call _key_expansion_192b AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7 call _key_expansion_192a AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8 call _key_expansion_192b jmp .Ldec_key .Lenc_key128: AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1 call _key_expansion_128 AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2 call _key_expansion_128 AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3 call _key_expansion_128 AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4 call _key_expansion_128 AESKEYGENASSIST 0x10 %xmm0 %xmm1 # 
round 5 call _key_expansion_128 AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6 call _key_expansion_128 AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7 call _key_expansion_128 AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8 call _key_expansion_128 AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9 call _key_expansion_128 AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10 call _key_expansion_128 .Ldec_key: sub $0x10, TKEYP movaps (KEYP), %xmm0 movaps (TKEYP), %xmm1 movaps %xmm0, 240(TKEYP) movaps %xmm1, 240(KEYP) add $0x10, KEYP lea 240-16(TKEYP), UKEYP .align 4 .Ldec_key_loop: movaps (KEYP), %xmm0 AESIMC %xmm0 %xmm1 movaps %xmm1, (UKEYP) add $0x10, KEYP sub $0x10, UKEYP cmp TKEYP, KEYP jb .Ldec_key_loop xor AREG, AREG #ifndef __x86_64__ popl KEYP #endif FRAME_END ret ENDPROC(aesni_set_key) /* * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ ENTRY(aesni_enc) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP pushl KLEN movl (FRAME_OFFSET+12)(%esp), KEYP # ctx movl (FRAME_OFFSET+16)(%esp), OUTP # dst movl (FRAME_OFFSET+20)(%esp), INP # src #endif movl 480(KEYP), KLEN # key length movups (INP), STATE # input call _aesni_enc1 movups STATE, (OUTP) # output #ifndef __x86_64__ popl KLEN popl KEYP #endif FRAME_END ret ENDPROC(aesni_enc) /* * _aesni_enc1: internal ABI * input: * KEYP: key struct pointer * KLEN: round count * STATE: initial state (input) * output: * STATE: finial state (output) * changed: * KEY * TKEYP (T1) */ .align 4 _aesni_enc1: movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE # round 0 add $0x30, TKEYP cmp $24, KLEN jb .Lenc128 lea 0x20(TKEYP), TKEYP je .Lenc192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY AESENC KEY STATE movaps -0x50(TKEYP), KEY AESENC KEY STATE .align 4 .Lenc192: movaps -0x40(TKEYP), KEY AESENC KEY STATE movaps -0x30(TKEYP), KEY AESENC KEY STATE .align 4 .Lenc128: movaps -0x20(TKEYP), KEY AESENC KEY STATE movaps -0x10(TKEYP), KEY AESENC KEY STATE movaps (TKEYP), KEY AESENC KEY STATE movaps 0x10(TKEYP), KEY AESENC KEY STATE movaps 0x20(TKEYP), 
KEY AESENC KEY STATE movaps 0x30(TKEYP), KEY AESENC KEY STATE movaps 0x40(TKEYP), KEY AESENC KEY STATE movaps 0x50(TKEYP), KEY AESENC KEY STATE movaps 0x60(TKEYP), KEY AESENC KEY STATE movaps 0x70(TKEYP), KEY AESENCLAST KEY STATE ret ENDPROC(_aesni_enc1) /* * _aesni_enc4: internal ABI * input: * KEYP: key struct pointer * KLEN: round count * STATE1: initial state (input) * STATE2 * STATE3 * STATE4 * output: * STATE1: finial state (output) * STATE2 * STATE3 * STATE4 * changed: * KEY * TKEYP (T1) */ .align 4 _aesni_enc4: movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE1 # round 0 pxor KEY, STATE2 pxor KEY, STATE3 pxor KEY, STATE4 add $0x30, TKEYP cmp $24, KLEN jb .L4enc128 lea 0x20(TKEYP), TKEYP je .L4enc192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps -0x50(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 #.align 4 .L4enc192: movaps -0x40(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps -0x30(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 #.align 4 .L4enc128: movaps -0x20(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps -0x10(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps (TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps 0x10(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps 0x20(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps 0x30(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps 0x40(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps 0x50(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 AESENC KEY STATE3 AESENC KEY STATE4 movaps 0x60(TKEYP), KEY AESENC KEY STATE1 AESENC KEY STATE2 
AESENC KEY STATE3 AESENC KEY STATE4 movaps 0x70(TKEYP), KEY AESENCLAST KEY STATE1 # last round AESENCLAST KEY STATE2 AESENCLAST KEY STATE3 AESENCLAST KEY STATE4 ret ENDPROC(_aesni_enc4) /* * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ ENTRY(aesni_dec) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP pushl KLEN movl (FRAME_OFFSET+12)(%esp), KEYP # ctx movl (FRAME_OFFSET+16)(%esp), OUTP # dst movl (FRAME_OFFSET+20)(%esp), INP # src #endif mov 480(KEYP), KLEN # key length add $240, KEYP movups (INP), STATE # input call _aesni_dec1 movups STATE, (OUTP) #output #ifndef __x86_64__ popl KLEN popl KEYP #endif FRAME_END ret ENDPROC(aesni_dec) /* * _aesni_dec1: internal ABI * input: * KEYP: key struct pointer * KLEN: key length * STATE: initial state (input) * output: * STATE: finial state (output) * changed: * KEY * TKEYP (T1) */ .align 4 _aesni_dec1: movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE # round 0 add $0x30, TKEYP cmp $24, KLEN jb .Ldec128 lea 0x20(TKEYP), TKEYP je .Ldec192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY AESDEC KEY STATE movaps -0x50(TKEYP), KEY AESDEC KEY STATE .align 4 .Ldec192: movaps -0x40(TKEYP), KEY AESDEC KEY STATE movaps -0x30(TKEYP), KEY AESDEC KEY STATE .align 4 .Ldec128: movaps -0x20(TKEYP), KEY AESDEC KEY STATE movaps -0x10(TKEYP), KEY AESDEC KEY STATE movaps (TKEYP), KEY AESDEC KEY STATE movaps 0x10(TKEYP), KEY AESDEC KEY STATE movaps 0x20(TKEYP), KEY AESDEC KEY STATE movaps 0x30(TKEYP), KEY AESDEC KEY STATE movaps 0x40(TKEYP), KEY AESDEC KEY STATE movaps 0x50(TKEYP), KEY AESDEC KEY STATE movaps 0x60(TKEYP), KEY AESDEC KEY STATE movaps 0x70(TKEYP), KEY AESDECLAST KEY STATE ret ENDPROC(_aesni_dec1) /* * _aesni_dec4: internal ABI * input: * KEYP: key struct pointer * KLEN: key length * STATE1: initial state (input) * STATE2 * STATE3 * STATE4 * output: * STATE1: finial state (output) * STATE2 * STATE3 * STATE4 * changed: * KEY * TKEYP (T1) */ .align 4 _aesni_dec4: movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, 
STATE1 # round 0 pxor KEY, STATE2 pxor KEY, STATE3 pxor KEY, STATE4 add $0x30, TKEYP cmp $24, KLEN jb .L4dec128 lea 0x20(TKEYP), TKEYP je .L4dec192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps -0x50(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 .align 4 .L4dec192: movaps -0x40(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps -0x30(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 .align 4 .L4dec128: movaps -0x20(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps -0x10(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps (TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps 0x10(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps 0x20(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps 0x30(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps 0x40(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps 0x50(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps 0x60(TKEYP), KEY AESDEC KEY STATE1 AESDEC KEY STATE2 AESDEC KEY STATE3 AESDEC KEY STATE4 movaps 0x70(TKEYP), KEY AESDECLAST KEY STATE1 # last round AESDECLAST KEY STATE2 AESDECLAST KEY STATE3 AESDECLAST KEY STATE4 ret ENDPROC(_aesni_dec4) /* * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len) */ ENTRY(aesni_ecb_enc) FRAME_BEGIN #ifndef __x86_64__ pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+16)(%esp), KEYP # ctx movl (FRAME_OFFSET+20)(%esp), OUTP # dst movl (FRAME_OFFSET+24)(%esp), INP # src movl (FRAME_OFFSET+28)(%esp), LEN # len #endif test LEN, LEN # check length jz .Lecb_enc_ret 
mov 480(KEYP), KLEN cmp $16, LEN jb .Lecb_enc_ret cmp $64, LEN jb .Lecb_enc_loop1 .align 4 .Lecb_enc_loop4: movups (INP), STATE1 movups 0x10(INP), STATE2 movups 0x20(INP), STATE3 movups 0x30(INP), STATE4 call _aesni_enc4 movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) movups STATE4, 0x30(OUTP) sub $64, LEN add $64, INP add $64, OUTP cmp $64, LEN jge .Lecb_enc_loop4 cmp $16, LEN jb .Lecb_enc_ret .align 4 .Lecb_enc_loop1: movups (INP), STATE1 call _aesni_enc1 movups STATE1, (OUTP) sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lecb_enc_loop1 .Lecb_enc_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN #endif FRAME_END ret ENDPROC(aesni_ecb_enc) /* * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len); */ ENTRY(aesni_ecb_dec) FRAME_BEGIN #ifndef __x86_64__ pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+16)(%esp), KEYP # ctx movl (FRAME_OFFSET+20)(%esp), OUTP # dst movl (FRAME_OFFSET+24)(%esp), INP # src movl (FRAME_OFFSET+28)(%esp), LEN # len #endif test LEN, LEN jz .Lecb_dec_ret mov 480(KEYP), KLEN add $240, KEYP cmp $16, LEN jb .Lecb_dec_ret cmp $64, LEN jb .Lecb_dec_loop1 .align 4 .Lecb_dec_loop4: movups (INP), STATE1 movups 0x10(INP), STATE2 movups 0x20(INP), STATE3 movups 0x30(INP), STATE4 call _aesni_dec4 movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) movups STATE4, 0x30(OUTP) sub $64, LEN add $64, INP add $64, OUTP cmp $64, LEN jge .Lecb_dec_loop4 cmp $16, LEN jb .Lecb_dec_ret .align 4 .Lecb_dec_loop1: movups (INP), STATE1 call _aesni_dec1 movups STATE1, (OUTP) sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lecb_dec_loop1 .Lecb_dec_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN #endif FRAME_END ret ENDPROC(aesni_ecb_dec) /* * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ ENTRY(aesni_cbc_enc) FRAME_BEGIN #ifndef __x86_64__ pushl IVP pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+20)(%esp), 
KEYP # ctx movl (FRAME_OFFSET+24)(%esp), OUTP # dst movl (FRAME_OFFSET+28)(%esp), INP # src movl (FRAME_OFFSET+32)(%esp), LEN # len movl (FRAME_OFFSET+36)(%esp), IVP # iv #endif cmp $16, LEN jb .Lcbc_enc_ret mov 480(KEYP), KLEN movups (IVP), STATE # load iv as initial state .align 4 .Lcbc_enc_loop: movups (INP), IN # load input pxor IN, STATE call _aesni_enc1 movups STATE, (OUTP) # store output sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lcbc_enc_loop movups STATE, (IVP) .Lcbc_enc_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN popl IVP #endif FRAME_END ret ENDPROC(aesni_cbc_enc) /* * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ ENTRY(aesni_cbc_dec) FRAME_BEGIN #ifndef __x86_64__ pushl IVP pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+20)(%esp), KEYP # ctx movl (FRAME_OFFSET+24)(%esp), OUTP # dst movl (FRAME_OFFSET+28)(%esp), INP # src movl (FRAME_OFFSET+32)(%esp), LEN # len movl (FRAME_OFFSET+36)(%esp), IVP # iv #endif cmp $16, LEN jb .Lcbc_dec_just_ret mov 480(KEYP), KLEN add $240, KEYP movups (IVP), IV cmp $64, LEN jb .Lcbc_dec_loop1 .align 4 .Lcbc_dec_loop4: movups (INP), IN1 movaps IN1, STATE1 movups 0x10(INP), IN2 movaps IN2, STATE2 #ifdef __x86_64__ movups 0x20(INP), IN3 movaps IN3, STATE3 movups 0x30(INP), IN4 movaps IN4, STATE4 #else movups 0x20(INP), IN1 movaps IN1, STATE3 movups 0x30(INP), IN2 movaps IN2, STATE4 #endif call _aesni_dec4 pxor IV, STATE1 #ifdef __x86_64__ pxor IN1, STATE2 pxor IN2, STATE3 pxor IN3, STATE4 movaps IN4, IV #else pxor IN1, STATE4 movaps IN2, IV movups (INP), IN1 pxor IN1, STATE2 movups 0x10(INP), IN2 pxor IN2, STATE3 #endif movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) movups STATE4, 0x30(OUTP) sub $64, LEN add $64, INP add $64, OUTP cmp $64, LEN jge .Lcbc_dec_loop4 cmp $16, LEN jb .Lcbc_dec_ret .align 4 .Lcbc_dec_loop1: movups (INP), IN movaps IN, STATE call _aesni_dec1 pxor IV, STATE movups STATE, (OUTP) movaps IN, IV sub 
$16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lcbc_dec_loop1 .Lcbc_dec_ret: movups IV, (IVP) .Lcbc_dec_just_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN popl IVP #endif FRAME_END ret ENDPROC(aesni_cbc_dec) #ifdef __x86_64__ .pushsection .rodata .align 16 .Lbswap_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .popsection /* * _aesni_inc_init: internal ABI * setup registers used by _aesni_inc * input: * IV * output: * CTR: == IV, in little endian * TCTR_LOW: == lower qword of CTR * INC: == 1, in little endian * BSWAP_MASK == endian swapping mask */ .align 4 _aesni_inc_init: movaps .Lbswap_mask, BSWAP_MASK movaps IV, CTR PSHUFB_XMM BSWAP_MASK CTR mov $1, TCTR_LOW MOVQ_R64_XMM TCTR_LOW INC MOVQ_R64_XMM CTR TCTR_LOW ret ENDPROC(_aesni_inc_init) /* * _aesni_inc: internal ABI * Increase IV by 1, IV is in big endian * input: * IV * CTR: == IV, in little endian * TCTR_LOW: == lower qword of CTR * INC: == 1, in little endian * BSWAP_MASK == endian swapping mask * output: * IV: Increase by 1 * changed: * CTR: == output IV, in little endian * TCTR_LOW: == lower qword of CTR */ .align 4 _aesni_inc: paddq INC, CTR add $1, TCTR_LOW jnc .Linc_low pslldq $8, INC paddq INC, CTR psrldq $8, INC .Linc_low: movaps CTR, IV PSHUFB_XMM BSWAP_MASK IV ret ENDPROC(_aesni_inc) /* * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ ENTRY(aesni_ctr_enc) FRAME_BEGIN cmp $16, LEN jb .Lctr_enc_just_ret mov 480(KEYP), KLEN movups (IVP), IV call _aesni_inc_init cmp $64, LEN jb .Lctr_enc_loop1 .align 4 .Lctr_enc_loop4: movaps IV, STATE1 call _aesni_inc movups (INP), IN1 movaps IV, STATE2 call _aesni_inc movups 0x10(INP), IN2 movaps IV, STATE3 call _aesni_inc movups 0x20(INP), IN3 movaps IV, STATE4 call _aesni_inc movups 0x30(INP), IN4 call _aesni_enc4 pxor IN1, STATE1 movups STATE1, (OUTP) pxor IN2, STATE2 movups STATE2, 0x10(OUTP) pxor IN3, STATE3 movups STATE3, 0x20(OUTP) pxor IN4, STATE4 movups STATE4, 0x30(OUTP) sub 
$64, LEN add $64, INP add $64, OUTP cmp $64, LEN jge .Lctr_enc_loop4 cmp $16, LEN jb .Lctr_enc_ret .align 4 .Lctr_enc_loop1: movaps IV, STATE call _aesni_inc movups (INP), IN call _aesni_enc1 pxor IN, STATE movups STATE, (OUTP) sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lctr_enc_loop1 .Lctr_enc_ret: movups IV, (IVP) .Lctr_enc_just_ret: FRAME_END ret ENDPROC(aesni_ctr_enc) /* * _aesni_gf128mul_x_ble: internal ABI * Multiply in GF(2^128) for XTS IVs * input: * IV: current IV * GF128MUL_MASK == mask with 0x87 and 0x01 * output: * IV: next IV * changed: * CTR: == temporary value */ #define _aesni_gf128mul_x_ble() \ pshufd $0x13, IV, CTR; \ paddq IV, IV; \ psrad $31, CTR; \ pand GF128MUL_MASK, CTR; \ pxor CTR, IV; /* * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * bool enc, u8 *iv) */ ENTRY(aesni_xts_crypt8) FRAME_BEGIN cmpb $0, %cl movl $0, %ecx movl $240, %r10d leaq _aesni_enc4, %r11 leaq _aesni_dec4, %rax cmovel %r10d, %ecx cmoveq %rax, %r11 movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK movups (IVP), IV mov 480(KEYP), KLEN addq %rcx, KEYP movdqa IV, STATE1 movdqu 0x00(INP), INC pxor INC, STATE1 movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 movdqu 0x10(INP), INC pxor INC, STATE2 movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 movdqu 0x20(INP), INC pxor INC, STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 movdqu 0x30(INP), INC pxor INC, STATE4 movdqu IV, 0x30(OUTP) CALL_NOSPEC %r11 movdqu 0x00(OUTP), INC pxor INC, STATE1 movdqu STATE1, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE1 movdqu 0x40(INP), INC pxor INC, STATE1 movdqu IV, 0x40(OUTP) movdqu 0x10(OUTP), INC pxor INC, STATE2 movdqu STATE2, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 movdqu 0x50(INP), INC pxor INC, STATE2 movdqu IV, 0x50(OUTP) movdqu 0x20(OUTP), INC pxor INC, STATE3 movdqu STATE3, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 movdqu 0x60(INP), INC pxor INC, STATE3 movdqu IV, 
0x60(OUTP) movdqu 0x30(OUTP), INC pxor INC, STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 movdqu 0x70(INP), INC pxor INC, STATE4 movdqu IV, 0x70(OUTP) _aesni_gf128mul_x_ble() movups IV, (IVP) CALL_NOSPEC %r11 movdqu 0x40(OUTP), INC pxor INC, STATE1 movdqu STATE1, 0x40(OUTP) movdqu 0x50(OUTP), INC pxor INC, STATE2 movdqu STATE2, 0x50(OUTP) movdqu 0x60(OUTP), INC pxor INC, STATE3 movdqu STATE3, 0x60(OUTP) movdqu 0x70(OUTP), INC pxor INC, STATE4 movdqu STATE4, 0x70(OUTP) FRAME_END ret ENDPROC(aesni_xts_crypt8) #endif
AirFortressIlikara/LS2K0300-linux-4.19
6,571
arch/x86/crypto/blowfish-x86_64-asm_64.S
/* * Blowfish Cipher Algorithm (x86_64) * * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/linkage.h> .file "blowfish-x86_64-asm.S" .text /* structure of crypto context */ #define p 0 #define s0 ((16 + 2) * 4) #define s1 ((16 + 2 + (1 * 256)) * 4) #define s2 ((16 + 2 + (2 * 256)) * 4) #define s3 ((16 + 2 + (3 * 256)) * 4) /* register macros */ #define CTX %r12 #define RIO %rsi #define RX0 %rax #define RX1 %rbx #define RX2 %rcx #define RX3 %rdx #define RX0d %eax #define RX1d %ebx #define RX2d %ecx #define RX3d %edx #define RX0bl %al #define RX1bl %bl #define RX2bl %cl #define RX3bl %dl #define RX0bh %ah #define RX1bh %bh #define RX2bh %ch #define RX3bh %dh #define RT0 %rdi #define RT1 %rsi #define RT2 %r8 #define RT3 %r9 #define RT0d %edi #define RT1d %esi #define RT2d %r8d #define RT3d %r9d #define RKEY %r10 /*********************************************************************** * 1-way blowfish ***********************************************************************/ #define F() \ rorq $16, RX0; \ movzbl RX0bh, RT0d; \ movzbl RX0bl, RT1d; \ rolq $16, RX0; \ movl s0(CTX,RT0,4), RT0d; \ addl s1(CTX,RT1,4), RT0d; \ movzbl RX0bh, RT1d; \ movzbl RX0bl, RT2d; \ rolq $32, RX0; \ xorl s2(CTX,RT1,4), RT0d; \ addl s3(CTX,RT2,4), RT0d; \ xorq RT0, RX0; 
#define add_roundkey_enc(n) \ xorq p+4*(n)(CTX), RX0; #define round_enc(n) \ add_roundkey_enc(n); \ \ F(); \ F(); #define add_roundkey_dec(n) \ movq p+4*(n-1)(CTX), RT0; \ rorq $32, RT0; \ xorq RT0, RX0; #define round_dec(n) \ add_roundkey_dec(n); \ \ F(); \ F(); \ #define read_block() \ movq (RIO), RX0; \ rorq $32, RX0; \ bswapq RX0; #define write_block() \ bswapq RX0; \ movq RX0, (RIO); #define xor_block() \ bswapq RX0; \ xorq RX0, (RIO); ENTRY(__blowfish_enc_blk) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ movq %r12, %r11; movq %rdi, CTX; movq %rsi, %r10; movq %rdx, RIO; read_block(); round_enc(0); round_enc(2); round_enc(4); round_enc(6); round_enc(8); round_enc(10); round_enc(12); round_enc(14); add_roundkey_enc(16); movq %r11, %r12; movq %r10, RIO; test %cl, %cl; jnz .L__enc_xor; write_block(); ret; .L__enc_xor: xor_block(); ret; ENDPROC(__blowfish_enc_blk) ENTRY(blowfish_dec_blk) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ movq %r12, %r11; movq %rdi, CTX; movq %rsi, %r10; movq %rdx, RIO; read_block(); round_dec(17); round_dec(15); round_dec(13); round_dec(11); round_dec(9); round_dec(7); round_dec(5); round_dec(3); add_roundkey_dec(1); movq %r10, RIO; write_block(); movq %r11, %r12; ret; ENDPROC(blowfish_dec_blk) /********************************************************************** 4-way blowfish, four blocks parallel **********************************************************************/ /* F() for 4-way. Slower when used alone/1-way, but faster when used * parallel/4-way (tested on AMD Phenom II & Intel Xeon E7330). 
*/ #define F4(x) \ movzbl x ## bh, RT1d; \ movzbl x ## bl, RT3d; \ rorq $16, x; \ movzbl x ## bh, RT0d; \ movzbl x ## bl, RT2d; \ rorq $16, x; \ movl s0(CTX,RT0,4), RT0d; \ addl s1(CTX,RT2,4), RT0d; \ xorl s2(CTX,RT1,4), RT0d; \ addl s3(CTX,RT3,4), RT0d; \ xorq RT0, x; #define add_preloaded_roundkey4() \ xorq RKEY, RX0; \ xorq RKEY, RX1; \ xorq RKEY, RX2; \ xorq RKEY, RX3; #define preload_roundkey_enc(n) \ movq p+4*(n)(CTX), RKEY; #define add_roundkey_enc4(n) \ add_preloaded_roundkey4(); \ preload_roundkey_enc(n + 2); #define round_enc4(n) \ add_roundkey_enc4(n); \ \ F4(RX0); \ F4(RX1); \ F4(RX2); \ F4(RX3); \ \ F4(RX0); \ F4(RX1); \ F4(RX2); \ F4(RX3); #define preload_roundkey_dec(n) \ movq p+4*((n)-1)(CTX), RKEY; \ rorq $32, RKEY; #define add_roundkey_dec4(n) \ add_preloaded_roundkey4(); \ preload_roundkey_dec(n - 2); #define round_dec4(n) \ add_roundkey_dec4(n); \ \ F4(RX0); \ F4(RX1); \ F4(RX2); \ F4(RX3); \ \ F4(RX0); \ F4(RX1); \ F4(RX2); \ F4(RX3); #define read_block4() \ movq (RIO), RX0; \ rorq $32, RX0; \ bswapq RX0; \ \ movq 8(RIO), RX1; \ rorq $32, RX1; \ bswapq RX1; \ \ movq 16(RIO), RX2; \ rorq $32, RX2; \ bswapq RX2; \ \ movq 24(RIO), RX3; \ rorq $32, RX3; \ bswapq RX3; #define write_block4() \ bswapq RX0; \ movq RX0, (RIO); \ \ bswapq RX1; \ movq RX1, 8(RIO); \ \ bswapq RX2; \ movq RX2, 16(RIO); \ \ bswapq RX3; \ movq RX3, 24(RIO); #define xor_block4() \ bswapq RX0; \ xorq RX0, (RIO); \ \ bswapq RX1; \ xorq RX1, 8(RIO); \ \ bswapq RX2; \ xorq RX2, 16(RIO); \ \ bswapq RX3; \ xorq RX3, 24(RIO); ENTRY(__blowfish_enc_blk_4way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ pushq %r12; pushq %rbx; pushq %rcx; movq %rdi, CTX movq %rsi, %r11; movq %rdx, RIO; preload_roundkey_enc(0); read_block4(); round_enc4(0); round_enc4(2); round_enc4(4); round_enc4(6); round_enc4(8); round_enc4(10); round_enc4(12); round_enc4(14); add_preloaded_roundkey4(); popq %r12; movq %r11, RIO; test %r12b, %r12b; jnz .L__enc_xor4; 
write_block4(); popq %rbx; popq %r12; ret; .L__enc_xor4: xor_block4(); popq %rbx; popq %r12; ret; ENDPROC(__blowfish_enc_blk_4way) ENTRY(blowfish_dec_blk_4way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ pushq %r12; pushq %rbx; movq %rdi, CTX; movq %rsi, %r11 movq %rdx, RIO; preload_roundkey_dec(17); read_block4(); round_dec4(17); round_dec4(15); round_dec4(13); round_dec4(11); round_dec4(9); round_dec4(7); round_dec4(5); round_dec4(3); add_preloaded_roundkey4(); movq %r11, RIO; write_block4(); popq %rbx; popq %r12; ret; ENDPROC(blowfish_dec_blk_4way)
AirFortressIlikara/LS2K0300-linux-4.19
26,575
arch/x86/crypto/des3_ede-asm_64.S
/* * des3_ede-asm_64.S - x86-64 assembly implementation of 3DES cipher * * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/linkage.h> .file "des3_ede-asm_64.S" .text #define s1 .L_s1 #define s2 ((s1) + (64*8)) #define s3 ((s2) + (64*8)) #define s4 ((s3) + (64*8)) #define s5 ((s4) + (64*8)) #define s6 ((s5) + (64*8)) #define s7 ((s6) + (64*8)) #define s8 ((s7) + (64*8)) /* register macros */ #define CTX %rdi #define RL0 %r8 #define RL1 %r9 #define RL2 %r10 #define RL0d %r8d #define RL1d %r9d #define RL2d %r10d #define RR0 %r11 #define RR1 %r12 #define RR2 %r13 #define RR0d %r11d #define RR1d %r12d #define RR2d %r13d #define RW0 %rax #define RW1 %rbx #define RW2 %rcx #define RW0d %eax #define RW1d %ebx #define RW2d %ecx #define RW0bl %al #define RW1bl %bl #define RW2bl %cl #define RW0bh %ah #define RW1bh %bh #define RW2bh %ch #define RT0 %r15 #define RT1 %rsi #define RT2 %r14 #define RT3 %rdx #define RT0d %r15d #define RT1d %esi #define RT2d %r14d #define RT3d %edx /*********************************************************************** * 1-way 3DES ***********************************************************************/ #define do_permutation(a, b, offset, mask) \ movl a, RT0d; \ shrl $(offset), RT0d; \ xorl b, RT0d; \ andl $(mask), RT0d; \ xorl RT0d, b; \ shll $(offset), RT0d; \ xorl RT0d, a; #define expand_to_64bits(val, mask) \ movl val##d, RT0d; \ rorl $4, RT0d; \ shlq $32, RT0; \ orq RT0, val; \ andq mask, val; #define compress_to_64bits(val) \ movq 
val, RT0; \ shrq $32, RT0; \ roll $4, RT0d; \ orl RT0d, val##d; #define initial_permutation(left, right) \ do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \ do_permutation(left##d, right##d, 16, 0x0000ffff); \ do_permutation(right##d, left##d, 2, 0x33333333); \ do_permutation(right##d, left##d, 8, 0x00ff00ff); \ movabs $0x3f3f3f3f3f3f3f3f, RT3; \ movl left##d, RW0d; \ roll $1, right##d; \ xorl right##d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, left##d; \ xorl RW0d, right##d; \ roll $1, left##d; \ expand_to_64bits(right, RT3); \ expand_to_64bits(left, RT3); #define final_permutation(left, right) \ compress_to_64bits(right); \ compress_to_64bits(left); \ movl right##d, RW0d; \ rorl $1, left##d; \ xorl left##d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, right##d; \ xorl RW0d, left##d; \ rorl $1, right##d; \ do_permutation(right##d, left##d, 8, 0x00ff00ff); \ do_permutation(right##d, left##d, 2, 0x33333333); \ do_permutation(left##d, right##d, 16, 0x0000ffff); \ do_permutation(left##d, right##d, 4, 0x0f0f0f0f); #define round1(n, from, to, load_next_key) \ xorq from, RW0; \ \ movzbl RW0bl, RT0d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ movzbl RW0bl, RT2d; \ movzbl RW0bh, RT3d; \ shrq $16, RW0; \ movq s8(, RT0, 8), RT0; \ xorq s6(, RT1, 8), to; \ movzbl RW0bl, RL1d; \ movzbl RW0bh, RT1d; \ shrl $16, RW0d; \ xorq s4(, RT2, 8), RT0; \ xorq s2(, RT3, 8), to; \ movzbl RW0bl, RT2d; \ movzbl RW0bh, RT3d; \ xorq s7(, RL1, 8), RT0; \ xorq s5(, RT1, 8), to; \ xorq s3(, RT2, 8), RT0; \ load_next_key(n, RW0); \ xorq RT0, to; \ xorq s1(, RT3, 8), to; \ #define load_next_key(n, RWx) \ movq (((n) + 1) * 8)(CTX), RWx; #define dummy2(a, b) /*_*/ #define read_block(io, left, right) \ movl (io), left##d; \ movl 4(io), right##d; \ bswapl left##d; \ bswapl right##d; #define write_block(io, left, right) \ bswapl left##d; \ bswapl right##d; \ movl left##d, (io); \ movl right##d, 4(io); ENTRY(des3_ede_x86_64_crypt_blk) /* input: * %rdi: round keys, CTX * %rsi: dst * %rdx: src */ pushq 
%rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; pushq %rsi; /* dst */ read_block(%rdx, RL0, RR0); initial_permutation(RL0, RR0); movq (CTX), RW0; round1(0, RR0, RL0, load_next_key); round1(1, RL0, RR0, load_next_key); round1(2, RR0, RL0, load_next_key); round1(3, RL0, RR0, load_next_key); round1(4, RR0, RL0, load_next_key); round1(5, RL0, RR0, load_next_key); round1(6, RR0, RL0, load_next_key); round1(7, RL0, RR0, load_next_key); round1(8, RR0, RL0, load_next_key); round1(9, RL0, RR0, load_next_key); round1(10, RR0, RL0, load_next_key); round1(11, RL0, RR0, load_next_key); round1(12, RR0, RL0, load_next_key); round1(13, RL0, RR0, load_next_key); round1(14, RR0, RL0, load_next_key); round1(15, RL0, RR0, load_next_key); round1(16+0, RL0, RR0, load_next_key); round1(16+1, RR0, RL0, load_next_key); round1(16+2, RL0, RR0, load_next_key); round1(16+3, RR0, RL0, load_next_key); round1(16+4, RL0, RR0, load_next_key); round1(16+5, RR0, RL0, load_next_key); round1(16+6, RL0, RR0, load_next_key); round1(16+7, RR0, RL0, load_next_key); round1(16+8, RL0, RR0, load_next_key); round1(16+9, RR0, RL0, load_next_key); round1(16+10, RL0, RR0, load_next_key); round1(16+11, RR0, RL0, load_next_key); round1(16+12, RL0, RR0, load_next_key); round1(16+13, RR0, RL0, load_next_key); round1(16+14, RL0, RR0, load_next_key); round1(16+15, RR0, RL0, load_next_key); round1(32+0, RR0, RL0, load_next_key); round1(32+1, RL0, RR0, load_next_key); round1(32+2, RR0, RL0, load_next_key); round1(32+3, RL0, RR0, load_next_key); round1(32+4, RR0, RL0, load_next_key); round1(32+5, RL0, RR0, load_next_key); round1(32+6, RR0, RL0, load_next_key); round1(32+7, RL0, RR0, load_next_key); round1(32+8, RR0, RL0, load_next_key); round1(32+9, RL0, RR0, load_next_key); round1(32+10, RR0, RL0, load_next_key); round1(32+11, RL0, RR0, load_next_key); round1(32+12, RR0, RL0, load_next_key); round1(32+13, RL0, RR0, load_next_key); round1(32+14, RR0, RL0, load_next_key); round1(32+15, RL0, RR0, dummy2); 
final_permutation(RR0, RL0); popq %rsi /* dst */ write_block(%rsi, RR0, RL0); popq %r15; popq %r14; popq %r13; popq %r12; popq %rbx; ret; ENDPROC(des3_ede_x86_64_crypt_blk) /*********************************************************************** * 3-way 3DES ***********************************************************************/ #define expand_to_64bits(val, mask) \ movl val##d, RT0d; \ rorl $4, RT0d; \ shlq $32, RT0; \ orq RT0, val; \ andq mask, val; #define compress_to_64bits(val) \ movq val, RT0; \ shrq $32, RT0; \ roll $4, RT0d; \ orl RT0d, val##d; #define initial_permutation3(left, right) \ do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \ do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ \ do_permutation(right##0d, left##0d, 2, 0x33333333); \ do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ do_permutation(right##1d, left##1d, 2, 0x33333333); \ do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ do_permutation(right##2d, left##2d, 2, 0x33333333); \ do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ \ movabs $0x3f3f3f3f3f3f3f3f, RT3; \ \ movl left##0d, RW0d; \ roll $1, right##0d; \ xorl right##0d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, left##0d; \ xorl RW0d, right##0d; \ roll $1, left##0d; \ expand_to_64bits(right##0, RT3); \ expand_to_64bits(left##0, RT3); \ movl left##1d, RW1d; \ roll $1, right##1d; \ xorl right##1d, RW1d; \ andl $0xaaaaaaaa, RW1d; \ xorl RW1d, left##1d; \ xorl RW1d, right##1d; \ roll $1, left##1d; \ expand_to_64bits(right##1, RT3); \ expand_to_64bits(left##1, RT3); \ movl left##2d, RW2d; \ roll $1, right##2d; \ xorl right##2d, RW2d; \ andl $0xaaaaaaaa, RW2d; \ xorl RW2d, left##2d; \ xorl RW2d, right##2d; \ roll $1, left##2d; \ expand_to_64bits(right##2, RT3); \ expand_to_64bits(left##2, RT3); #define 
final_permutation3(left, right) \ compress_to_64bits(right##0); \ compress_to_64bits(left##0); \ movl right##0d, RW0d; \ rorl $1, left##0d; \ xorl left##0d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, right##0d; \ xorl RW0d, left##0d; \ rorl $1, right##0d; \ compress_to_64bits(right##1); \ compress_to_64bits(left##1); \ movl right##1d, RW1d; \ rorl $1, left##1d; \ xorl left##1d, RW1d; \ andl $0xaaaaaaaa, RW1d; \ xorl RW1d, right##1d; \ xorl RW1d, left##1d; \ rorl $1, right##1d; \ compress_to_64bits(right##2); \ compress_to_64bits(left##2); \ movl right##2d, RW2d; \ rorl $1, left##2d; \ xorl left##2d, RW2d; \ andl $0xaaaaaaaa, RW2d; \ xorl RW2d, right##2d; \ xorl RW2d, left##2d; \ rorl $1, right##2d; \ \ do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ do_permutation(right##0d, left##0d, 2, 0x33333333); \ do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ do_permutation(right##1d, left##1d, 2, 0x33333333); \ do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ do_permutation(right##2d, left##2d, 2, 0x33333333); \ \ do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); #define round3(n, from, to, load_next_key, do_movq) \ xorq from##0, RW0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ xorq s8(, RT3, 8), to##0; \ xorq s6(, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ xorq s4(, RT3, 8), to##0; \ xorq s2(, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrl $16, RW0d; \ xorq s7(, RT3, 8), to##0; \ xorq s5(, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ load_next_key(n, RW0); \ xorq s3(, RT3, 8), to##0; \ xorq s1(, RT1, 8), to##0; \ xorq from##1, RW1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrq $16, RW1; \ 
xorq s8(, RT3, 8), to##1; \ xorq s6(, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrq $16, RW1; \ xorq s4(, RT3, 8), to##1; \ xorq s2(, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrl $16, RW1d; \ xorq s7(, RT3, 8), to##1; \ xorq s5(, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ do_movq(RW0, RW1); \ xorq s3(, RT3, 8), to##1; \ xorq s1(, RT1, 8), to##1; \ xorq from##2, RW2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrq $16, RW2; \ xorq s8(, RT3, 8), to##2; \ xorq s6(, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrq $16, RW2; \ xorq s4(, RT3, 8), to##2; \ xorq s2(, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrl $16, RW2d; \ xorq s7(, RT3, 8), to##2; \ xorq s5(, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ do_movq(RW0, RW2); \ xorq s3(, RT3, 8), to##2; \ xorq s1(, RT1, 8), to##2; #define __movq(src, dst) \ movq src, dst; ENTRY(des3_ede_x86_64_crypt_blk_3way) /* input: * %rdi: ctx, round keys * %rsi: dst (3 blocks) * %rdx: src (3 blocks) */ pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; pushq %rsi /* dst */ /* load input */ movl 0 * 4(%rdx), RL0d; movl 1 * 4(%rdx), RR0d; movl 2 * 4(%rdx), RL1d; movl 3 * 4(%rdx), RR1d; movl 4 * 4(%rdx), RL2d; movl 5 * 4(%rdx), RR2d; bswapl RL0d; bswapl RR0d; bswapl RL1d; bswapl RR1d; bswapl RL2d; bswapl RR2d; initial_permutation3(RL, RR); movq 0(CTX), RW0; movq RW0, RW1; movq RW0, RW2; round3(0, RR, RL, load_next_key, __movq); round3(1, RL, RR, load_next_key, __movq); round3(2, RR, RL, load_next_key, __movq); round3(3, RL, RR, load_next_key, __movq); round3(4, RR, RL, load_next_key, __movq); round3(5, RL, RR, load_next_key, __movq); round3(6, RR, RL, load_next_key, __movq); round3(7, RL, RR, load_next_key, __movq); round3(8, RR, RL, load_next_key, __movq); round3(9, RL, RR, load_next_key, __movq); round3(10, RR, RL, load_next_key, __movq); round3(11, RL, RR, load_next_key, __movq); round3(12, RR, RL, 
load_next_key, __movq); round3(13, RL, RR, load_next_key, __movq); round3(14, RR, RL, load_next_key, __movq); round3(15, RL, RR, load_next_key, __movq); round3(16+0, RL, RR, load_next_key, __movq); round3(16+1, RR, RL, load_next_key, __movq); round3(16+2, RL, RR, load_next_key, __movq); round3(16+3, RR, RL, load_next_key, __movq); round3(16+4, RL, RR, load_next_key, __movq); round3(16+5, RR, RL, load_next_key, __movq); round3(16+6, RL, RR, load_next_key, __movq); round3(16+7, RR, RL, load_next_key, __movq); round3(16+8, RL, RR, load_next_key, __movq); round3(16+9, RR, RL, load_next_key, __movq); round3(16+10, RL, RR, load_next_key, __movq); round3(16+11, RR, RL, load_next_key, __movq); round3(16+12, RL, RR, load_next_key, __movq); round3(16+13, RR, RL, load_next_key, __movq); round3(16+14, RL, RR, load_next_key, __movq); round3(16+15, RR, RL, load_next_key, __movq); round3(32+0, RR, RL, load_next_key, __movq); round3(32+1, RL, RR, load_next_key, __movq); round3(32+2, RR, RL, load_next_key, __movq); round3(32+3, RL, RR, load_next_key, __movq); round3(32+4, RR, RL, load_next_key, __movq); round3(32+5, RL, RR, load_next_key, __movq); round3(32+6, RR, RL, load_next_key, __movq); round3(32+7, RL, RR, load_next_key, __movq); round3(32+8, RR, RL, load_next_key, __movq); round3(32+9, RL, RR, load_next_key, __movq); round3(32+10, RR, RL, load_next_key, __movq); round3(32+11, RL, RR, load_next_key, __movq); round3(32+12, RR, RL, load_next_key, __movq); round3(32+13, RL, RR, load_next_key, __movq); round3(32+14, RR, RL, load_next_key, __movq); round3(32+15, RL, RR, dummy2, dummy2); final_permutation3(RR, RL); bswapl RR0d; bswapl RL0d; bswapl RR1d; bswapl RL1d; bswapl RR2d; bswapl RL2d; popq %rsi /* dst */ movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); movl RL1d, 3 * 4(%rsi); movl RR2d, 4 * 4(%rsi); movl RL2d, 5 * 4(%rsi); popq %r15; popq %r14; popq %r13; popq %r12; popq %rbx; ret; ENDPROC(des3_ede_x86_64_crypt_blk_3way) .section .rodata, "a", 
@progbits .align 16 .L_s1: .quad 0x0010100001010400, 0x0000000000000000 .quad 0x0000100000010000, 0x0010100001010404 .quad 0x0010100001010004, 0x0000100000010404 .quad 0x0000000000000004, 0x0000100000010000 .quad 0x0000000000000400, 0x0010100001010400 .quad 0x0010100001010404, 0x0000000000000400 .quad 0x0010000001000404, 0x0010100001010004 .quad 0x0010000001000000, 0x0000000000000004 .quad 0x0000000000000404, 0x0010000001000400 .quad 0x0010000001000400, 0x0000100000010400 .quad 0x0000100000010400, 0x0010100001010000 .quad 0x0010100001010000, 0x0010000001000404 .quad 0x0000100000010004, 0x0010000001000004 .quad 0x0010000001000004, 0x0000100000010004 .quad 0x0000000000000000, 0x0000000000000404 .quad 0x0000100000010404, 0x0010000001000000 .quad 0x0000100000010000, 0x0010100001010404 .quad 0x0000000000000004, 0x0010100001010000 .quad 0x0010100001010400, 0x0010000001000000 .quad 0x0010000001000000, 0x0000000000000400 .quad 0x0010100001010004, 0x0000100000010000 .quad 0x0000100000010400, 0x0010000001000004 .quad 0x0000000000000400, 0x0000000000000004 .quad 0x0010000001000404, 0x0000100000010404 .quad 0x0010100001010404, 0x0000100000010004 .quad 0x0010100001010000, 0x0010000001000404 .quad 0x0010000001000004, 0x0000000000000404 .quad 0x0000100000010404, 0x0010100001010400 .quad 0x0000000000000404, 0x0010000001000400 .quad 0x0010000001000400, 0x0000000000000000 .quad 0x0000100000010004, 0x0000100000010400 .quad 0x0000000000000000, 0x0010100001010004 .L_s2: .quad 0x0801080200100020, 0x0800080000000000 .quad 0x0000080000000000, 0x0001080200100020 .quad 0x0001000000100000, 0x0000000200000020 .quad 0x0801000200100020, 0x0800080200000020 .quad 0x0800000200000020, 0x0801080200100020 .quad 0x0801080000100000, 0x0800000000000000 .quad 0x0800080000000000, 0x0001000000100000 .quad 0x0000000200000020, 0x0801000200100020 .quad 0x0001080000100000, 0x0001000200100020 .quad 0x0800080200000020, 0x0000000000000000 .quad 0x0800000000000000, 0x0000080000000000 .quad 0x0001080200100020, 
0x0801000000100000 .quad 0x0001000200100020, 0x0800000200000020 .quad 0x0000000000000000, 0x0001080000100000 .quad 0x0000080200000020, 0x0801080000100000 .quad 0x0801000000100000, 0x0000080200000020 .quad 0x0000000000000000, 0x0001080200100020 .quad 0x0801000200100020, 0x0001000000100000 .quad 0x0800080200000020, 0x0801000000100000 .quad 0x0801080000100000, 0x0000080000000000 .quad 0x0801000000100000, 0x0800080000000000 .quad 0x0000000200000020, 0x0801080200100020 .quad 0x0001080200100020, 0x0000000200000020 .quad 0x0000080000000000, 0x0800000000000000 .quad 0x0000080200000020, 0x0801080000100000 .quad 0x0001000000100000, 0x0800000200000020 .quad 0x0001000200100020, 0x0800080200000020 .quad 0x0800000200000020, 0x0001000200100020 .quad 0x0001080000100000, 0x0000000000000000 .quad 0x0800080000000000, 0x0000080200000020 .quad 0x0800000000000000, 0x0801000200100020 .quad 0x0801080200100020, 0x0001080000100000 .L_s3: .quad 0x0000002000000208, 0x0000202008020200 .quad 0x0000000000000000, 0x0000200008020008 .quad 0x0000002008000200, 0x0000000000000000 .quad 0x0000202000020208, 0x0000002008000200 .quad 0x0000200000020008, 0x0000000008000008 .quad 0x0000000008000008, 0x0000200000020000 .quad 0x0000202008020208, 0x0000200000020008 .quad 0x0000200008020000, 0x0000002000000208 .quad 0x0000000008000000, 0x0000000000000008 .quad 0x0000202008020200, 0x0000002000000200 .quad 0x0000202000020200, 0x0000200008020000 .quad 0x0000200008020008, 0x0000202000020208 .quad 0x0000002008000208, 0x0000202000020200 .quad 0x0000200000020000, 0x0000002008000208 .quad 0x0000000000000008, 0x0000202008020208 .quad 0x0000002000000200, 0x0000000008000000 .quad 0x0000202008020200, 0x0000000008000000 .quad 0x0000200000020008, 0x0000002000000208 .quad 0x0000200000020000, 0x0000202008020200 .quad 0x0000002008000200, 0x0000000000000000 .quad 0x0000002000000200, 0x0000200000020008 .quad 0x0000202008020208, 0x0000002008000200 .quad 0x0000000008000008, 0x0000002000000200 .quad 0x0000000000000000, 
0x0000200008020008 .quad 0x0000002008000208, 0x0000200000020000 .quad 0x0000000008000000, 0x0000202008020208 .quad 0x0000000000000008, 0x0000202000020208 .quad 0x0000202000020200, 0x0000000008000008 .quad 0x0000200008020000, 0x0000002008000208 .quad 0x0000002000000208, 0x0000200008020000 .quad 0x0000202000020208, 0x0000000000000008 .quad 0x0000200008020008, 0x0000202000020200 .L_s4: .quad 0x1008020000002001, 0x1000020800002001 .quad 0x1000020800002001, 0x0000000800000000 .quad 0x0008020800002000, 0x1008000800000001 .quad 0x1008000000000001, 0x1000020000002001 .quad 0x0000000000000000, 0x0008020000002000 .quad 0x0008020000002000, 0x1008020800002001 .quad 0x1000000800000001, 0x0000000000000000 .quad 0x0008000800000000, 0x1008000000000001 .quad 0x1000000000000001, 0x0000020000002000 .quad 0x0008000000000000, 0x1008020000002001 .quad 0x0000000800000000, 0x0008000000000000 .quad 0x1000020000002001, 0x0000020800002000 .quad 0x1008000800000001, 0x1000000000000001 .quad 0x0000020800002000, 0x0008000800000000 .quad 0x0000020000002000, 0x0008020800002000 .quad 0x1008020800002001, 0x1000000800000001 .quad 0x0008000800000000, 0x1008000000000001 .quad 0x0008020000002000, 0x1008020800002001 .quad 0x1000000800000001, 0x0000000000000000 .quad 0x0000000000000000, 0x0008020000002000 .quad 0x0000020800002000, 0x0008000800000000 .quad 0x1008000800000001, 0x1000000000000001 .quad 0x1008020000002001, 0x1000020800002001 .quad 0x1000020800002001, 0x0000000800000000 .quad 0x1008020800002001, 0x1000000800000001 .quad 0x1000000000000001, 0x0000020000002000 .quad 0x1008000000000001, 0x1000020000002001 .quad 0x0008020800002000, 0x1008000800000001 .quad 0x1000020000002001, 0x0000020800002000 .quad 0x0008000000000000, 0x1008020000002001 .quad 0x0000000800000000, 0x0008000000000000 .quad 0x0000020000002000, 0x0008020800002000 .L_s5: .quad 0x0000001000000100, 0x0020001002080100 .quad 0x0020000002080000, 0x0420001002000100 .quad 0x0000000000080000, 0x0000001000000100 .quad 0x0400000000000000, 
0x0020000002080000 .quad 0x0400001000080100, 0x0000000000080000 .quad 0x0020001002000100, 0x0400001000080100 .quad 0x0420001002000100, 0x0420000002080000 .quad 0x0000001000080100, 0x0400000000000000 .quad 0x0020000002000000, 0x0400000000080000 .quad 0x0400000000080000, 0x0000000000000000 .quad 0x0400001000000100, 0x0420001002080100 .quad 0x0420001002080100, 0x0020001002000100 .quad 0x0420000002080000, 0x0400001000000100 .quad 0x0000000000000000, 0x0420000002000000 .quad 0x0020001002080100, 0x0020000002000000 .quad 0x0420000002000000, 0x0000001000080100 .quad 0x0000000000080000, 0x0420001002000100 .quad 0x0000001000000100, 0x0020000002000000 .quad 0x0400000000000000, 0x0020000002080000 .quad 0x0420001002000100, 0x0400001000080100 .quad 0x0020001002000100, 0x0400000000000000 .quad 0x0420000002080000, 0x0020001002080100 .quad 0x0400001000080100, 0x0000001000000100 .quad 0x0020000002000000, 0x0420000002080000 .quad 0x0420001002080100, 0x0000001000080100 .quad 0x0420000002000000, 0x0420001002080100 .quad 0x0020000002080000, 0x0000000000000000 .quad 0x0400000000080000, 0x0420000002000000 .quad 0x0000001000080100, 0x0020001002000100 .quad 0x0400001000000100, 0x0000000000080000 .quad 0x0000000000000000, 0x0400000000080000 .quad 0x0020001002080100, 0x0400001000000100 .L_s6: .quad 0x0200000120000010, 0x0204000020000000 .quad 0x0000040000000000, 0x0204040120000010 .quad 0x0204000020000000, 0x0000000100000010 .quad 0x0204040120000010, 0x0004000000000000 .quad 0x0200040020000000, 0x0004040100000010 .quad 0x0004000000000000, 0x0200000120000010 .quad 0x0004000100000010, 0x0200040020000000 .quad 0x0200000020000000, 0x0000040100000010 .quad 0x0000000000000000, 0x0004000100000010 .quad 0x0200040120000010, 0x0000040000000000 .quad 0x0004040000000000, 0x0200040120000010 .quad 0x0000000100000010, 0x0204000120000010 .quad 0x0204000120000010, 0x0000000000000000 .quad 0x0004040100000010, 0x0204040020000000 .quad 0x0000040100000010, 0x0004040000000000 .quad 0x0204040020000000, 
0x0200000020000000 .quad 0x0200040020000000, 0x0000000100000010 .quad 0x0204000120000010, 0x0004040000000000 .quad 0x0204040120000010, 0x0004000000000000 .quad 0x0000040100000010, 0x0200000120000010 .quad 0x0004000000000000, 0x0200040020000000 .quad 0x0200000020000000, 0x0000040100000010 .quad 0x0200000120000010, 0x0204040120000010 .quad 0x0004040000000000, 0x0204000020000000 .quad 0x0004040100000010, 0x0204040020000000 .quad 0x0000000000000000, 0x0204000120000010 .quad 0x0000000100000010, 0x0000040000000000 .quad 0x0204000020000000, 0x0004040100000010 .quad 0x0000040000000000, 0x0004000100000010 .quad 0x0200040120000010, 0x0000000000000000 .quad 0x0204040020000000, 0x0200000020000000 .quad 0x0004000100000010, 0x0200040120000010 .L_s7: .quad 0x0002000000200000, 0x2002000004200002 .quad 0x2000000004000802, 0x0000000000000000 .quad 0x0000000000000800, 0x2000000004000802 .quad 0x2002000000200802, 0x0002000004200800 .quad 0x2002000004200802, 0x0002000000200000 .quad 0x0000000000000000, 0x2000000004000002 .quad 0x2000000000000002, 0x0000000004000000 .quad 0x2002000004200002, 0x2000000000000802 .quad 0x0000000004000800, 0x2002000000200802 .quad 0x2002000000200002, 0x0000000004000800 .quad 0x2000000004000002, 0x0002000004200000 .quad 0x0002000004200800, 0x2002000000200002 .quad 0x0002000004200000, 0x0000000000000800 .quad 0x2000000000000802, 0x2002000004200802 .quad 0x0002000000200800, 0x2000000000000002 .quad 0x0000000004000000, 0x0002000000200800 .quad 0x0000000004000000, 0x0002000000200800 .quad 0x0002000000200000, 0x2000000004000802 .quad 0x2000000004000802, 0x2002000004200002 .quad 0x2002000004200002, 0x2000000000000002 .quad 0x2002000000200002, 0x0000000004000000 .quad 0x0000000004000800, 0x0002000000200000 .quad 0x0002000004200800, 0x2000000000000802 .quad 0x2002000000200802, 0x0002000004200800 .quad 0x2000000000000802, 0x2000000004000002 .quad 0x2002000004200802, 0x0002000004200000 .quad 0x0002000000200800, 0x0000000000000000 .quad 0x2000000000000002, 
0x2002000004200802 .quad 0x0000000000000000, 0x2002000000200802 .quad 0x0002000004200000, 0x0000000000000800 .quad 0x2000000004000002, 0x0000000004000800 .quad 0x0000000000000800, 0x2002000000200002 .L_s8: .quad 0x0100010410001000, 0x0000010000001000 .quad 0x0000000000040000, 0x0100010410041000 .quad 0x0100000010000000, 0x0100010410001000 .quad 0x0000000400000000, 0x0100000010000000 .quad 0x0000000400040000, 0x0100000010040000 .quad 0x0100010410041000, 0x0000010000041000 .quad 0x0100010010041000, 0x0000010400041000 .quad 0x0000010000001000, 0x0000000400000000 .quad 0x0100000010040000, 0x0100000410000000 .quad 0x0100010010001000, 0x0000010400001000 .quad 0x0000010000041000, 0x0000000400040000 .quad 0x0100000410040000, 0x0100010010041000 .quad 0x0000010400001000, 0x0000000000000000 .quad 0x0000000000000000, 0x0100000410040000 .quad 0x0100000410000000, 0x0100010010001000 .quad 0x0000010400041000, 0x0000000000040000 .quad 0x0000010400041000, 0x0000000000040000 .quad 0x0100010010041000, 0x0000010000001000 .quad 0x0000000400000000, 0x0100000410040000 .quad 0x0000010000001000, 0x0000010400041000 .quad 0x0100010010001000, 0x0000000400000000 .quad 0x0100000410000000, 0x0100000010040000 .quad 0x0100000410040000, 0x0100000010000000 .quad 0x0000000000040000, 0x0100010410001000 .quad 0x0000000000000000, 0x0100010410041000 .quad 0x0000000400040000, 0x0100000410000000 .quad 0x0100000010040000, 0x0100010010001000 .quad 0x0100010410001000, 0x0000000000000000 .quad 0x0100010410041000, 0x0000010000041000 .quad 0x0000010000041000, 0x0000010400001000 .quad 0x0000010400001000, 0x0000000400040000 .quad 0x0100000010000000, 0x0100010010041000
AirFortressIlikara/LS2K0300-linux-4.19
13,449
arch/x86/crypto/cast5-avx-x86_64-asm_64.S
/* * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64) * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/linkage.h> #include <asm/frame.h> .file "cast5-avx-x86_64-asm_64.S" .extern cast_s1 .extern cast_s2 .extern cast_s3 .extern cast_s4 /* structure of crypto context */ #define km 0 #define kr (16*4) #define rr ((16*4)+16) /* s-boxes */ #define s1 cast_s1 #define s2 cast_s2 #define s3 cast_s3 #define s4 cast_s4 /********************************************************************** 16-way AVX cast5 **********************************************************************/ #define CTX %r15 #define RL1 %xmm0 #define RR1 %xmm1 #define RL2 %xmm2 #define RR2 %xmm3 #define RL3 %xmm4 #define RR3 %xmm5 #define RL4 %xmm6 #define RR4 %xmm7 #define RX %xmm8 #define RKM %xmm9 #define RKR %xmm10 #define RKRF %xmm11 #define RKRR %xmm12 #define R32 %xmm13 #define R1ST %xmm14 #define RTMP %xmm15 #define RID1 %rdi #define RID1d %edi #define RID2 %rsi #define RID2d %esi #define RGI1 %rdx #define RGI1bl %dl #define RGI1bh %dh #define RGI2 %rcx #define RGI2bl %cl #define RGI2bh %ch #define RGI3 %rax #define RGI3bl %al #define RGI3bh %ah #define RGI4 %rbx #define 
RGI4bl %bl #define RGI4bh %bh #define RFS1 %r8 #define RFS1d %r8d #define RFS2 %r9 #define RFS2d %r9d #define RFS3 %r10 #define RFS3d %r10d #define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \ movzbl src ## bh, RID1d; \ movzbl src ## bl, RID2d; \ shrq $16, src; \ movl s1(, RID1, 4), dst ## d; \ op1 s2(, RID2, 4), dst ## d; \ movzbl src ## bh, RID1d; \ movzbl src ## bl, RID2d; \ interleave_op(il_reg); \ op2 s3(, RID1, 4), dst ## d; \ op3 s4(, RID2, 4), dst ## d; #define dummy(d) /* do nothing */ #define shr_next(reg) \ shrq $16, reg; #define F_head(a, x, gi1, gi2, op0) \ op0 a, RKM, x; \ vpslld RKRF, x, RTMP; \ vpsrld RKRR, x, x; \ vpor RTMP, x, x; \ \ vmovq x, gi1; \ vpextrq $1, x, gi2; #define F_tail(a, x, gi1, gi2, op1, op2, op3) \ lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \ lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \ \ lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \ shlq $32, RFS2; \ orq RFS1, RFS2; \ lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \ shlq $32, RFS1; \ orq RFS1, RFS3; \ \ vmovq RFS2, x; \ vpinsrq $1, RFS3, x, x; #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ F_head(b1, RX, RGI1, RGI2, op0); \ F_head(b2, RX, RGI3, RGI4, op0); \ \ F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \ F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \ \ vpxor a1, RX, a1; \ vpxor a2, RTMP, a2; #define F1_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) #define F2_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) #define F3_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl) #define subround(a1, b1, a2, b2, f) \ F ## f ## _2(a1, b1, a2, b2); #define round(l, r, n, f) \ vbroadcastss (km+(4*n))(CTX), RKM; \ vpand R1ST, RKR, RKRF; \ vpsubq RKRF, R32, RKRR; \ vpsrldq $1, RKR, RKR; \ subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \ subround(l ## 3, r ## 3, l ## 4, r ## 4, f); #define enc_preload_rkr() \ vbroadcastss .L16_mask, RKR; \ /* add 16-bit rotation to key rotations 
(mod 32) */ \ vpxor kr(CTX), RKR, RKR; #define dec_preload_rkr() \ vbroadcastss .L16_mask, RKR; \ /* add 16-bit rotation to key rotations (mod 32) */ \ vpxor kr(CTX), RKR, RKR; \ vpshufb .Lbswap128_mask, RKR, RKR; #define transpose_2x4(x0, x1, t0, t1) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t1; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; #define inpack_blocks(x0, x1, t0, t1, rmask) \ vpshufb rmask, x0, x0; \ vpshufb rmask, x1, x1; \ \ transpose_2x4(x0, x1, t0, t1) #define outunpack_blocks(x0, x1, t0, t1, rmask) \ transpose_2x4(x0, x1, t0, t1) \ \ vpshufb rmask, x0, x0; \ vpshufb rmask, x1, x1; .section .rodata.cst16.bswap_mask, "aM", @progbits, 16 .align 16 .Lbswap_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .section .rodata.cst16.bswap128_mask, "aM", @progbits, 16 .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .section .rodata.cst16.bswap_iv_mask, "aM", @progbits, 16 .align 16 .Lbswap_iv_mask: .byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0 .section .rodata.cst4.16_mask, "aM", @progbits, 4 .align 4 .L16_mask: .byte 16, 16, 16, 16 .section .rodata.cst4.32_mask, "aM", @progbits, 4 .align 4 .L32_mask: .byte 32, 0, 0, 0 .section .rodata.cst4.first_mask, "aM", @progbits, 4 .align 4 .Lfirst_mask: .byte 0x1f, 0, 0, 0 .text .align 16 __cast5_enc_blk16: /* input: * %rdi: ctx * RL1: blocks 1 and 2 * RR1: blocks 3 and 4 * RL2: blocks 5 and 6 * RR2: blocks 7 and 8 * RL3: blocks 9 and 10 * RR3: blocks 11 and 12 * RL4: blocks 13 and 14 * RR4: blocks 15 and 16 * output: * RL1: encrypted blocks 1 and 2 * RR1: encrypted blocks 3 and 4 * RL2: encrypted blocks 5 and 6 * RR2: encrypted blocks 7 and 8 * RL3: encrypted blocks 9 and 10 * RR3: encrypted blocks 11 and 12 * RL4: encrypted blocks 13 and 14 * RR4: encrypted blocks 15 and 16 */ pushq %r15; pushq %rbx; movq %rdi, CTX; vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; enc_preload_rkr(); inpack_blocks(RL1, RR1, RTMP, RX, 
RKM); inpack_blocks(RL2, RR2, RTMP, RX, RKM); inpack_blocks(RL3, RR3, RTMP, RX, RKM); inpack_blocks(RL4, RR4, RTMP, RX, RKM); round(RL, RR, 0, 1); round(RR, RL, 1, 2); round(RL, RR, 2, 3); round(RR, RL, 3, 1); round(RL, RR, 4, 2); round(RR, RL, 5, 3); round(RL, RR, 6, 1); round(RR, RL, 7, 2); round(RL, RR, 8, 3); round(RR, RL, 9, 1); round(RL, RR, 10, 2); round(RR, RL, 11, 3); movzbl rr(CTX), %eax; testl %eax, %eax; jnz .L__skip_enc; round(RL, RR, 12, 1); round(RR, RL, 13, 2); round(RL, RR, 14, 3); round(RR, RL, 15, 1); .L__skip_enc: popq %rbx; popq %r15; vmovdqa .Lbswap_mask, RKM; outunpack_blocks(RR1, RL1, RTMP, RX, RKM); outunpack_blocks(RR2, RL2, RTMP, RX, RKM); outunpack_blocks(RR3, RL3, RTMP, RX, RKM); outunpack_blocks(RR4, RL4, RTMP, RX, RKM); ret; ENDPROC(__cast5_enc_blk16) .align 16 __cast5_dec_blk16: /* input: * %rdi: ctx * RL1: encrypted blocks 1 and 2 * RR1: encrypted blocks 3 and 4 * RL2: encrypted blocks 5 and 6 * RR2: encrypted blocks 7 and 8 * RL3: encrypted blocks 9 and 10 * RR3: encrypted blocks 11 and 12 * RL4: encrypted blocks 13 and 14 * RR4: encrypted blocks 15 and 16 * output: * RL1: decrypted blocks 1 and 2 * RR1: decrypted blocks 3 and 4 * RL2: decrypted blocks 5 and 6 * RR2: decrypted blocks 7 and 8 * RL3: decrypted blocks 9 and 10 * RR3: decrypted blocks 11 and 12 * RL4: decrypted blocks 13 and 14 * RR4: decrypted blocks 15 and 16 */ pushq %r15; pushq %rbx; movq %rdi, CTX; vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; dec_preload_rkr(); inpack_blocks(RL1, RR1, RTMP, RX, RKM); inpack_blocks(RL2, RR2, RTMP, RX, RKM); inpack_blocks(RL3, RR3, RTMP, RX, RKM); inpack_blocks(RL4, RR4, RTMP, RX, RKM); movzbl rr(CTX), %eax; testl %eax, %eax; jnz .L__skip_dec; round(RL, RR, 15, 1); round(RR, RL, 14, 3); round(RL, RR, 13, 2); round(RR, RL, 12, 1); .L__dec_tail: round(RL, RR, 11, 3); round(RR, RL, 10, 2); round(RL, RR, 9, 1); round(RR, RL, 8, 3); round(RL, RR, 7, 2); round(RR, RL, 6, 1); round(RL, RR, 5, 3); round(RR, RL, 
4, 2); round(RL, RR, 3, 1); round(RR, RL, 2, 3); round(RL, RR, 1, 2); round(RR, RL, 0, 1); vmovdqa .Lbswap_mask, RKM; popq %rbx; popq %r15; outunpack_blocks(RR1, RL1, RTMP, RX, RKM); outunpack_blocks(RR2, RL2, RTMP, RX, RKM); outunpack_blocks(RR3, RL3, RTMP, RX, RKM); outunpack_blocks(RR4, RL4, RTMP, RX, RKM); ret; .L__skip_dec: vpsrldq $4, RKR, RKR; jmp .L__dec_tail; ENDPROC(__cast5_dec_blk16) ENTRY(cast5_ecb_enc_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r15; movq %rdi, CTX; movq %rsi, %r11; vmovdqu (0*4*4)(%rdx), RL1; vmovdqu (1*4*4)(%rdx), RR1; vmovdqu (2*4*4)(%rdx), RL2; vmovdqu (3*4*4)(%rdx), RR2; vmovdqu (4*4*4)(%rdx), RL3; vmovdqu (5*4*4)(%rdx), RR3; vmovdqu (6*4*4)(%rdx), RL4; vmovdqu (7*4*4)(%rdx), RR4; call __cast5_enc_blk16; vmovdqu RR1, (0*4*4)(%r11); vmovdqu RL1, (1*4*4)(%r11); vmovdqu RR2, (2*4*4)(%r11); vmovdqu RL2, (3*4*4)(%r11); vmovdqu RR3, (4*4*4)(%r11); vmovdqu RL3, (5*4*4)(%r11); vmovdqu RR4, (6*4*4)(%r11); vmovdqu RL4, (7*4*4)(%r11); popq %r15; FRAME_END ret; ENDPROC(cast5_ecb_enc_16way) ENTRY(cast5_ecb_dec_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r15; movq %rdi, CTX; movq %rsi, %r11; vmovdqu (0*4*4)(%rdx), RL1; vmovdqu (1*4*4)(%rdx), RR1; vmovdqu (2*4*4)(%rdx), RL2; vmovdqu (3*4*4)(%rdx), RR2; vmovdqu (4*4*4)(%rdx), RL3; vmovdqu (5*4*4)(%rdx), RR3; vmovdqu (6*4*4)(%rdx), RL4; vmovdqu (7*4*4)(%rdx), RR4; call __cast5_dec_blk16; vmovdqu RR1, (0*4*4)(%r11); vmovdqu RL1, (1*4*4)(%r11); vmovdqu RR2, (2*4*4)(%r11); vmovdqu RL2, (3*4*4)(%r11); vmovdqu RR3, (4*4*4)(%r11); vmovdqu RL3, (5*4*4)(%r11); vmovdqu RR4, (6*4*4)(%r11); vmovdqu RL4, (7*4*4)(%r11); popq %r15; FRAME_END ret; ENDPROC(cast5_ecb_dec_16way) ENTRY(cast5_cbc_dec_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r12; pushq %r15; movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; vmovdqu (0*16)(%rdx), RL1; vmovdqu (1*16)(%rdx), RR1; vmovdqu (2*16)(%rdx), RL2; vmovdqu (3*16)(%rdx), RR2; vmovdqu 
(4*16)(%rdx), RL3; vmovdqu (5*16)(%rdx), RR3; vmovdqu (6*16)(%rdx), RL4; vmovdqu (7*16)(%rdx), RR4; call __cast5_dec_blk16; /* xor with src */ vmovq (%r12), RX; vpshufd $0x4f, RX, RX; vpxor RX, RR1, RR1; vpxor 0*16+8(%r12), RL1, RL1; vpxor 1*16+8(%r12), RR2, RR2; vpxor 2*16+8(%r12), RL2, RL2; vpxor 3*16+8(%r12), RR3, RR3; vpxor 4*16+8(%r12), RL3, RL3; vpxor 5*16+8(%r12), RR4, RR4; vpxor 6*16+8(%r12), RL4, RL4; vmovdqu RR1, (0*16)(%r11); vmovdqu RL1, (1*16)(%r11); vmovdqu RR2, (2*16)(%r11); vmovdqu RL2, (3*16)(%r11); vmovdqu RR3, (4*16)(%r11); vmovdqu RL3, (5*16)(%r11); vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); popq %r15; popq %r12; FRAME_END ret; ENDPROC(cast5_cbc_dec_16way) ENTRY(cast5_ctr_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: iv (big endian, 64bit) */ FRAME_BEGIN pushq %r12; pushq %r15; movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; vpcmpeqd RTMP, RTMP, RTMP; vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */ vpcmpeqd RKR, RKR, RKR; vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */ vmovdqa .Lbswap_iv_mask, R1ST; vmovdqa .Lbswap128_mask, RKM; /* load IV and byteswap */ vmovq (%rcx), RX; vpshufb R1ST, RX, RX; /* construct IVs */ vpsubq RTMP, RX, RX; /* le: IV1, IV0 */ vpshufb RKM, RX, RL1; /* be: IV0, IV1 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RR1; /* be: IV2, IV3 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RL2; /* be: IV4, IV5 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RR2; /* be: IV6, IV7 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RL3; /* be: IV8, IV9 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RR3; /* be: IV10, IV11 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RL4; /* be: IV12, IV13 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RR4; /* be: IV14, IV15 */ /* store last IV */ vpsubq RTMP, RX, RX; /* le: IV16, IV14 */ vpshufb R1ST, RX, RX; /* be: IV16, IV16 */ vmovq RX, (%rcx); call __cast5_enc_blk16; /* dst = src ^ iv */ vpxor (0*16)(%r12), RR1, RR1; vpxor (1*16)(%r12), RL1, RL1; vpxor (2*16)(%r12), RR2, RR2; vpxor (3*16)(%r12), RL2, RL2; vpxor (4*16)(%r12), 
RR3, RR3; vpxor (5*16)(%r12), RL3, RL3; vpxor (6*16)(%r12), RR4, RR4; vpxor (7*16)(%r12), RL4, RL4; vmovdqu RR1, (0*16)(%r11); vmovdqu RL1, (1*16)(%r11); vmovdqu RR2, (2*16)(%r11); vmovdqu RL2, (3*16)(%r11); vmovdqu RR3, (4*16)(%r11); vmovdqu RL3, (5*16)(%r11); vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); popq %r15; popq %r12; FRAME_END ret; ENDPROC(cast5_ctr_16way)
AirFortressIlikara/LS2K0300-linux-4.19
23,727
arch/x86/crypto/serpent-avx-x86_64-asm_64.S
/* * Serpent Cipher 8-way parallel algorithm (x86_64/AVX) * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/linkage.h> #include <asm/frame.h> #include "glue_helper-asm-avx.S" .file "serpent-avx-x86_64-asm_64.S" .section .rodata.cst16.bswap128_mask, "aM", @progbits, 16 .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16 .align 16 .Lxts_gf128mul_and_shl1_mask: .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 .text #define CTX %rdi /********************************************************************** 8-way AVX serpent **********************************************************************/ #define RA1 %xmm0 #define RB1 %xmm1 #define RC1 %xmm2 #define RD1 %xmm3 #define RE1 %xmm4 #define tp %xmm5 #define RA2 %xmm6 #define RB2 %xmm7 #define RC2 %xmm8 #define RD2 %xmm9 #define RE2 %xmm10 #define RNOT %xmm11 #define RK0 %xmm12 #define RK1 %xmm13 #define RK2 %xmm14 #define RK3 %xmm15 #define S0_1(x0, x1, x2, x3, x4) \ vpor x0, x3, tp; \ vpxor x3, x0, x0; \ vpxor x2, x3, x4; \ vpxor RNOT, x4, x4; \ vpxor x1, tp, x3; \ vpand x0, x1, x1; \ 
vpxor x4, x1, x1; \ vpxor x0, x2, x2; #define S0_2(x0, x1, x2, x3, x4) \ vpxor x3, x0, x0; \ vpor x0, x4, x4; \ vpxor x2, x0, x0; \ vpand x1, x2, x2; \ vpxor x2, x3, x3; \ vpxor RNOT, x1, x1; \ vpxor x4, x2, x2; \ vpxor x2, x1, x1; #define S1_1(x0, x1, x2, x3, x4) \ vpxor x0, x1, tp; \ vpxor x3, x0, x0; \ vpxor RNOT, x3, x3; \ vpand tp, x1, x4; \ vpor tp, x0, x0; \ vpxor x2, x3, x3; \ vpxor x3, x0, x0; \ vpxor x3, tp, x1; #define S1_2(x0, x1, x2, x3, x4) \ vpxor x4, x3, x3; \ vpor x4, x1, x1; \ vpxor x2, x4, x4; \ vpand x0, x2, x2; \ vpxor x1, x2, x2; \ vpor x0, x1, x1; \ vpxor RNOT, x0, x0; \ vpxor x2, x0, x0; \ vpxor x1, x4, x4; #define S2_1(x0, x1, x2, x3, x4) \ vpxor RNOT, x3, x3; \ vpxor x0, x1, x1; \ vpand x2, x0, tp; \ vpxor x3, tp, tp; \ vpor x0, x3, x3; \ vpxor x1, x2, x2; \ vpxor x1, x3, x3; \ vpand tp, x1, x1; #define S2_2(x0, x1, x2, x3, x4) \ vpxor x2, tp, tp; \ vpand x3, x2, x2; \ vpor x1, x3, x3; \ vpxor RNOT, tp, tp; \ vpxor tp, x3, x3; \ vpxor tp, x0, x4; \ vpxor x2, tp, x0; \ vpor x2, x1, x1; #define S3_1(x0, x1, x2, x3, x4) \ vpxor x3, x1, tp; \ vpor x0, x3, x3; \ vpand x0, x1, x4; \ vpxor x2, x0, x0; \ vpxor tp, x2, x2; \ vpand x3, tp, x1; \ vpxor x3, x2, x2; \ vpor x4, x0, x0; \ vpxor x3, x4, x4; #define S3_2(x0, x1, x2, x3, x4) \ vpxor x0, x1, x1; \ vpand x3, x0, x0; \ vpand x4, x3, x3; \ vpxor x2, x3, x3; \ vpor x1, x4, x4; \ vpand x1, x2, x2; \ vpxor x3, x4, x4; \ vpxor x3, x0, x0; \ vpxor x2, x3, x3; #define S4_1(x0, x1, x2, x3, x4) \ vpand x0, x3, tp; \ vpxor x3, x0, x0; \ vpxor x2, tp, tp; \ vpor x3, x2, x2; \ vpxor x1, x0, x0; \ vpxor tp, x3, x4; \ vpor x0, x2, x2; \ vpxor x1, x2, x2; #define S4_2(x0, x1, x2, x3, x4) \ vpand x0, x1, x1; \ vpxor x4, x1, x1; \ vpand x2, x4, x4; \ vpxor tp, x2, x2; \ vpxor x0, x4, x4; \ vpor x1, tp, x3; \ vpxor RNOT, x1, x1; \ vpxor x0, x3, x3; #define S5_1(x0, x1, x2, x3, x4) \ vpor x0, x1, tp; \ vpxor tp, x2, x2; \ vpxor RNOT, x3, x3; \ vpxor x0, x1, x4; \ vpxor x2, x0, x0; \ vpand x4, tp, x1; \ vpor x3, 
x4, x4; \ vpxor x0, x4, x4; #define S5_2(x0, x1, x2, x3, x4) \ vpand x3, x0, x0; \ vpxor x3, x1, x1; \ vpxor x2, x3, x3; \ vpxor x1, x0, x0; \ vpand x4, x2, x2; \ vpxor x2, x1, x1; \ vpand x0, x2, x2; \ vpxor x2, x3, x3; #define S6_1(x0, x1, x2, x3, x4) \ vpxor x0, x3, x3; \ vpxor x2, x1, tp; \ vpxor x0, x2, x2; \ vpand x3, x0, x0; \ vpor x3, tp, tp; \ vpxor RNOT, x1, x4; \ vpxor tp, x0, x0; \ vpxor x2, tp, x1; #define S6_2(x0, x1, x2, x3, x4) \ vpxor x4, x3, x3; \ vpxor x0, x4, x4; \ vpand x0, x2, x2; \ vpxor x1, x4, x4; \ vpxor x3, x2, x2; \ vpand x1, x3, x3; \ vpxor x0, x3, x3; \ vpxor x2, x1, x1; #define S7_1(x0, x1, x2, x3, x4) \ vpxor RNOT, x1, tp; \ vpxor RNOT, x0, x0; \ vpand x2, tp, x1; \ vpxor x3, x1, x1; \ vpor tp, x3, x3; \ vpxor x2, tp, x4; \ vpxor x3, x2, x2; \ vpxor x0, x3, x3; \ vpor x1, x0, x0; #define S7_2(x0, x1, x2, x3, x4) \ vpand x0, x2, x2; \ vpxor x4, x0, x0; \ vpxor x3, x4, x4; \ vpand x0, x3, x3; \ vpxor x1, x4, x4; \ vpxor x4, x2, x2; \ vpxor x1, x3, x3; \ vpor x0, x4, x4; \ vpxor x1, x4, x4; #define SI0_1(x0, x1, x2, x3, x4) \ vpxor x0, x1, x1; \ vpor x1, x3, tp; \ vpxor x1, x3, x4; \ vpxor RNOT, x0, x0; \ vpxor tp, x2, x2; \ vpxor x0, tp, x3; \ vpand x1, x0, x0; \ vpxor x2, x0, x0; #define SI0_2(x0, x1, x2, x3, x4) \ vpand x3, x2, x2; \ vpxor x4, x3, x3; \ vpxor x3, x2, x2; \ vpxor x3, x1, x1; \ vpand x0, x3, x3; \ vpxor x0, x1, x1; \ vpxor x2, x0, x0; \ vpxor x3, x4, x4; #define SI1_1(x0, x1, x2, x3, x4) \ vpxor x3, x1, x1; \ vpxor x2, x0, tp; \ vpxor RNOT, x2, x2; \ vpor x1, x0, x4; \ vpxor x3, x4, x4; \ vpand x1, x3, x3; \ vpxor x2, x1, x1; \ vpand x4, x2, x2; #define SI1_2(x0, x1, x2, x3, x4) \ vpxor x1, x4, x4; \ vpor x3, x1, x1; \ vpxor tp, x3, x3; \ vpxor tp, x2, x2; \ vpor x4, tp, x0; \ vpxor x4, x2, x2; \ vpxor x0, x1, x1; \ vpxor x1, x4, x4; #define SI2_1(x0, x1, x2, x3, x4) \ vpxor x1, x2, x2; \ vpxor RNOT, x3, tp; \ vpor x2, tp, tp; \ vpxor x3, x2, x2; \ vpxor x0, x3, x4; \ vpxor x1, tp, x3; \ vpor x2, x1, x1; \ vpxor x0, 
x2, x2; #define SI2_2(x0, x1, x2, x3, x4) \ vpxor x4, x1, x1; \ vpor x3, x4, x4; \ vpxor x3, x2, x2; \ vpxor x2, x4, x4; \ vpand x1, x2, x2; \ vpxor x3, x2, x2; \ vpxor x4, x3, x3; \ vpxor x0, x4, x4; #define SI3_1(x0, x1, x2, x3, x4) \ vpxor x1, x2, x2; \ vpand x2, x1, tp; \ vpxor x0, tp, tp; \ vpor x1, x0, x0; \ vpxor x3, x1, x4; \ vpxor x3, x0, x0; \ vpor tp, x3, x3; \ vpxor x2, tp, x1; #define SI3_2(x0, x1, x2, x3, x4) \ vpxor x3, x1, x1; \ vpxor x2, x0, x0; \ vpxor x3, x2, x2; \ vpand x1, x3, x3; \ vpxor x0, x1, x1; \ vpand x2, x0, x0; \ vpxor x3, x4, x4; \ vpxor x0, x3, x3; \ vpxor x1, x0, x0; #define SI4_1(x0, x1, x2, x3, x4) \ vpxor x3, x2, x2; \ vpand x1, x0, tp; \ vpxor x2, tp, tp; \ vpor x3, x2, x2; \ vpxor RNOT, x0, x4; \ vpxor tp, x1, x1; \ vpxor x2, tp, x0; \ vpand x4, x2, x2; #define SI4_2(x0, x1, x2, x3, x4) \ vpxor x0, x2, x2; \ vpor x4, x0, x0; \ vpxor x3, x0, x0; \ vpand x2, x3, x3; \ vpxor x3, x4, x4; \ vpxor x1, x3, x3; \ vpand x0, x1, x1; \ vpxor x1, x4, x4; \ vpxor x3, x0, x0; #define SI5_1(x0, x1, x2, x3, x4) \ vpor x2, x1, tp; \ vpxor x1, x2, x2; \ vpxor x3, tp, tp; \ vpand x1, x3, x3; \ vpxor x3, x2, x2; \ vpor x0, x3, x3; \ vpxor RNOT, x0, x0; \ vpxor x2, x3, x3; \ vpor x0, x2, x2; #define SI5_2(x0, x1, x2, x3, x4) \ vpxor tp, x1, x4; \ vpxor x4, x2, x2; \ vpand x0, x4, x4; \ vpxor tp, x0, x0; \ vpxor x3, tp, x1; \ vpand x2, x0, x0; \ vpxor x3, x2, x2; \ vpxor x2, x0, x0; \ vpxor x4, x2, x2; \ vpxor x3, x4, x4; #define SI6_1(x0, x1, x2, x3, x4) \ vpxor x2, x0, x0; \ vpand x3, x0, tp; \ vpxor x3, x2, x2; \ vpxor x2, tp, tp; \ vpxor x1, x3, x3; \ vpor x0, x2, x2; \ vpxor x3, x2, x2; \ vpand tp, x3, x3; #define SI6_2(x0, x1, x2, x3, x4) \ vpxor RNOT, tp, tp; \ vpxor x1, x3, x3; \ vpand x2, x1, x1; \ vpxor tp, x0, x4; \ vpxor x4, x3, x3; \ vpxor x2, x4, x4; \ vpxor x1, tp, x0; \ vpxor x0, x2, x2; #define SI7_1(x0, x1, x2, x3, x4) \ vpand x0, x3, tp; \ vpxor x2, x0, x0; \ vpor x3, x2, x2; \ vpxor x1, x3, x4; \ vpxor RNOT, x0, x0; \ vpor tp, 
x1, x1; \ vpxor x0, x4, x4; \ vpand x2, x0, x0; \ vpxor x1, x0, x0; #define SI7_2(x0, x1, x2, x3, x4) \ vpand x2, x1, x1; \ vpxor x2, tp, x3; \ vpxor x3, x4, x4; \ vpand x3, x2, x2; \ vpor x0, x3, x3; \ vpxor x4, x1, x1; \ vpxor x4, x3, x3; \ vpand x0, x4, x4; \ vpxor x2, x4, x4; #define get_key(i, j, t) \ vbroadcastss (4*(i)+(j))*4(CTX), t; #define K2(x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ get_key(i, 1, RK1); \ get_key(i, 2, RK2); \ get_key(i, 3, RK3); \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; #define LK2(x0, x1, x2, x3, x4, i) \ vpslld $13, x0 ## 1, x4 ## 1; \ vpsrld $(32 - 13), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x2 ## 1, x4 ## 1; \ vpsrld $(32 - 3), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ vpslld $13, x0 ## 2, x4 ## 2; \ vpsrld $(32 - 13), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x2 ## 2, x4 ## 2; \ vpsrld $(32 - 3), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ vpslld $1, x1 ## 1, x4 ## 1; \ vpsrld $(32 - 1), x1 ## 1, x1 ## 1; \ vpor x4 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x0 ## 1, x4 ## 1; \ vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ get_key(i, 1, RK1); \ vpslld $1, x1 ## 2, x4 ## 2; \ vpsrld $(32 - 1), x1 ## 2, x1 ## 2; \ vpor x4 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x0 ## 2, x4 ## 2; \ vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ get_key(i, 3, RK3); \ vpslld $7, x3 ## 1, x4 ## 1; \ vpsrld $(32 - 7), x3 ## 1, x3 ## 1; \ vpor x4 ## 1, x3 ## 1, x3 ## 1; \ vpslld $7, x1 ## 1, x4 ## 1; \ vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ 
vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ get_key(i, 0, RK0); \ vpslld $7, x3 ## 2, x4 ## 2; \ vpsrld $(32 - 7), x3 ## 2, x3 ## 2; \ vpor x4 ## 2, x3 ## 2, x3 ## 2; \ vpslld $7, x1 ## 2, x4 ## 2; \ vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ get_key(i, 2, RK2); \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpslld $5, x0 ## 1, x4 ## 1; \ vpsrld $(32 - 5), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpslld $22, x2 ## 1, x4 ## 1; \ vpsrld $(32 - 22), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; \ vpslld $5, x0 ## 2, x4 ## 2; \ vpsrld $(32 - 5), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpslld $22, x2 ## 2, x4 ## 2; \ vpsrld $(32 - 22), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; #define KL2(x0, x1, x2, x3, x4, i) \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpsrld $5, x0 ## 1, x4 ## 1; \ vpslld $(32 - 5), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpsrld $22, x2 ## 1, x4 ## 1; \ vpslld $(32 - 22), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; \ vpsrld $5, x0 ## 2, x4 ## 2; \ vpslld $(32 - 5), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpsrld $22, x2 ## 2, x4 ## 2; \ vpslld $(32 - 22), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ vpslld $7, x1 ## 1, x4 ## 1; \ vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ vpsrld $1, x1 ## 1, x4 ## 1; \ vpslld $(32 - 1), x1 ## 1, x1 ## 1; \ vpor x4 
## 1, x1 ## 1, x1 ## 1; \ vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ vpslld $7, x1 ## 2, x4 ## 2; \ vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ vpsrld $1, x1 ## 2, x4 ## 2; \ vpslld $(32 - 1), x1 ## 2, x1 ## 2; \ vpor x4 ## 2, x1 ## 2, x1 ## 2; \ vpsrld $7, x3 ## 1, x4 ## 1; \ vpslld $(32 - 7), x3 ## 1, x3 ## 1; \ vpor x4 ## 1, x3 ## 1, x3 ## 1; \ vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x0 ## 1, x4 ## 1; \ vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ vpsrld $7, x3 ## 2, x4 ## 2; \ vpslld $(32 - 7), x3 ## 2, x3 ## 2; \ vpor x4 ## 2, x3 ## 2, x3 ## 2; \ vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x0 ## 2, x4 ## 2; \ vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ vpsrld $13, x0 ## 1, x4 ## 1; \ vpslld $(32 - 13), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ vpsrld $3, x2 ## 1, x4 ## 1; \ vpslld $(32 - 3), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpsrld $13, x0 ## 2, x4 ## 2; \ vpslld $(32 - 13), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ vpsrld $3, x2 ## 2, x4 ## 2; \ vpslld $(32 - 3), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; #define S(SBOX, x0, x1, x2, x3, x4) \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); #define SP(SBOX, x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 2, RK2); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 3, RK3); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ get_key(i, 1, RK1); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t2; \ vpunpckldq x3, x2, t1; \ 
vpunpckhdq x3, x2, x3; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; \ vpunpcklqdq x3, t2, x2; \ vpunpckhqdq x3, t2, x3; #define read_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define write_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) .align 8 __serpent_enc_blk8_avx: /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks */ vpcmpeqd RNOT, RNOT, RNOT; read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 0); S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1); S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2); S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3); S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4); S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5); S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6); S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7); S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8); S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9); S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10); S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11); S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, RD, RA, RB, 12); S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13); S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14); S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15); S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16); S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17); S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18); S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19); S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20); S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21); S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22); S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23); S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24); S(S0, RB, RE, RA, 
RD, RC); LK2(RA, RE, RD, RB, RC, 25); S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26); S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27); S(S3, RE, RD, RC, RA, RB); LK2(RA, RB, RD, RE, RC, 28); S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29); S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30); S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31); S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32); write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); ret; ENDPROC(__serpent_enc_blk8_avx) .align 8 __serpent_dec_blk8_avx: /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks * output: * RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2: decrypted blocks */ vpcmpeqd RNOT, RNOT, RNOT; read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 32); SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31); SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30); SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29); SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28); SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27); SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26); SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25); SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24); SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23); SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22); SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21); SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20); SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19); SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18); SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17); SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16); SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15); SP(SI6, RA, RC, RD, RB, RE, 14); 
KL2(RD, RE, RB, RA, RC, 14); SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13); SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, RA, RB, RC, 12); SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11); SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10); SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9); SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8); SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7); SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6); SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5); SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4); SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3); SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2); SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1); S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0); write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); ret; ENDPROC(__serpent_dec_blk8_avx) ENTRY(serpent_ecb_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_enc_blk8_avx; store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END ret; ENDPROC(serpent_ecb_enc_8way_avx) ENTRY(serpent_ecb_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_dec_blk8_avx; store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END ret; ENDPROC(serpent_ecb_dec_8way_avx) ENTRY(serpent_cbc_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_dec_blk8_avx; store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END ret; ENDPROC(serpent_cbc_dec_8way_avx) ENTRY(serpent_ctr_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: iv (little endian, 
128bit) */ FRAME_BEGIN load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK1, RK2); call __serpent_enc_blk8_avx; store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END ret; ENDPROC(serpent_ctr_8way_avx) ENTRY(serpent_xts_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN /* regs <= src, dst <= IVs, regs <= regs xor IVs */ load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask); call __serpent_enc_blk8_avx; /* dst <= regs xor IVs(in dst) */ store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END ret; ENDPROC(serpent_xts_enc_8way_avx) ENTRY(serpent_xts_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN /* regs <= src, dst <= IVs, regs <= regs xor IVs */ load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask); call __serpent_dec_blk8_avx; /* dst <= regs xor IVs(in dst) */ store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END ret; ENDPROC(serpent_xts_dec_8way_avx)
AirFortressIlikara/LS2K0300-linux-4.19
11,315
arch/x86/crypto/poly1305-sse2-x86_64.S
/* * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions * * Copyright (C) 2015 Martin Willi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> .section .rodata.cst16.ANMASK, "aM", @progbits, 16 .align 16 ANMASK: .octa 0x0000000003ffffff0000000003ffffff .section .rodata.cst16.ORMASK, "aM", @progbits, 16 .align 16 ORMASK: .octa 0x00000000010000000000000001000000 .text #define h0 0x00(%rdi) #define h1 0x04(%rdi) #define h2 0x08(%rdi) #define h3 0x0c(%rdi) #define h4 0x10(%rdi) #define r0 0x00(%rdx) #define r1 0x04(%rdx) #define r2 0x08(%rdx) #define r3 0x0c(%rdx) #define r4 0x10(%rdx) #define s1 0x00(%rsp) #define s2 0x04(%rsp) #define s3 0x08(%rsp) #define s4 0x0c(%rsp) #define m %rsi #define h01 %xmm0 #define h23 %xmm1 #define h44 %xmm2 #define t1 %xmm3 #define t2 %xmm4 #define t3 %xmm5 #define t4 %xmm6 #define mask %xmm7 #define d0 %r8 #define d1 %r9 #define d2 %r10 #define d3 %r11 #define d4 %r12 ENTRY(poly1305_block_sse2) # %rdi: Accumulator h[5] # %rsi: 16 byte input block m # %rdx: Poly1305 key r[5] # %rcx: Block count # This single block variant tries to improve performance by doing two # multiplications in parallel using SSE instructions. There is quite # some quardword packing involved, hence the speedup is marginal. 
push %rbx push %r12 sub $0x10,%rsp # s1..s4 = r1..r4 * 5 mov r1,%eax lea (%eax,%eax,4),%eax mov %eax,s1 mov r2,%eax lea (%eax,%eax,4),%eax mov %eax,s2 mov r3,%eax lea (%eax,%eax,4),%eax mov %eax,s3 mov r4,%eax lea (%eax,%eax,4),%eax mov %eax,s4 movdqa ANMASK(%rip),mask .Ldoblock: # h01 = [0, h1, 0, h0] # h23 = [0, h3, 0, h2] # h44 = [0, h4, 0, h4] movd h0,h01 movd h1,t1 movd h2,h23 movd h3,t2 movd h4,h44 punpcklqdq t1,h01 punpcklqdq t2,h23 punpcklqdq h44,h44 # h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ] movd 0x00(m),t1 movd 0x03(m),t2 psrld $2,t2 punpcklqdq t2,t1 pand mask,t1 paddd t1,h01 # h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ] movd 0x06(m),t1 movd 0x09(m),t2 psrld $4,t1 psrld $6,t2 punpcklqdq t2,t1 pand mask,t1 paddd t1,h23 # h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ] mov 0x0c(m),%eax shr $8,%eax or $0x01000000,%eax movd %eax,t1 pshufd $0xc4,t1,t1 paddd t1,h44 # t1[0] = h0 * r0 + h2 * s3 # t1[1] = h1 * s4 + h3 * s2 movd r0,t1 movd s4,t2 punpcklqdq t2,t1 pmuludq h01,t1 movd s3,t2 movd s2,t3 punpcklqdq t3,t2 pmuludq h23,t2 paddq t2,t1 # t2[0] = h0 * r1 + h2 * s4 # t2[1] = h1 * r0 + h3 * s3 movd r1,t2 movd r0,t3 punpcklqdq t3,t2 pmuludq h01,t2 movd s4,t3 movd s3,t4 punpcklqdq t4,t3 pmuludq h23,t3 paddq t3,t2 # t3[0] = h4 * s1 # t3[1] = h4 * s2 movd s1,t3 movd s2,t4 punpcklqdq t4,t3 pmuludq h44,t3 # d0 = t1[0] + t1[1] + t3[0] # d1 = t2[0] + t2[1] + t3[1] movdqa t1,t4 punpcklqdq t2,t4 punpckhqdq t2,t1 paddq t4,t1 paddq t3,t1 movq t1,d0 psrldq $8,t1 movq t1,d1 # t1[0] = h0 * r2 + h2 * r0 # t1[1] = h1 * r1 + h3 * s4 movd r2,t1 movd r1,t2 punpcklqdq t2,t1 pmuludq h01,t1 movd r0,t2 movd s4,t3 punpcklqdq t3,t2 pmuludq h23,t2 paddq t2,t1 # t2[0] = h0 * r3 + h2 * r1 # t2[1] = h1 * r2 + h3 * r0 movd r3,t2 movd r2,t3 punpcklqdq t3,t2 pmuludq h01,t2 movd r1,t3 movd r0,t4 punpcklqdq t4,t3 pmuludq h23,t3 paddq t3,t2 # t3[0] = h4 * s3 # t3[1] = h4 * s4 movd s3,t3 movd s4,t4 punpcklqdq t4,t3 pmuludq h44,t3 # d2 = t1[0] 
+ t1[1] + t3[0] # d3 = t2[0] + t2[1] + t3[1] movdqa t1,t4 punpcklqdq t2,t4 punpckhqdq t2,t1 paddq t4,t1 paddq t3,t1 movq t1,d2 psrldq $8,t1 movq t1,d3 # t1[0] = h0 * r4 + h2 * r2 # t1[1] = h1 * r3 + h3 * r1 movd r4,t1 movd r3,t2 punpcklqdq t2,t1 pmuludq h01,t1 movd r2,t2 movd r1,t3 punpcklqdq t3,t2 pmuludq h23,t2 paddq t2,t1 # t3[0] = h4 * r0 movd r0,t3 pmuludq h44,t3 # d4 = t1[0] + t1[1] + t3[0] movdqa t1,t4 psrldq $8,t4 paddq t4,t1 paddq t3,t1 movq t1,d4 # d1 += d0 >> 26 mov d0,%rax shr $26,%rax add %rax,d1 # h0 = d0 & 0x3ffffff mov d0,%rbx and $0x3ffffff,%ebx # d2 += d1 >> 26 mov d1,%rax shr $26,%rax add %rax,d2 # h1 = d1 & 0x3ffffff mov d1,%rax and $0x3ffffff,%eax mov %eax,h1 # d3 += d2 >> 26 mov d2,%rax shr $26,%rax add %rax,d3 # h2 = d2 & 0x3ffffff mov d2,%rax and $0x3ffffff,%eax mov %eax,h2 # d4 += d3 >> 26 mov d3,%rax shr $26,%rax add %rax,d4 # h3 = d3 & 0x3ffffff mov d3,%rax and $0x3ffffff,%eax mov %eax,h3 # h0 += (d4 >> 26) * 5 mov d4,%rax shr $26,%rax lea (%rax,%rax,4),%rax add %rax,%rbx # h4 = d4 & 0x3ffffff mov d4,%rax and $0x3ffffff,%eax mov %eax,h4 # h1 += h0 >> 26 mov %rbx,%rax shr $26,%rax add %eax,h1 # h0 = h0 & 0x3ffffff andl $0x3ffffff,%ebx mov %ebx,h0 add $0x10,m dec %rcx jnz .Ldoblock add $0x10,%rsp pop %r12 pop %rbx ret ENDPROC(poly1305_block_sse2) #define u0 0x00(%r8) #define u1 0x04(%r8) #define u2 0x08(%r8) #define u3 0x0c(%r8) #define u4 0x10(%r8) #define hc0 %xmm0 #define hc1 %xmm1 #define hc2 %xmm2 #define hc3 %xmm5 #define hc4 %xmm6 #define ru0 %xmm7 #define ru1 %xmm8 #define ru2 %xmm9 #define ru3 %xmm10 #define ru4 %xmm11 #define sv1 %xmm12 #define sv2 %xmm13 #define sv3 %xmm14 #define sv4 %xmm15 #undef d0 #define d0 %r13 ENTRY(poly1305_2block_sse2) # %rdi: Accumulator h[5] # %rsi: 16 byte input block m # %rdx: Poly1305 key r[5] # %rcx: Doubleblock count # %r8: Poly1305 derived key r^2 u[5] # This two-block variant further improves performance by using loop # unrolled block processing. 
This is more straight forward and does # less byte shuffling, but requires a second Poly1305 key r^2: # h = (h + m) * r => h = (h + m1) * r^2 + m2 * r push %rbx push %r12 push %r13 # combine r0,u0 movd u0,ru0 movd r0,t1 punpcklqdq t1,ru0 # combine r1,u1 and s1=r1*5,v1=u1*5 movd u1,ru1 movd r1,t1 punpcklqdq t1,ru1 movdqa ru1,sv1 pslld $2,sv1 paddd ru1,sv1 # combine r2,u2 and s2=r2*5,v2=u2*5 movd u2,ru2 movd r2,t1 punpcklqdq t1,ru2 movdqa ru2,sv2 pslld $2,sv2 paddd ru2,sv2 # combine r3,u3 and s3=r3*5,v3=u3*5 movd u3,ru3 movd r3,t1 punpcklqdq t1,ru3 movdqa ru3,sv3 pslld $2,sv3 paddd ru3,sv3 # combine r4,u4 and s4=r4*5,v4=u4*5 movd u4,ru4 movd r4,t1 punpcklqdq t1,ru4 movdqa ru4,sv4 pslld $2,sv4 paddd ru4,sv4 .Ldoblock2: # hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ] movd 0x00(m),hc0 movd 0x10(m),t1 punpcklqdq t1,hc0 pand ANMASK(%rip),hc0 movd h0,t1 paddd t1,hc0 # hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ] movd 0x03(m),hc1 movd 0x13(m),t1 punpcklqdq t1,hc1 psrld $2,hc1 pand ANMASK(%rip),hc1 movd h1,t1 paddd t1,hc1 # hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ] movd 0x06(m),hc2 movd 0x16(m),t1 punpcklqdq t1,hc2 psrld $4,hc2 pand ANMASK(%rip),hc2 movd h2,t1 paddd t1,hc2 # hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ] movd 0x09(m),hc3 movd 0x19(m),t1 punpcklqdq t1,hc3 psrld $6,hc3 pand ANMASK(%rip),hc3 movd h3,t1 paddd t1,hc3 # hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ] movd 0x0c(m),hc4 movd 0x1c(m),t1 punpcklqdq t1,hc4 psrld $8,hc4 por ORMASK(%rip),hc4 movd h4,t1 paddd t1,hc4 # t1 = [ hc0[1] * r0, hc0[0] * u0 ] movdqa ru0,t1 pmuludq hc0,t1 # t1 += [ hc1[1] * s4, hc1[0] * v4 ] movdqa sv4,t2 pmuludq hc1,t2 paddq t2,t1 # t1 += [ hc2[1] * s3, hc2[0] * v3 ] movdqa sv3,t2 pmuludq hc2,t2 paddq t2,t1 # t1 += [ hc3[1] * s2, hc3[0] * v2 ] movdqa sv2,t2 pmuludq hc3,t2 paddq t2,t1 # t1 += [ hc4[1] * s1, hc4[0] * v1 ] movdqa sv1,t2 pmuludq hc4,t2 paddq t2,t1 # d0 = t1[0] + 
t1[1] movdqa t1,t2 psrldq $8,t2 paddq t2,t1 movq t1,d0 # t1 = [ hc0[1] * r1, hc0[0] * u1 ] movdqa ru1,t1 pmuludq hc0,t1 # t1 += [ hc1[1] * r0, hc1[0] * u0 ] movdqa ru0,t2 pmuludq hc1,t2 paddq t2,t1 # t1 += [ hc2[1] * s4, hc2[0] * v4 ] movdqa sv4,t2 pmuludq hc2,t2 paddq t2,t1 # t1 += [ hc3[1] * s3, hc3[0] * v3 ] movdqa sv3,t2 pmuludq hc3,t2 paddq t2,t1 # t1 += [ hc4[1] * s2, hc4[0] * v2 ] movdqa sv2,t2 pmuludq hc4,t2 paddq t2,t1 # d1 = t1[0] + t1[1] movdqa t1,t2 psrldq $8,t2 paddq t2,t1 movq t1,d1 # t1 = [ hc0[1] * r2, hc0[0] * u2 ] movdqa ru2,t1 pmuludq hc0,t1 # t1 += [ hc1[1] * r1, hc1[0] * u1 ] movdqa ru1,t2 pmuludq hc1,t2 paddq t2,t1 # t1 += [ hc2[1] * r0, hc2[0] * u0 ] movdqa ru0,t2 pmuludq hc2,t2 paddq t2,t1 # t1 += [ hc3[1] * s4, hc3[0] * v4 ] movdqa sv4,t2 pmuludq hc3,t2 paddq t2,t1 # t1 += [ hc4[1] * s3, hc4[0] * v3 ] movdqa sv3,t2 pmuludq hc4,t2 paddq t2,t1 # d2 = t1[0] + t1[1] movdqa t1,t2 psrldq $8,t2 paddq t2,t1 movq t1,d2 # t1 = [ hc0[1] * r3, hc0[0] * u3 ] movdqa ru3,t1 pmuludq hc0,t1 # t1 += [ hc1[1] * r2, hc1[0] * u2 ] movdqa ru2,t2 pmuludq hc1,t2 paddq t2,t1 # t1 += [ hc2[1] * r1, hc2[0] * u1 ] movdqa ru1,t2 pmuludq hc2,t2 paddq t2,t1 # t1 += [ hc3[1] * r0, hc3[0] * u0 ] movdqa ru0,t2 pmuludq hc3,t2 paddq t2,t1 # t1 += [ hc4[1] * s4, hc4[0] * v4 ] movdqa sv4,t2 pmuludq hc4,t2 paddq t2,t1 # d3 = t1[0] + t1[1] movdqa t1,t2 psrldq $8,t2 paddq t2,t1 movq t1,d3 # t1 = [ hc0[1] * r4, hc0[0] * u4 ] movdqa ru4,t1 pmuludq hc0,t1 # t1 += [ hc1[1] * r3, hc1[0] * u3 ] movdqa ru3,t2 pmuludq hc1,t2 paddq t2,t1 # t1 += [ hc2[1] * r2, hc2[0] * u2 ] movdqa ru2,t2 pmuludq hc2,t2 paddq t2,t1 # t1 += [ hc3[1] * r1, hc3[0] * u1 ] movdqa ru1,t2 pmuludq hc3,t2 paddq t2,t1 # t1 += [ hc4[1] * r0, hc4[0] * u0 ] movdqa ru0,t2 pmuludq hc4,t2 paddq t2,t1 # d4 = t1[0] + t1[1] movdqa t1,t2 psrldq $8,t2 paddq t2,t1 movq t1,d4 # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small # amount. 
Careful: we must not assume the carry bits 'd0 >> 26', # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit # integers. It's true in a single-block implementation, but not here. # d1 += d0 >> 26 mov d0,%rax shr $26,%rax add %rax,d1 # h0 = d0 & 0x3ffffff mov d0,%rbx and $0x3ffffff,%ebx # d2 += d1 >> 26 mov d1,%rax shr $26,%rax add %rax,d2 # h1 = d1 & 0x3ffffff mov d1,%rax and $0x3ffffff,%eax mov %eax,h1 # d3 += d2 >> 26 mov d2,%rax shr $26,%rax add %rax,d3 # h2 = d2 & 0x3ffffff mov d2,%rax and $0x3ffffff,%eax mov %eax,h2 # d4 += d3 >> 26 mov d3,%rax shr $26,%rax add %rax,d4 # h3 = d3 & 0x3ffffff mov d3,%rax and $0x3ffffff,%eax mov %eax,h3 # h0 += (d4 >> 26) * 5 mov d4,%rax shr $26,%rax lea (%rax,%rax,4),%rax add %rax,%rbx # h4 = d4 & 0x3ffffff mov d4,%rax and $0x3ffffff,%eax mov %eax,h4 # h1 += h0 >> 26 mov %rbx,%rax shr $26,%rax add %eax,h1 # h0 = h0 & 0x3ffffff andl $0x3ffffff,%ebx mov %ebx,h0 add $0x20,m dec %rcx jnz .Ldoblock2 pop %r13 pop %r12 pop %rbx ret ENDPROC(poly1305_2block_sse2)
AirFortressIlikara/LS2K0300-linux-4.19
17,490
arch/x86/crypto/sha256-avx-asm.S
######################################################################## # Implement fast SHA-256 with AVX1 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-256 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## # This code schedules 1 block at a time, with 4 lanes per block ######################################################################## #ifdef CONFIG_AS_AVX #include <linux/linkage.h> ## assume buffers not aligned #define VMOVDQ vmovdqu ################################ Define Macros # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm .macro MY_ROR p1 p2 shld $(32-(\p1)), \p2, \p2 .endm ################################ # COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask # Load xmm with mem and byte swap each dword .macro COPY_XMM_AND_BSWAP p1 p2 p3 VMOVDQ \p2, \p1 vpshufb \p3, \p1, \p1 .endm ################################ X0 = %xmm4 X1 = %xmm5 X2 = %xmm6 X3 = %xmm7 XTMP0 = %xmm0 XTMP1 = %xmm1 XTMP2 = %xmm2 XTMP3 = %xmm3 XTMP4 = %xmm8 XFER = %xmm9 XTMP5 = %xmm11 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA SHUF_DC00 = %xmm12 # shuffle xDxC -> DC00 BYTE_FLIP_MASK = %xmm13 NUM_BLKS = %rdx # 3rd arg INP = %rsi # 2nd arg CTX = %rdi # 1st arg SRND = %rsi # clobbers INP c = %ecx d = %r8d e = %edx TBL = %r12 a = %eax b = %ebx f = %r9d g = %r10d h = %r11d y0 = %r13d y1 = %r14d y2 = %r15d _INP_END_SIZE = 8 _INP_SIZE = 8 _XFER_SIZE = 16 _XMM_SAVE_SIZE = 0 _INP_END = 0 _INP = _INP_END + _INP_END_SIZE _XFER = _INP + _INP_SIZE _XMM_SAVE = _XFER + _XFER_SIZE STACK_SIZE = _XMM_SAVE + _XMM_SAVE_SIZE # rotate_Xs # Rotate values of symbols X0...X3 .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm # ROTATE_ARGS # Rotate values of symbols a...h .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED ## compute s0 four at a time and s1 two at a time ## compute W[-16] + W[-7] 4 at a time mov e, y0 # y0 = e MY_ROR (25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7] MY_ROR (22-13), y1 # y1 = a >> (22-13) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 
# y2 = f MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor a, y1 # y1 = a ^ (a >> (22-13) xor g, y2 # y2 = f^g vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16] xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) ## compute s0 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15] xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add y0, y2 # y2 = S1 + CH add _XFER(%rsp), y2 # y2 = k + w + S1 + CH mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a vpsrld $7, XTMP1, XTMP2 or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c vpslld $(32-7), XTMP1, XTMP3 and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 vpor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS mov e, y0 # y0 = e mov a, y1 # y1 = a MY_ROR (25-11), y0 # y0 = e >> (25-11) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f MY_ROR (22-13), y1 # y1 = a >> (22-13) vpsrld $18, XTMP1, XTMP2 # xor a, y1 # y1 = a ^ (a >> (22-13) MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor g, y2 # y2 = f^g vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) vpslld $(32-18), XTMP1, XTMP1 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) xor g, y2 # y2 = CH = ((f^g)&e)^g vpxor XTMP1, XTMP3, XTMP3 # add y0, y2 # y2 = S1 + CH add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) vpxor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a vpxor 
XTMP4, XTMP3, XTMP1 # XTMP1 = s0 or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c ## compute low s1 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS mov e, y0 # y0 = e mov a, y1 # y1 = a MY_ROR (25-11), y0 # y0 = e >> (25-11) xor e, y0 # y0 = e ^ (e >> (25-11)) MY_ROR (22-13), y1 # y1 = a >> (22-13) mov f, y2 # y2 = f xor a, y1 # y1 = a ^ (a >> (22-13) MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA} xor g, y2 # y2 = f^g vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xBxA} xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xBxA} MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) xor g, y2 # y2 = CH = ((f^g)&e)^g MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) vpxor XTMP3, XTMP2, XTMP2 # add y0, y2 # y2 = S1 + CH MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA} mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA} or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 ## compute high s1 vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC} or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS mov e, y0 # y0 = e MY_ROR (25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a MY_ROR (22-13), y1 # y1 = a >> (22-13) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 
= f MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC} xor a, y1 # y1 = a ^ (a >> (22-13) xor g, y2 # y2 = f^g vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xDxC} xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xDxC} xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g vpxor XTMP3, XTMP2, XTMP2 MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add y0, y2 # y2 = S1 + CH add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 {xDxC} mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00} or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS rotate_Xs .endm ## input is [rsp + _XFER + %1 * 4] .macro DO_ROUND round mov e, y0 # y0 = e MY_ROR (25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a xor e, y0 # y0 = e ^ (e >> (25-11)) MY_ROR (22-13), y1 # y1 = a >> (22-13) mov f, y2 # y2 = f xor a, y1 # y1 = a ^ (a >> (22-13) MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor g, y2 # y2 = f^g xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) and e, y2 # y2 = (f^g)&e xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g add y0, y2 # y2 = S1 + CH MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) offset = \round * 4 + _XFER # add offset(%rsp), y2 # y2 = k + w + S1 + CH mov a, y0 # y0 = a add y2, h # h = h + S1 + 
CH + k + w mov a, y2 # y2 = a or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS .endm ######################################################################## ## void sha256_transform_avx(void *input_data, UINT32 digest[8], UINT64 num_blks) ## arg 1 : pointer to digest ## arg 2 : pointer to input data ## arg 3 : Num blocks ######################################################################## .text ENTRY(sha256_transform_avx) .align 32 pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %rbp movq %rsp, %rbp subq $STACK_SIZE, %rsp # allocate stack space and $~15, %rsp # align stack pointer shl $6, NUM_BLKS # convert to bytes jz done_hash add INP, NUM_BLKS # pointer to end of data mov NUM_BLKS, _INP_END(%rsp) ## load initial digest mov 4*0(CTX), a mov 4*1(CTX), b mov 4*2(CTX), c mov 4*3(CTX), d mov 4*4(CTX), e mov 4*5(CTX), f mov 4*6(CTX), g mov 4*7(CTX), h vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK vmovdqa _SHUF_00BA(%rip), SHUF_00BA vmovdqa _SHUF_DC00(%rip), SHUF_DC00 loop0: lea K256(%rip), TBL ## byte swap first 16 dwords COPY_XMM_AND_BSWAP X0, 0*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X1, 1*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X3, 3*16(INP), BYTE_FLIP_MASK mov INP, _INP(%rsp) ## schedule 48 input dwords, by doing 3 rounds of 16 each mov $3, SRND .align 16 loop1: vpaddd (TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddd 1*16(TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddd 2*16(TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddd 3*16(TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) add $4*16, TBL FOUR_ROUNDS_AND_SCHED sub $1, SRND jne loop1 mov $2, SRND loop2: vpaddd (TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 
DO_ROUND 3 vpaddd 1*16(TBL), X1, XFER vmovdqa XFER, _XFER(%rsp) add $2*16, TBL DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 vmovdqa X2, X0 vmovdqa X3, X1 sub $1, SRND jne loop2 addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h mov _INP(%rsp), INP add $64, INP cmp _INP_END(%rsp), INP jne loop0 done_hash: mov %rbp, %rsp popq %rbp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx ret ENDPROC(sha256_transform_avx) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 .section .rodata.cst16._SHUF_00BA, "aM", @progbits, 16 .align 16 # shuffle xBxA -> 00BA _SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 .section .rodata.cst16._SHUF_DC00, "aM", @progbits, 16 .align 16 # shuffle xDxC -> DC00 _SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF #endif
AirFortressIlikara/LS2K0300-linux-4.19
14,329
arch/x86/crypto/serpent-sse2-i586-asm_32.S
/* * Serpent Cipher 4-way parallel algorithm (i586/SSE2) * * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Based on crypto/serpent.c by * Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no> * 2003 Herbert Valerio Riedel <hvr@gnu.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/linkage.h> .file "serpent-sse2-i586-asm_32.S" .text #define arg_ctx 4 #define arg_dst 8 #define arg_src 12 #define arg_xor 16 /********************************************************************** 4-way SSE2 serpent **********************************************************************/ #define CTX %edx #define RA %xmm0 #define RB %xmm1 #define RC %xmm2 #define RD %xmm3 #define RE %xmm4 #define RT0 %xmm5 #define RT1 %xmm6 #define RNOT %xmm7 #define get_key(i, j, t) \ movd (4*(i)+(j))*4(CTX), t; \ pshufd $0, t, t; #define K(x0, x1, x2, x3, x4, i) \ get_key(i, 0, x4); \ get_key(i, 1, RT0); \ get_key(i, 2, RT1); \ pxor x4, x0; \ pxor RT0, x1; \ pxor RT1, x2; \ get_key(i, 3, x4); \ pxor x4, x3; #define LK(x0, x1, x2, x3, x4, i) \ movdqa x0, x4; \ pslld $13, x0; \ psrld $(32 - 13), x4; \ por x4, x0; \ pxor x0, x1; \ movdqa x2, x4; \ pslld $3, x2; \ psrld $(32 - 3), x4; \ por x4, x2; \ pxor x2, x1; \ movdqa x1, x4; \ pslld $1, x1; \ psrld $(32 - 1), x4; \ por x4, x1; \ movdqa x0, x4; \ pslld $3, x4; \ pxor x2, 
x3; \ pxor x4, x3; \ movdqa x3, x4; \ pslld $7, x3; \ psrld $(32 - 7), x4; \ por x4, x3; \ movdqa x1, x4; \ pslld $7, x4; \ pxor x1, x0; \ pxor x3, x0; \ pxor x3, x2; \ pxor x4, x2; \ movdqa x0, x4; \ get_key(i, 1, RT0); \ pxor RT0, x1; \ get_key(i, 3, RT0); \ pxor RT0, x3; \ pslld $5, x0; \ psrld $(32 - 5), x4; \ por x4, x0; \ movdqa x2, x4; \ pslld $22, x2; \ psrld $(32 - 22), x4; \ por x4, x2; \ get_key(i, 0, RT0); \ pxor RT0, x0; \ get_key(i, 2, RT0); \ pxor RT0, x2; #define KL(x0, x1, x2, x3, x4, i) \ K(x0, x1, x2, x3, x4, i); \ movdqa x0, x4; \ psrld $5, x0; \ pslld $(32 - 5), x4; \ por x4, x0; \ movdqa x2, x4; \ psrld $22, x2; \ pslld $(32 - 22), x4; \ por x4, x2; \ pxor x3, x2; \ pxor x3, x0; \ movdqa x1, x4; \ pslld $7, x4; \ pxor x1, x0; \ pxor x4, x2; \ movdqa x1, x4; \ psrld $1, x1; \ pslld $(32 - 1), x4; \ por x4, x1; \ movdqa x3, x4; \ psrld $7, x3; \ pslld $(32 - 7), x4; \ por x4, x3; \ pxor x0, x1; \ movdqa x0, x4; \ pslld $3, x4; \ pxor x4, x3; \ movdqa x0, x4; \ psrld $13, x0; \ pslld $(32 - 13), x4; \ por x4, x0; \ pxor x2, x1; \ pxor x2, x3; \ movdqa x2, x4; \ psrld $3, x2; \ pslld $(32 - 3), x4; \ por x4, x2; #define S0(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ por x0, x3; \ pxor x4, x0; \ pxor x2, x4; \ pxor RNOT, x4; \ pxor x1, x3; \ pand x0, x1; \ pxor x4, x1; \ pxor x0, x2; \ pxor x3, x0; \ por x0, x4; \ pxor x2, x0; \ pand x1, x2; \ pxor x2, x3; \ pxor RNOT, x1; \ pxor x4, x2; \ pxor x2, x1; #define S1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x0, x1; \ pxor x3, x0; \ pxor RNOT, x3; \ pand x1, x4; \ por x1, x0; \ pxor x2, x3; \ pxor x3, x0; \ pxor x3, x1; \ pxor x4, x3; \ por x4, x1; \ pxor x2, x4; \ pand x0, x2; \ pxor x1, x2; \ por x0, x1; \ pxor RNOT, x0; \ pxor x2, x0; \ pxor x1, x4; #define S2(x0, x1, x2, x3, x4) \ pxor RNOT, x3; \ pxor x0, x1; \ movdqa x0, x4; \ pand x2, x0; \ pxor x3, x0; \ por x4, x3; \ pxor x1, x2; \ pxor x1, x3; \ pand x0, x1; \ pxor x2, x0; \ pand x3, x2; \ por x1, x3; \ pxor RNOT, x0; \ pxor x0, x3; \ pxor x0, 
x4; \ pxor x2, x0; \ por x2, x1; #define S3(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x3, x1; \ por x0, x3; \ pand x0, x4; \ pxor x2, x0; \ pxor x1, x2; \ pand x3, x1; \ pxor x3, x2; \ por x4, x0; \ pxor x3, x4; \ pxor x0, x1; \ pand x3, x0; \ pand x4, x3; \ pxor x2, x3; \ por x1, x4; \ pand x1, x2; \ pxor x3, x4; \ pxor x3, x0; \ pxor x2, x3; #define S4(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pand x0, x3; \ pxor x4, x0; \ pxor x2, x3; \ por x4, x2; \ pxor x1, x0; \ pxor x3, x4; \ por x0, x2; \ pxor x1, x2; \ pand x0, x1; \ pxor x4, x1; \ pand x2, x4; \ pxor x3, x2; \ pxor x0, x4; \ por x1, x3; \ pxor RNOT, x1; \ pxor x0, x3; #define S5(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ por x0, x1; \ pxor x1, x2; \ pxor RNOT, x3; \ pxor x0, x4; \ pxor x2, x0; \ pand x4, x1; \ por x3, x4; \ pxor x0, x4; \ pand x3, x0; \ pxor x3, x1; \ pxor x2, x3; \ pxor x1, x0; \ pand x4, x2; \ pxor x2, x1; \ pand x0, x2; \ pxor x2, x3; #define S6(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x0, x3; \ pxor x2, x1; \ pxor x0, x2; \ pand x3, x0; \ por x3, x1; \ pxor RNOT, x4; \ pxor x1, x0; \ pxor x2, x1; \ pxor x4, x3; \ pxor x0, x4; \ pand x0, x2; \ pxor x1, x4; \ pxor x3, x2; \ pand x1, x3; \ pxor x0, x3; \ pxor x2, x1; #define S7(x0, x1, x2, x3, x4) \ pxor RNOT, x1; \ movdqa x1, x4; \ pxor RNOT, x0; \ pand x2, x1; \ pxor x3, x1; \ por x4, x3; \ pxor x2, x4; \ pxor x3, x2; \ pxor x0, x3; \ por x1, x0; \ pand x0, x2; \ pxor x4, x0; \ pxor x3, x4; \ pand x0, x3; \ pxor x1, x4; \ pxor x4, x2; \ pxor x1, x3; \ por x0, x4; \ pxor x1, x4; #define SI0(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pxor x0, x1; \ por x1, x3; \ pxor x1, x4; \ pxor RNOT, x0; \ pxor x3, x2; \ pxor x0, x3; \ pand x1, x0; \ pxor x2, x0; \ pand x3, x2; \ pxor x4, x3; \ pxor x3, x2; \ pxor x3, x1; \ pand x0, x3; \ pxor x0, x1; \ pxor x2, x0; \ pxor x3, x4; #define SI1(x0, x1, x2, x3, x4) \ pxor x3, x1; \ movdqa x0, x4; \ pxor x2, x0; \ pxor RNOT, x2; \ por x1, x4; \ pxor x3, x4; \ pand x1, x3; \ pxor x2, x1; \ pand x4, x2; \ pxor 
x1, x4; \ por x3, x1; \ pxor x0, x3; \ pxor x0, x2; \ por x4, x0; \ pxor x4, x2; \ pxor x0, x1; \ pxor x1, x4; #define SI2(x0, x1, x2, x3, x4) \ pxor x1, x2; \ movdqa x3, x4; \ pxor RNOT, x3; \ por x2, x3; \ pxor x4, x2; \ pxor x0, x4; \ pxor x1, x3; \ por x2, x1; \ pxor x0, x2; \ pxor x4, x1; \ por x3, x4; \ pxor x3, x2; \ pxor x2, x4; \ pand x1, x2; \ pxor x3, x2; \ pxor x4, x3; \ pxor x0, x4; #define SI3(x0, x1, x2, x3, x4) \ pxor x1, x2; \ movdqa x1, x4; \ pand x2, x1; \ pxor x0, x1; \ por x4, x0; \ pxor x3, x4; \ pxor x3, x0; \ por x1, x3; \ pxor x2, x1; \ pxor x3, x1; \ pxor x2, x0; \ pxor x3, x2; \ pand x1, x3; \ pxor x0, x1; \ pand x2, x0; \ pxor x3, x4; \ pxor x0, x3; \ pxor x1, x0; #define SI4(x0, x1, x2, x3, x4) \ pxor x3, x2; \ movdqa x0, x4; \ pand x1, x0; \ pxor x2, x0; \ por x3, x2; \ pxor RNOT, x4; \ pxor x0, x1; \ pxor x2, x0; \ pand x4, x2; \ pxor x0, x2; \ por x4, x0; \ pxor x3, x0; \ pand x2, x3; \ pxor x3, x4; \ pxor x1, x3; \ pand x0, x1; \ pxor x1, x4; \ pxor x3, x0; #define SI5(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ por x2, x1; \ pxor x4, x2; \ pxor x3, x1; \ pand x4, x3; \ pxor x3, x2; \ por x0, x3; \ pxor RNOT, x0; \ pxor x2, x3; \ por x0, x2; \ pxor x1, x4; \ pxor x4, x2; \ pand x0, x4; \ pxor x1, x0; \ pxor x3, x1; \ pand x2, x0; \ pxor x3, x2; \ pxor x2, x0; \ pxor x4, x2; \ pxor x3, x4; #define SI6(x0, x1, x2, x3, x4) \ pxor x2, x0; \ movdqa x0, x4; \ pand x3, x0; \ pxor x3, x2; \ pxor x2, x0; \ pxor x1, x3; \ por x4, x2; \ pxor x3, x2; \ pand x0, x3; \ pxor RNOT, x0; \ pxor x1, x3; \ pand x2, x1; \ pxor x0, x4; \ pxor x4, x3; \ pxor x2, x4; \ pxor x1, x0; \ pxor x0, x2; #define SI7(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pand x0, x3; \ pxor x2, x0; \ por x4, x2; \ pxor x1, x4; \ pxor RNOT, x0; \ por x3, x1; \ pxor x0, x4; \ pand x2, x0; \ pxor x1, x0; \ pand x2, x1; \ pxor x2, x3; \ pxor x3, x4; \ pand x3, x2; \ por x0, x3; \ pxor x4, x1; \ pxor x4, x3; \ pand x0, x4; \ pxor x2, x4; #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ 
movdqa x0, t2; \ punpckldq x1, x0; \ punpckhdq x1, t2; \ movdqa x2, t1; \ punpckhdq x3, x2; \ punpckldq x3, t1; \ movdqa x0, x1; \ punpcklqdq t1, x0; \ punpckhqdq t1, x1; \ movdqa t2, x3; \ punpcklqdq x2, t2; \ punpckhqdq x2, x3; \ movdqa t2, x2; #define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \ movdqu (0*4*4)(in), x0; \ movdqu (1*4*4)(in), x1; \ movdqu (2*4*4)(in), x2; \ movdqu (3*4*4)(in), x3; \ \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ movdqu x0, (0*4*4)(out); \ movdqu x1, (1*4*4)(out); \ movdqu x2, (2*4*4)(out); \ movdqu x3, (3*4*4)(out); #define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ movdqu (0*4*4)(out), t0; \ pxor t0, x0; \ movdqu x0, (0*4*4)(out); \ movdqu (1*4*4)(out), t0; \ pxor t0, x1; \ movdqu x1, (1*4*4)(out); \ movdqu (2*4*4)(out), t0; \ pxor t0, x2; \ movdqu x2, (2*4*4)(out); \ movdqu (3*4*4)(out), t0; \ pxor t0, x3; \ movdqu x3, (3*4*4)(out); ENTRY(__serpent_enc_blk_4way) /* input: * arg_ctx(%esp): ctx, CTX * arg_dst(%esp): dst * arg_src(%esp): src * arg_xor(%esp): bool, if true: xor output */ pcmpeqd RNOT, RNOT; movl arg_ctx(%esp), CTX; movl arg_src(%esp), %eax; read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); K(RA, RB, RC, RD, RE, 0); S0(RA, RB, RC, RD, RE); LK(RC, RB, RD, RA, RE, 1); S1(RC, RB, RD, RA, RE); LK(RE, RD, RA, RC, RB, 2); S2(RE, RD, RA, RC, RB); LK(RB, RD, RE, RC, RA, 3); S3(RB, RD, RE, RC, RA); LK(RC, RA, RD, RB, RE, 4); S4(RC, RA, RD, RB, RE); LK(RA, RD, RB, RE, RC, 5); S5(RA, RD, RB, RE, RC); LK(RC, RA, RD, RE, RB, 6); S6(RC, RA, RD, RE, RB); LK(RD, RB, RA, RE, RC, 7); S7(RD, RB, RA, RE, RC); LK(RC, RA, RE, RD, RB, 8); S0(RC, RA, RE, RD, RB); LK(RE, RA, RD, RC, RB, 9); S1(RE, RA, RD, RC, RB); LK(RB, RD, RC, RE, RA, 10); S2(RB, RD, RC, RE, RA); LK(RA, RD, RB, RE, RC, 11); S3(RA, RD, RB, RE, RC); LK(RE, RC, RD, RA, RB, 12); S4(RE, RC, RD, RA, RB); LK(RC, RD, RA, RB, RE, 13); 
S5(RC, RD, RA, RB, RE); LK(RE, RC, RD, RB, RA, 14); S6(RE, RC, RD, RB, RA); LK(RD, RA, RC, RB, RE, 15); S7(RD, RA, RC, RB, RE); LK(RE, RC, RB, RD, RA, 16); S0(RE, RC, RB, RD, RA); LK(RB, RC, RD, RE, RA, 17); S1(RB, RC, RD, RE, RA); LK(RA, RD, RE, RB, RC, 18); S2(RA, RD, RE, RB, RC); LK(RC, RD, RA, RB, RE, 19); S3(RC, RD, RA, RB, RE); LK(RB, RE, RD, RC, RA, 20); S4(RB, RE, RD, RC, RA); LK(RE, RD, RC, RA, RB, 21); S5(RE, RD, RC, RA, RB); LK(RB, RE, RD, RA, RC, 22); S6(RB, RE, RD, RA, RC); LK(RD, RC, RE, RA, RB, 23); S7(RD, RC, RE, RA, RB); LK(RB, RE, RA, RD, RC, 24); S0(RB, RE, RA, RD, RC); LK(RA, RE, RD, RB, RC, 25); S1(RA, RE, RD, RB, RC); LK(RC, RD, RB, RA, RE, 26); S2(RC, RD, RB, RA, RE); LK(RE, RD, RC, RA, RB, 27); S3(RE, RD, RC, RA, RB); LK(RA, RB, RD, RE, RC, 28); S4(RA, RB, RD, RE, RC); LK(RB, RD, RE, RC, RA, 29); S5(RB, RD, RE, RC, RA); LK(RA, RB, RD, RC, RE, 30); S6(RA, RB, RD, RC, RE); LK(RD, RE, RB, RC, RA, 31); S7(RD, RE, RB, RC, RA); K(RA, RB, RC, RD, RE, 32); movl arg_dst(%esp), %eax; cmpb $0, arg_xor(%esp); jnz .L__enc_xor4; write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); ret; .L__enc_xor4: xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); ret; ENDPROC(__serpent_enc_blk_4way) ENTRY(serpent_dec_blk_4way) /* input: * arg_ctx(%esp): ctx, CTX * arg_dst(%esp): dst * arg_src(%esp): src */ pcmpeqd RNOT, RNOT; movl arg_ctx(%esp), CTX; movl arg_src(%esp), %eax; read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); K(RA, RB, RC, RD, RE, 32); SI7(RA, RB, RC, RD, RE); KL(RB, RD, RA, RE, RC, 31); SI6(RB, RD, RA, RE, RC); KL(RA, RC, RE, RB, RD, 30); SI5(RA, RC, RE, RB, RD); KL(RC, RD, RA, RE, RB, 29); SI4(RC, RD, RA, RE, RB); KL(RC, RA, RB, RE, RD, 28); SI3(RC, RA, RB, RE, RD); KL(RB, RC, RD, RE, RA, 27); SI2(RB, RC, RD, RE, RA); KL(RC, RA, RE, RD, RB, 26); SI1(RC, RA, RE, RD, RB); KL(RB, RA, RE, RD, RC, 25); SI0(RB, RA, RE, RD, RC); KL(RE, RC, RA, RB, RD, 24); SI7(RE, RC, RA, RB, RD); KL(RC, RB, RE, RD, RA, 23); SI6(RC, RB, RE, RD, RA); KL(RE, RA, RD, RC, RB, 22); 
SI5(RE, RA, RD, RC, RB); KL(RA, RB, RE, RD, RC, 21); SI4(RA, RB, RE, RD, RC); KL(RA, RE, RC, RD, RB, 20); SI3(RA, RE, RC, RD, RB); KL(RC, RA, RB, RD, RE, 19); SI2(RC, RA, RB, RD, RE); KL(RA, RE, RD, RB, RC, 18); SI1(RA, RE, RD, RB, RC); KL(RC, RE, RD, RB, RA, 17); SI0(RC, RE, RD, RB, RA); KL(RD, RA, RE, RC, RB, 16); SI7(RD, RA, RE, RC, RB); KL(RA, RC, RD, RB, RE, 15); SI6(RA, RC, RD, RB, RE); KL(RD, RE, RB, RA, RC, 14); SI5(RD, RE, RB, RA, RC); KL(RE, RC, RD, RB, RA, 13); SI4(RE, RC, RD, RB, RA); KL(RE, RD, RA, RB, RC, 12); SI3(RE, RD, RA, RB, RC); KL(RA, RE, RC, RB, RD, 11); SI2(RA, RE, RC, RB, RD); KL(RE, RD, RB, RC, RA, 10); SI1(RE, RD, RB, RC, RA); KL(RA, RD, RB, RC, RE, 9); SI0(RA, RD, RB, RC, RE); KL(RB, RE, RD, RA, RC, 8); SI7(RB, RE, RD, RA, RC); KL(RE, RA, RB, RC, RD, 7); SI6(RE, RA, RB, RC, RD); KL(RB, RD, RC, RE, RA, 6); SI5(RB, RD, RC, RE, RA); KL(RD, RA, RB, RC, RE, 5); SI4(RD, RA, RB, RC, RE); KL(RD, RB, RE, RC, RA, 4); SI3(RD, RB, RE, RC, RA); KL(RE, RD, RA, RC, RB, 3); SI2(RE, RD, RA, RC, RB); KL(RD, RB, RC, RA, RE, 2); SI1(RD, RB, RC, RA, RE); KL(RE, RB, RC, RA, RD, 1); SI0(RE, RB, RC, RA, RD); K(RC, RD, RB, RE, RA, 0); movl arg_dst(%esp), %eax; write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); ret; ENDPROC(serpent_dec_blk_4way)
AirFortressIlikara/LS2K0300-linux-4.19
12,315
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
/* * Implement AES CTR mode by8 optimization with AVX instructions. (x86_64) * * This is AES128/192/256 CTR mode optimization implementation. It requires * the support of Intel(R) AESNI and AVX instructions. * * This work was inspired by the AES CTR mode optimization published * in Intel Optimized IPSEC Cryptograhpic library. * Additional information on it can be found at: * http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972 * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * James Guilford <james.guilford@intel.com> * Sean Gulley <sean.m.gulley@intel.com> * Chandramouli Narayanan <mouli@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/linkage.h> #include <asm/inst.h> #define VMOVDQ vmovdqu #define xdata0 %xmm0 #define xdata1 %xmm1 #define xdata2 %xmm2 #define xdata3 %xmm3 #define xdata4 %xmm4 #define xdata5 %xmm5 #define xdata6 %xmm6 #define xdata7 %xmm7 #define xcounter %xmm8 #define xbyteswap %xmm9 #define xkey0 %xmm10 #define xkey4 %xmm11 #define xkey8 %xmm12 #define xkey12 %xmm13 #define xkeyA %xmm14 #define xkeyB %xmm15 #define p_in %rdi #define p_iv %rsi #define p_keys %rdx #define p_out %rcx #define num_bytes %r8 #define tmp %r10 #define DDQ_DATA 0 #define XDATA 1 #define KEY_128 1 #define KEY_192 2 #define KEY_256 3 .section .rodata .align 16 byteswap_const: .octa 0x000102030405060708090A0B0C0D0E0F ddq_low_msk: .octa 0x0000000000000000FFFFFFFFFFFFFFFF ddq_high_add_1: .octa 0x00000000000000010000000000000000 ddq_add_1: .octa 0x00000000000000000000000000000001 ddq_add_2: .octa 0x00000000000000000000000000000002 ddq_add_3: .octa 0x00000000000000000000000000000003 ddq_add_4: .octa 0x00000000000000000000000000000004 ddq_add_5: .octa 0x00000000000000000000000000000005 ddq_add_6: .octa 0x00000000000000000000000000000006 ddq_add_7: .octa 0x00000000000000000000000000000007 ddq_add_8: .octa 
0x00000000000000000000000000000008 .text /* generate a unique variable for ddq_add_x */ /* generate a unique variable for xmm register */ .macro setxdata n var_xdata = %xmm\n .endm /* club the numeric 'id' to the symbol 'name' */ .macro club name, id .altmacro .if \name == XDATA setxdata %\id .endif .noaltmacro .endm /* * do_aes num_in_par load_keys key_len * This increments p_in, but not p_out */ .macro do_aes b, k, key_len .set by, \b .set load_keys, \k .set klen, \key_len .if (load_keys) vmovdqa 0*16(p_keys), xkey0 .endif vpshufb xbyteswap, xcounter, xdata0 .set i, 1 .rept (by - 1) club XDATA, i vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata vptest ddq_low_msk(%rip), var_xdata jnz 1f vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata vpaddq ddq_high_add_1(%rip), xcounter, xcounter 1: vpshufb xbyteswap, var_xdata, var_xdata .set i, (i +1) .endr vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter vptest ddq_low_msk(%rip), xcounter jnz 1f vpaddq ddq_high_add_1(%rip), xcounter, xcounter 1: .set i, 1 .rept (by - 1) club XDATA, i vpxor xkey0, var_xdata, var_xdata .set i, (i +1) .endr vmovdqa 2*16(p_keys), xkeyB .set i, 0 .rept by club XDATA, i vaesenc xkeyA, var_xdata, var_xdata /* key 1 */ .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) vmovdqa 3*16(p_keys), xkey4 .endif .else vmovdqa 3*16(p_keys), xkeyA .endif .set i, 0 .rept by club XDATA, i vaesenc xkeyB, var_xdata, var_xdata /* key 2 */ .set i, (i +1) .endr add $(16*by), p_in .if (klen == KEY_128) vmovdqa 4*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 4*16(p_keys), xkey4 .endif .endif .set i, 0 .rept by club XDATA, i /* key 3 */ .if (klen == KEY_128) vaesenc xkey4, var_xdata, var_xdata .else vaesenc xkeyA, var_xdata, var_xdata .endif .set i, (i +1) .endr vmovdqa 5*16(p_keys), xkeyA .set i, 0 .rept by club XDATA, i /* key 4 */ .if (klen == KEY_128) vaesenc xkeyB, var_xdata, var_xdata .else vaesenc xkey4, var_xdata, 
var_xdata .endif .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) vmovdqa 6*16(p_keys), xkey8 .endif .else vmovdqa 6*16(p_keys), xkeyB .endif .set i, 0 .rept by club XDATA, i vaesenc xkeyA, var_xdata, var_xdata /* key 5 */ .set i, (i +1) .endr vmovdqa 7*16(p_keys), xkeyA .set i, 0 .rept by club XDATA, i /* key 6 */ .if (klen == KEY_128) vaesenc xkey8, var_xdata, var_xdata .else vaesenc xkeyB, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen == KEY_128) vmovdqa 8*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 8*16(p_keys), xkey8 .endif .endif .set i, 0 .rept by club XDATA, i vaesenc xkeyA, var_xdata, var_xdata /* key 7 */ .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) vmovdqa 9*16(p_keys), xkey12 .endif .else vmovdqa 9*16(p_keys), xkeyA .endif .set i, 0 .rept by club XDATA, i /* key 8 */ .if (klen == KEY_128) vaesenc xkeyB, var_xdata, var_xdata .else vaesenc xkey8, var_xdata, var_xdata .endif .set i, (i +1) .endr vmovdqa 10*16(p_keys), xkeyB .set i, 0 .rept by club XDATA, i /* key 9 */ .if (klen == KEY_128) vaesenc xkey12, var_xdata, var_xdata .else vaesenc xkeyA, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen != KEY_128) vmovdqa 11*16(p_keys), xkeyA .endif .set i, 0 .rept by club XDATA, i /* key 10 */ .if (klen == KEY_128) vaesenclast xkeyB, var_xdata, var_xdata .else vaesenc xkeyB, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen != KEY_128) .if (load_keys) vmovdqa 12*16(p_keys), xkey12 .endif .set i, 0 .rept by club XDATA, i vaesenc xkeyA, var_xdata, var_xdata /* key 11 */ .set i, (i +1) .endr .if (klen == KEY_256) vmovdqa 13*16(p_keys), xkeyA .endif .set i, 0 .rept by club XDATA, i .if (klen == KEY_256) /* key 12 */ vaesenc xkey12, var_xdata, var_xdata .else vaesenclast xkey12, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen == KEY_256) vmovdqa 14*16(p_keys), xkeyB .set i, 0 .rept by club XDATA, i /* key 13 */ vaesenc xkeyA, var_xdata, var_xdata .set i, (i +1) .endr .set i, 0 .rept by club XDATA, 
i /* key 14 */ vaesenclast xkeyB, var_xdata, var_xdata .set i, (i +1) .endr .endif .endif .set i, 0 .rept (by / 2) .set j, (i+1) VMOVDQ (i*16 - 16*by)(p_in), xkeyA VMOVDQ (j*16 - 16*by)(p_in), xkeyB club XDATA, i vpxor xkeyA, var_xdata, var_xdata club XDATA, j vpxor xkeyB, var_xdata, var_xdata .set i, (i+2) .endr .if (i < by) VMOVDQ (i*16 - 16*by)(p_in), xkeyA club XDATA, i vpxor xkeyA, var_xdata, var_xdata .endif .set i, 0 .rept by club XDATA, i VMOVDQ var_xdata, i*16(p_out) .set i, (i+1) .endr .endm .macro do_aes_load val, key_len do_aes \val, 1, \key_len .endm .macro do_aes_noload val, key_len do_aes \val, 0, \key_len .endm /* main body of aes ctr load */ .macro do_aes_ctrmain key_len cmp $16, num_bytes jb .Ldo_return2\key_len vmovdqa byteswap_const(%rip), xbyteswap vmovdqu (p_iv), xcounter vpshufb xbyteswap, xcounter, xcounter mov num_bytes, tmp and $(7*16), tmp jz .Lmult_of_8_blks\key_len /* 1 <= tmp <= 7 */ cmp $(4*16), tmp jg .Lgt4\key_len je .Leq4\key_len .Llt4\key_len: cmp $(2*16), tmp jg .Leq3\key_len je .Leq2\key_len .Leq1\key_len: do_aes_load 1, \key_len add $(1*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\key_len jmp .Lmain_loop2\key_len .Leq2\key_len: do_aes_load 2, \key_len add $(2*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\key_len jmp .Lmain_loop2\key_len .Leq3\key_len: do_aes_load 3, \key_len add $(3*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\key_len jmp .Lmain_loop2\key_len .Leq4\key_len: do_aes_load 4, \key_len add $(4*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\key_len jmp .Lmain_loop2\key_len .Lgt4\key_len: cmp $(6*16), tmp jg .Leq7\key_len je .Leq6\key_len .Leq5\key_len: do_aes_load 5, \key_len add $(5*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\key_len jmp .Lmain_loop2\key_len .Leq6\key_len: do_aes_load 6, \key_len add $(6*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\key_len jmp .Lmain_loop2\key_len .Leq7\key_len: do_aes_load 7, \key_len add $(7*16), p_out and $(~7*16), num_bytes jz 
.Ldo_return2\key_len jmp .Lmain_loop2\key_len .Lmult_of_8_blks\key_len: .if (\key_len != KEY_128) vmovdqa 0*16(p_keys), xkey0 vmovdqa 4*16(p_keys), xkey4 vmovdqa 8*16(p_keys), xkey8 vmovdqa 12*16(p_keys), xkey12 .else vmovdqa 0*16(p_keys), xkey0 vmovdqa 3*16(p_keys), xkey4 vmovdqa 6*16(p_keys), xkey8 vmovdqa 9*16(p_keys), xkey12 .endif .align 16 .Lmain_loop2\key_len: /* num_bytes is a multiple of 8 and >0 */ do_aes_noload 8, \key_len add $(8*16), p_out sub $(8*16), num_bytes jne .Lmain_loop2\key_len .Ldo_return2\key_len: /* return updated IV */ vpshufb xbyteswap, xcounter, xcounter vmovdqu xcounter, (p_iv) ret .endm /* * routine to do AES128 CTR enc/decrypt "by8" * XMM registers are clobbered. * Saving/restoring must be done at a higher level * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ ENTRY(aes_ctr_enc_128_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_128 ENDPROC(aes_ctr_enc_128_avx_by8) /* * routine to do AES192 CTR enc/decrypt "by8" * XMM registers are clobbered. * Saving/restoring must be done at a higher level * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ ENTRY(aes_ctr_enc_192_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_192 ENDPROC(aes_ctr_enc_192_avx_by8) /* * routine to do AES256 CTR enc/decrypt "by8" * XMM registers are clobbered. * Saving/restoring must be done at a higher level * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ ENTRY(aes_ctr_enc_256_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_256 ENDPROC(aes_ctr_enc_256_avx_by8)
AirFortressIlikara/LS2K0300-linux-4.19
9,345
arch/x86/crypto/twofish-i586-asm_32.S
/*************************************************************************** * Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ .file "twofish-i586-asm.S" .text #include <linux/linkage.h> #include <asm/asm-offsets.h> /* return address at 0 */ #define in_blk 12 /* input byte array address parameter*/ #define out_blk 8 /* output byte array address parameter*/ #define ctx 4 /* Twofish context structure */ #define a_offset 0 #define b_offset 4 #define c_offset 8 #define d_offset 12 /* Structure of the crypto context struct*/ #define s0 0 /* S0 Array 256 Words each */ #define s1 1024 /* S1 Array */ #define s2 2048 /* S2 Array */ #define s3 3072 /* S3 Array */ #define w 4096 /* 8 whitening keys (word) */ #define k 4128 /* key 1-32 ( word ) */ /* define a few register aliases to allow macro substitution */ #define R0D %eax #define R0B %al #define R0H %ah #define R1D %ebx #define R1B %bl #define R1H %bh #define R2D %ecx #define R2B %cl #define R2H %ch #define R3D %edx #define R3B %dl #define R3H %dh /* performs input whitening */ #define input_whitening(src,context,offset)\ xor w+offset(context), src; /* performs input whitening */ #define 
output_whitening(src,context,offset)\ xor w+16+offset(context), src; /* * a input register containing a (rotated 16) * b input register containing b * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance */ #define encrypt_round(a,b,c,d,round)\ push d ## D;\ movzx b ## B, %edi;\ mov s1(%ebp,%edi,4),d ## D;\ movzx a ## B, %edi;\ mov s2(%ebp,%edi,4),%esi;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor s2(%ebp,%edi,4),d ## D;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%ebp,%edi,4),%esi;\ movzx b ## B, %edi;\ xor s3(%ebp,%edi,4),d ## D;\ movzx a ## B, %edi;\ xor (%ebp,%edi,4), %esi;\ movzx b ## H, %edi;\ ror $15, b ## D;\ xor (%ebp,%edi,4), d ## D;\ movzx a ## H, %edi;\ xor s1(%ebp,%edi,4),%esi;\ pop %edi;\ add d ## D, %esi;\ add %esi, d ## D;\ add k+round(%ebp), %esi;\ xor %esi, c ## D;\ rol $15, c ## D;\ add k+4+round(%ebp),d ## D;\ xor %edi, d ## D; /* * a input register containing a (rotated 16) * b input register containing b * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance * last round has different rotations for the output preparation */ #define encrypt_last_round(a,b,c,d,round)\ push d ## D;\ movzx b ## B, %edi;\ mov s1(%ebp,%edi,4),d ## D;\ movzx a ## B, %edi;\ mov s2(%ebp,%edi,4),%esi;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor s2(%ebp,%edi,4),d ## D;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%ebp,%edi,4),%esi;\ movzx b ## B, %edi;\ xor s3(%ebp,%edi,4),d ## D;\ movzx a ## B, %edi;\ xor (%ebp,%edi,4), %esi;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%ebp,%edi,4), d ## D;\ movzx a ## H, %edi;\ xor s1(%ebp,%edi,4),%esi;\ pop %edi;\ add d ## D, %esi;\ add %esi, d ## D;\ add k+round(%ebp), %esi;\ xor %esi, c ## D;\ ror $1, c ## D;\ add k+4+round(%ebp),d ## D;\ xor %edi, d ## D; /* * a input register containing a * b input register containing b (rotated 16) * c input 
register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance */ #define decrypt_round(a,b,c,d,round)\ push c ## D;\ movzx a ## B, %edi;\ mov (%ebp,%edi,4), c ## D;\ movzx b ## B, %edi;\ mov s3(%ebp,%edi,4),%esi;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s1(%ebp,%edi,4),c ## D;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%ebp,%edi,4), %esi;\ movzx a ## B, %edi;\ xor s2(%ebp,%edi,4),c ## D;\ movzx b ## B, %edi;\ xor s1(%ebp,%edi,4),%esi;\ movzx a ## H, %edi;\ ror $15, a ## D;\ xor s3(%ebp,%edi,4),c ## D;\ movzx b ## H, %edi;\ xor s2(%ebp,%edi,4),%esi;\ pop %edi;\ add %esi, c ## D;\ add c ## D, %esi;\ add k+round(%ebp), c ## D;\ xor %edi, c ## D;\ add k+4+round(%ebp),%esi;\ xor %esi, d ## D;\ rol $15, d ## D; /* * a input register containing a * b input register containing b (rotated 16) * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance * last round has different rotations for the output preparation */ #define decrypt_last_round(a,b,c,d,round)\ push c ## D;\ movzx a ## B, %edi;\ mov (%ebp,%edi,4), c ## D;\ movzx b ## B, %edi;\ mov s3(%ebp,%edi,4),%esi;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s1(%ebp,%edi,4),c ## D;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%ebp,%edi,4), %esi;\ movzx a ## B, %edi;\ xor s2(%ebp,%edi,4),c ## D;\ movzx b ## B, %edi;\ xor s1(%ebp,%edi,4),%esi;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%ebp,%edi,4),c ## D;\ movzx b ## H, %edi;\ xor s2(%ebp,%edi,4),%esi;\ pop %edi;\ add %esi, c ## D;\ add c ## D, %esi;\ add k+round(%ebp), c ## D;\ xor %edi, c ## D;\ add k+4+round(%ebp),%esi;\ xor %esi, d ## D;\ ror $1, d ## D; ENTRY(twofish_enc_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi push %edi mov ctx + 16(%esp), %ebp /* abuse the base pointer: set new base * pointer to the ctx address */ mov in_blk+16(%esp),%edi /* input address 
in edi */ mov (%edi), %eax mov b_offset(%edi), %ebx mov c_offset(%edi), %ecx mov d_offset(%edi), %edx input_whitening(%eax,%ebp,a_offset) ror $16, %eax input_whitening(%ebx,%ebp,b_offset) input_whitening(%ecx,%ebp,c_offset) input_whitening(%edx,%ebp,d_offset) rol $1, %edx encrypt_round(R0,R1,R2,R3,0); encrypt_round(R2,R3,R0,R1,8); encrypt_round(R0,R1,R2,R3,2*8); encrypt_round(R2,R3,R0,R1,3*8); encrypt_round(R0,R1,R2,R3,4*8); encrypt_round(R2,R3,R0,R1,5*8); encrypt_round(R0,R1,R2,R3,6*8); encrypt_round(R2,R3,R0,R1,7*8); encrypt_round(R0,R1,R2,R3,8*8); encrypt_round(R2,R3,R0,R1,9*8); encrypt_round(R0,R1,R2,R3,10*8); encrypt_round(R2,R3,R0,R1,11*8); encrypt_round(R0,R1,R2,R3,12*8); encrypt_round(R2,R3,R0,R1,13*8); encrypt_round(R0,R1,R2,R3,14*8); encrypt_last_round(R2,R3,R0,R1,15*8); output_whitening(%eax,%ebp,c_offset) output_whitening(%ebx,%ebp,d_offset) output_whitening(%ecx,%ebp,a_offset) output_whitening(%edx,%ebp,b_offset) mov out_blk+16(%esp),%edi; mov %eax, c_offset(%edi) mov %ebx, d_offset(%edi) mov %ecx, (%edi) mov %edx, b_offset(%edi) pop %edi pop %esi pop %ebx pop %ebp mov $1, %eax ret ENDPROC(twofish_enc_blk) ENTRY(twofish_dec_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi push %edi mov ctx + 16(%esp), %ebp /* abuse the base pointer: set new base * pointer to the ctx address */ mov in_blk+16(%esp),%edi /* input address in edi */ mov (%edi), %eax mov b_offset(%edi), %ebx mov c_offset(%edi), %ecx mov d_offset(%edi), %edx output_whitening(%eax,%ebp,a_offset) output_whitening(%ebx,%ebp,b_offset) ror $16, %ebx output_whitening(%ecx,%ebp,c_offset) output_whitening(%edx,%ebp,d_offset) rol $1, %ecx decrypt_round(R0,R1,R2,R3,15*8); decrypt_round(R2,R3,R0,R1,14*8); decrypt_round(R0,R1,R2,R3,13*8); decrypt_round(R2,R3,R0,R1,12*8); decrypt_round(R0,R1,R2,R3,11*8); decrypt_round(R2,R3,R0,R1,10*8); decrypt_round(R0,R1,R2,R3,9*8); decrypt_round(R2,R3,R0,R1,8*8); decrypt_round(R0,R1,R2,R3,7*8); decrypt_round(R2,R3,R0,R1,6*8); 
decrypt_round(R0,R1,R2,R3,5*8); decrypt_round(R2,R3,R0,R1,4*8); decrypt_round(R0,R1,R2,R3,3*8); decrypt_round(R2,R3,R0,R1,2*8); decrypt_round(R0,R1,R2,R3,1*8); decrypt_last_round(R2,R3,R0,R1,0); input_whitening(%eax,%ebp,c_offset) input_whitening(%ebx,%ebp,d_offset) input_whitening(%ecx,%ebp,a_offset) input_whitening(%edx,%ebp,b_offset) mov out_blk+16(%esp),%edi; mov %eax, c_offset(%edi) mov %ebx, d_offset(%edi) mov %ecx, (%edi) mov %edx, b_offset(%edi) pop %edi pop %esi pop %ebx pop %ebp mov $1, %eax ret ENDPROC(twofish_dec_blk)
AirFortressIlikara/LS2K0300-linux-4.19
11,783
arch/x86/crypto/morus640-sse2-asm.S
/* * SSE2 implementation of MORUS-640 * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/frame.h> #define SHUFFLE_MASK(i0, i1, i2, i3) \ (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6)) #define MASK1 SHUFFLE_MASK(3, 0, 1, 2) #define MASK2 SHUFFLE_MASK(2, 3, 0, 1) #define MASK3 SHUFFLE_MASK(1, 2, 3, 0) #define STATE0 %xmm0 #define STATE1 %xmm1 #define STATE2 %xmm2 #define STATE3 %xmm3 #define STATE4 %xmm4 #define KEY %xmm5 #define MSG %xmm5 #define T0 %xmm6 #define T1 %xmm7 .section .rodata.cst16.morus640_const, "aM", @progbits, 32 .align 16 .Lmorus640_const_0: .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 .Lmorus640_const_1: .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd .section .rodata.cst16.morus640_counter, "aM", @progbits, 16 .align 16 .Lmorus640_counter: .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f .text .macro morus640_round s0, s1, s2, s3, s4, b, w movdqa \s1, T0 pand \s2, T0 pxor T0, \s0 pxor \s3, \s0 movdqa \s0, T0 pslld $\b, T0 psrld $(32 - \b), \s0 pxor T0, \s0 pshufd $\w, \s3, \s3 .endm /* * __morus640_update: internal ABI * input: * STATE[0-4] - input state * MSG - message block * output: * STATE[0-4] - output state * changed: * T0 */ __morus640_update: morus640_round STATE0, STATE1, STATE2, STATE3, STATE4, 5, MASK1 pxor MSG, STATE1 morus640_round STATE1, STATE2, STATE3, STATE4, STATE0, 31, MASK2 pxor MSG, STATE2 morus640_round STATE2, STATE3, STATE4, STATE0, STATE1, 7, MASK3 pxor MSG, STATE3 morus640_round STATE3, STATE4, STATE0, STATE1, STATE2, 22, MASK2 pxor MSG, STATE4 
morus640_round STATE4, STATE0, STATE1, STATE2, STATE3, 13, MASK1 ret ENDPROC(__morus640_update) /* * __morus640_update_zero: internal ABI * input: * STATE[0-4] - input state * output: * STATE[0-4] - output state * changed: * T0 */ __morus640_update_zero: morus640_round STATE0, STATE1, STATE2, STATE3, STATE4, 5, MASK1 morus640_round STATE1, STATE2, STATE3, STATE4, STATE0, 31, MASK2 morus640_round STATE2, STATE3, STATE4, STATE0, STATE1, 7, MASK3 morus640_round STATE3, STATE4, STATE0, STATE1, STATE2, 22, MASK2 morus640_round STATE4, STATE0, STATE1, STATE2, STATE3, 13, MASK1 ret ENDPROC(__morus640_update_zero) /* * __load_partial: internal ABI * input: * %rsi - src * %rcx - bytes * output: * MSG - message block * changed: * T0 * %r8 * %r9 */ __load_partial: xor %r9d, %r9d pxor MSG, MSG mov %rcx, %r8 and $0x1, %r8 jz .Lld_partial_1 mov %rcx, %r8 and $0x1E, %r8 add %rsi, %r8 mov (%r8), %r9b .Lld_partial_1: mov %rcx, %r8 and $0x2, %r8 jz .Lld_partial_2 mov %rcx, %r8 and $0x1C, %r8 add %rsi, %r8 shl $16, %r9 mov (%r8), %r9w .Lld_partial_2: mov %rcx, %r8 and $0x4, %r8 jz .Lld_partial_4 mov %rcx, %r8 and $0x18, %r8 add %rsi, %r8 shl $32, %r9 mov (%r8), %r8d xor %r8, %r9 .Lld_partial_4: movq %r9, MSG mov %rcx, %r8 and $0x8, %r8 jz .Lld_partial_8 mov %rcx, %r8 and $0x10, %r8 add %rsi, %r8 pslldq $8, MSG movq (%r8), T0 pxor T0, MSG .Lld_partial_8: ret ENDPROC(__load_partial) /* * __store_partial: internal ABI * input: * %rdx - dst * %rcx - bytes * output: * T0 - message block * changed: * %r8 * %r9 * %r10 */ __store_partial: mov %rcx, %r8 mov %rdx, %r9 movq T0, %r10 cmp $8, %r8 jl .Lst_partial_8 mov %r10, (%r9) psrldq $8, T0 movq T0, %r10 sub $8, %r8 add $8, %r9 .Lst_partial_8: cmp $4, %r8 jl .Lst_partial_4 mov %r10d, (%r9) shr $32, %r10 sub $4, %r8 add $4, %r9 .Lst_partial_4: cmp $2, %r8 jl .Lst_partial_2 mov %r10w, (%r9) shr $16, %r10 sub $2, %r8 add $2, %r9 .Lst_partial_2: cmp $1, %r8 jl .Lst_partial_1 mov %r10b, (%r9) .Lst_partial_1: ret ENDPROC(__store_partial) /* * void 
crypto_morus640_sse2_init(void *state, const void *key, const void *iv); */ ENTRY(crypto_morus640_sse2_init) FRAME_BEGIN /* load IV: */ movdqu (%rdx), STATE0 /* load key: */ movdqu (%rsi), KEY movdqa KEY, STATE1 /* load all ones: */ pcmpeqd STATE2, STATE2 /* load the constants: */ movdqa .Lmorus640_const_0, STATE3 movdqa .Lmorus640_const_1, STATE4 /* update 16 times with zero: */ call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero call __morus640_update_zero /* xor-in the key again after updates: */ pxor KEY, STATE1 /* store the state: */ movdqu STATE0, (0 * 16)(%rdi) movdqu STATE1, (1 * 16)(%rdi) movdqu STATE2, (2 * 16)(%rdi) movdqu STATE3, (3 * 16)(%rdi) movdqu STATE4, (4 * 16)(%rdi) FRAME_END ret ENDPROC(crypto_morus640_sse2_init) /* * void crypto_morus640_sse2_ad(void *state, const void *data, * unsigned int length); */ ENTRY(crypto_morus640_sse2_ad) FRAME_BEGIN cmp $16, %rdx jb .Lad_out /* load the state: */ movdqu (0 * 16)(%rdi), STATE0 movdqu (1 * 16)(%rdi), STATE1 movdqu (2 * 16)(%rdi), STATE2 movdqu (3 * 16)(%rdi), STATE3 movdqu (4 * 16)(%rdi), STATE4 mov %rsi, %r8 and $0xF, %r8 jnz .Lad_u_loop .align 4 .Lad_a_loop: movdqa (%rsi), MSG call __morus640_update sub $16, %rdx add $16, %rsi cmp $16, %rdx jge .Lad_a_loop jmp .Lad_cont .align 4 .Lad_u_loop: movdqu (%rsi), MSG call __morus640_update sub $16, %rdx add $16, %rsi cmp $16, %rdx jge .Lad_u_loop .Lad_cont: /* store the state: */ movdqu STATE0, (0 * 16)(%rdi) movdqu STATE1, (1 * 16)(%rdi) movdqu STATE2, (2 * 16)(%rdi) movdqu STATE3, (3 * 16)(%rdi) movdqu STATE4, (4 * 16)(%rdi) .Lad_out: FRAME_END ret ENDPROC(crypto_morus640_sse2_ad) /* * void 
crypto_morus640_sse2_enc(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus640_sse2_enc) FRAME_BEGIN cmp $16, %rcx jb .Lenc_out /* load the state: */ movdqu (0 * 16)(%rdi), STATE0 movdqu (1 * 16)(%rdi), STATE1 movdqu (2 * 16)(%rdi), STATE2 movdqu (3 * 16)(%rdi), STATE3 movdqu (4 * 16)(%rdi), STATE4 mov %rsi, %r8 or %rdx, %r8 and $0xF, %r8 jnz .Lenc_u_loop .align 4 .Lenc_a_loop: movdqa (%rsi), MSG movdqa MSG, T0 pxor STATE0, T0 pshufd $MASK3, STATE1, T1 pxor T1, T0 movdqa STATE2, T1 pand STATE3, T1 pxor T1, T0 movdqa T0, (%rdx) call __morus640_update sub $16, %rcx add $16, %rsi add $16, %rdx cmp $16, %rcx jge .Lenc_a_loop jmp .Lenc_cont .align 4 .Lenc_u_loop: movdqu (%rsi), MSG movdqa MSG, T0 pxor STATE0, T0 pshufd $MASK3, STATE1, T1 pxor T1, T0 movdqa STATE2, T1 pand STATE3, T1 pxor T1, T0 movdqu T0, (%rdx) call __morus640_update sub $16, %rcx add $16, %rsi add $16, %rdx cmp $16, %rcx jge .Lenc_u_loop .Lenc_cont: /* store the state: */ movdqu STATE0, (0 * 16)(%rdi) movdqu STATE1, (1 * 16)(%rdi) movdqu STATE2, (2 * 16)(%rdi) movdqu STATE3, (3 * 16)(%rdi) movdqu STATE4, (4 * 16)(%rdi) .Lenc_out: FRAME_END ret ENDPROC(crypto_morus640_sse2_enc) /* * void crypto_morus640_sse2_enc_tail(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus640_sse2_enc_tail) FRAME_BEGIN /* load the state: */ movdqu (0 * 16)(%rdi), STATE0 movdqu (1 * 16)(%rdi), STATE1 movdqu (2 * 16)(%rdi), STATE2 movdqu (3 * 16)(%rdi), STATE3 movdqu (4 * 16)(%rdi), STATE4 /* encrypt message: */ call __load_partial movdqa MSG, T0 pxor STATE0, T0 pshufd $MASK3, STATE1, T1 pxor T1, T0 movdqa STATE2, T1 pand STATE3, T1 pxor T1, T0 call __store_partial call __morus640_update /* store the state: */ movdqu STATE0, (0 * 16)(%rdi) movdqu STATE1, (1 * 16)(%rdi) movdqu STATE2, (2 * 16)(%rdi) movdqu STATE3, (3 * 16)(%rdi) movdqu STATE4, (4 * 16)(%rdi) FRAME_END ret ENDPROC(crypto_morus640_sse2_enc_tail) /* * void crypto_morus640_sse2_dec(void *state, 
const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus640_sse2_dec) FRAME_BEGIN cmp $16, %rcx jb .Ldec_out /* load the state: */ movdqu (0 * 16)(%rdi), STATE0 movdqu (1 * 16)(%rdi), STATE1 movdqu (2 * 16)(%rdi), STATE2 movdqu (3 * 16)(%rdi), STATE3 movdqu (4 * 16)(%rdi), STATE4 mov %rsi, %r8 or %rdx, %r8 and $0xF, %r8 jnz .Ldec_u_loop .align 4 .Ldec_a_loop: movdqa (%rsi), MSG pxor STATE0, MSG pshufd $MASK3, STATE1, T0 pxor T0, MSG movdqa STATE2, T0 pand STATE3, T0 pxor T0, MSG movdqa MSG, (%rdx) call __morus640_update sub $16, %rcx add $16, %rsi add $16, %rdx cmp $16, %rcx jge .Ldec_a_loop jmp .Ldec_cont .align 4 .Ldec_u_loop: movdqu (%rsi), MSG pxor STATE0, MSG pshufd $MASK3, STATE1, T0 pxor T0, MSG movdqa STATE2, T0 pand STATE3, T0 pxor T0, MSG movdqu MSG, (%rdx) call __morus640_update sub $16, %rcx add $16, %rsi add $16, %rdx cmp $16, %rcx jge .Ldec_u_loop .Ldec_cont: /* store the state: */ movdqu STATE0, (0 * 16)(%rdi) movdqu STATE1, (1 * 16)(%rdi) movdqu STATE2, (2 * 16)(%rdi) movdqu STATE3, (3 * 16)(%rdi) movdqu STATE4, (4 * 16)(%rdi) .Ldec_out: FRAME_END ret ENDPROC(crypto_morus640_sse2_dec) /* * void crypto_morus640_sse2_dec_tail(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus640_sse2_dec_tail) FRAME_BEGIN /* load the state: */ movdqu (0 * 16)(%rdi), STATE0 movdqu (1 * 16)(%rdi), STATE1 movdqu (2 * 16)(%rdi), STATE2 movdqu (3 * 16)(%rdi), STATE3 movdqu (4 * 16)(%rdi), STATE4 /* decrypt message: */ call __load_partial pxor STATE0, MSG pshufd $MASK3, STATE1, T0 pxor T0, MSG movdqa STATE2, T0 pand STATE3, T0 pxor T0, MSG movdqa MSG, T0 call __store_partial /* mask with byte count: */ movq %rcx, T0 punpcklbw T0, T0 punpcklbw T0, T0 punpcklbw T0, T0 punpcklbw T0, T0 movdqa .Lmorus640_counter, T1 pcmpgtb T1, T0 pand T0, MSG call __morus640_update /* store the state: */ movdqu STATE0, (0 * 16)(%rdi) movdqu STATE1, (1 * 16)(%rdi) movdqu STATE2, (2 * 16)(%rdi) movdqu STATE3, (3 * 16)(%rdi) movdqu STATE4, (4 * 
16)(%rdi) FRAME_END ret ENDPROC(crypto_morus640_sse2_dec_tail) /* * void crypto_morus640_sse2_final(void *state, void *tag_xor, * u64 assoclen, u64 cryptlen); */ ENTRY(crypto_morus640_sse2_final) FRAME_BEGIN /* load the state: */ movdqu (0 * 16)(%rdi), STATE0 movdqu (1 * 16)(%rdi), STATE1 movdqu (2 * 16)(%rdi), STATE2 movdqu (3 * 16)(%rdi), STATE3 movdqu (4 * 16)(%rdi), STATE4 /* xor state[0] into state[4]: */ pxor STATE0, STATE4 /* prepare length block: */ movq %rdx, MSG movq %rcx, T0 pslldq $8, T0 pxor T0, MSG psllq $3, MSG /* multiply by 8 (to get bit count) */ /* update state: */ call __morus640_update call __morus640_update call __morus640_update call __morus640_update call __morus640_update call __morus640_update call __morus640_update call __morus640_update call __morus640_update call __morus640_update /* xor tag: */ movdqu (%rsi), MSG pxor STATE0, MSG pshufd $MASK3, STATE1, T0 pxor T0, MSG movdqa STATE2, T0 pand STATE3, T0 pxor T0, MSG movdqu MSG, (%rsi) FRAME_END ret ENDPROC(crypto_morus640_sse2_final)
AirFortressIlikara/LS2K0300-linux-4.19
14,119
arch/x86/crypto/aegis128-aesni-asm.S
/* * AES-NI + SSE2 implementation of AEGIS-128 * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/frame.h> #define STATE0 %xmm0 #define STATE1 %xmm1 #define STATE2 %xmm2 #define STATE3 %xmm3 #define STATE4 %xmm4 #define KEY %xmm5 #define MSG %xmm5 #define T0 %xmm6 #define T1 %xmm7 #define STATEP %rdi #define LEN %rsi #define SRC %rdx #define DST %rcx .section .rodata.cst16.aegis128_const, "aM", @progbits, 32 .align 16 .Laegis128_const_0: .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 .Laegis128_const_1: .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd .section .rodata.cst16.aegis128_counter, "aM", @progbits, 16 .align 16 .Laegis128_counter: .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f .text /* * aegis128_update * input: * STATE[0-4] - input state * output: * STATE[0-4] - output state (shifted positions) * changed: * T0 */ .macro aegis128_update movdqa STATE4, T0 aesenc STATE0, STATE4 aesenc STATE1, STATE0 aesenc STATE2, STATE1 aesenc STATE3, STATE2 aesenc T0, STATE3 .endm /* * __load_partial: internal ABI * input: * LEN - bytes * SRC - src * output: * MSG - message block * changed: * T0 * %r8 * %r9 */ __load_partial: xor %r9d, %r9d pxor MSG, MSG mov LEN, %r8 and $0x1, %r8 jz .Lld_partial_1 mov LEN, %r8 and $0x1E, %r8 add SRC, %r8 mov (%r8), %r9b .Lld_partial_1: mov LEN, %r8 and $0x2, %r8 jz .Lld_partial_2 mov LEN, %r8 and $0x1C, %r8 add SRC, %r8 shl $0x10, %r9 mov (%r8), %r9w .Lld_partial_2: mov LEN, %r8 and $0x4, %r8 jz .Lld_partial_4 mov LEN, %r8 and $0x18, %r8 add SRC, %r8 shl $32, %r9 
mov (%r8), %r8d xor %r8, %r9 .Lld_partial_4: movq %r9, MSG mov LEN, %r8 and $0x8, %r8 jz .Lld_partial_8 mov LEN, %r8 and $0x10, %r8 add SRC, %r8 pslldq $8, MSG movq (%r8), T0 pxor T0, MSG .Lld_partial_8: ret ENDPROC(__load_partial) /* * __store_partial: internal ABI * input: * LEN - bytes * DST - dst * output: * T0 - message block * changed: * %r8 * %r9 * %r10 */ __store_partial: mov LEN, %r8 mov DST, %r9 movq T0, %r10 cmp $8, %r8 jl .Lst_partial_8 mov %r10, (%r9) psrldq $8, T0 movq T0, %r10 sub $8, %r8 add $8, %r9 .Lst_partial_8: cmp $4, %r8 jl .Lst_partial_4 mov %r10d, (%r9) shr $32, %r10 sub $4, %r8 add $4, %r9 .Lst_partial_4: cmp $2, %r8 jl .Lst_partial_2 mov %r10w, (%r9) shr $0x10, %r10 sub $2, %r8 add $2, %r9 .Lst_partial_2: cmp $1, %r8 jl .Lst_partial_1 mov %r10b, (%r9) .Lst_partial_1: ret ENDPROC(__store_partial) /* * void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv); */ ENTRY(crypto_aegis128_aesni_init) FRAME_BEGIN /* load IV: */ movdqu (%rdx), T1 /* load key: */ movdqa (%rsi), KEY pxor KEY, T1 movdqa T1, STATE0 movdqa KEY, STATE3 movdqa KEY, STATE4 /* load the constants: */ movdqa .Laegis128_const_0, STATE2 movdqa .Laegis128_const_1, STATE1 pxor STATE2, STATE3 pxor STATE1, STATE4 /* update 10 times with KEY / KEY xor IV: */ aegis128_update; pxor KEY, STATE4 aegis128_update; pxor T1, STATE3 aegis128_update; pxor KEY, STATE2 aegis128_update; pxor T1, STATE1 aegis128_update; pxor KEY, STATE0 aegis128_update; pxor T1, STATE4 aegis128_update; pxor KEY, STATE3 aegis128_update; pxor T1, STATE2 aegis128_update; pxor KEY, STATE1 aegis128_update; pxor T1, STATE0 /* store the state: */ movdqu STATE0, 0x00(STATEP) movdqu STATE1, 0x10(STATEP) movdqu STATE2, 0x20(STATEP) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END ret ENDPROC(crypto_aegis128_aesni_init) /* * void crypto_aegis128_aesni_ad(void *state, unsigned int length, * const void *data); */ ENTRY(crypto_aegis128_aesni_ad) FRAME_BEGIN cmp $0x10, LEN jb .Lad_out /* 
load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 mov SRC, %r8 and $0xF, %r8 jnz .Lad_u_loop .align 8 .Lad_a_loop: movdqa 0x00(SRC), MSG aegis128_update pxor MSG, STATE4 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_1 movdqa 0x10(SRC), MSG aegis128_update pxor MSG, STATE3 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_2 movdqa 0x20(SRC), MSG aegis128_update pxor MSG, STATE2 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_3 movdqa 0x30(SRC), MSG aegis128_update pxor MSG, STATE1 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_4 movdqa 0x40(SRC), MSG aegis128_update pxor MSG, STATE0 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_0 add $0x50, SRC jmp .Lad_a_loop .align 8 .Lad_u_loop: movdqu 0x00(SRC), MSG aegis128_update pxor MSG, STATE4 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_1 movdqu 0x10(SRC), MSG aegis128_update pxor MSG, STATE3 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_2 movdqu 0x20(SRC), MSG aegis128_update pxor MSG, STATE2 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_3 movdqu 0x30(SRC), MSG aegis128_update pxor MSG, STATE1 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_4 movdqu 0x40(SRC), MSG aegis128_update pxor MSG, STATE0 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_0 add $0x50, SRC jmp .Lad_u_loop /* store the state: */ .Lad_out_0: movdqu STATE0, 0x00(STATEP) movdqu STATE1, 0x10(STATEP) movdqu STATE2, 0x20(STATEP) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END ret .Lad_out_1: movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END ret .Lad_out_2: movdqu STATE3, 0x00(STATEP) movdqu STATE4, 0x10(STATEP) movdqu STATE0, 0x20(STATEP) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END ret .Lad_out_3: movdqu STATE2, 0x00(STATEP) movdqu STATE3, 0x10(STATEP) movdqu STATE4, 0x20(STATEP) movdqu STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END ret .Lad_out_4: 
movdqu STATE1, 0x00(STATEP) movdqu STATE2, 0x10(STATEP) movdqu STATE3, 0x20(STATEP) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END ret .Lad_out: FRAME_END ret ENDPROC(crypto_aegis128_aesni_ad) .macro encrypt_block a s0 s1 s2 s3 s4 i movdq\a (\i * 0x10)(SRC), MSG movdqa MSG, T0 pxor \s1, T0 pxor \s4, T0 movdqa \s2, T1 pand \s3, T1 pxor T1, T0 movdq\a T0, (\i * 0x10)(DST) aegis128_update pxor MSG, \s4 sub $0x10, LEN cmp $0x10, LEN jl .Lenc_out_\i .endm /* * void crypto_aegis128_aesni_enc(void *state, unsigned int length, * const void *src, void *dst); */ ENTRY(crypto_aegis128_aesni_enc) FRAME_BEGIN cmp $0x10, LEN jb .Lenc_out /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 mov SRC, %r8 or DST, %r8 and $0xF, %r8 jnz .Lenc_u_loop .align 8 .Lenc_a_loop: encrypt_block a STATE0 STATE1 STATE2 STATE3 STATE4 0 encrypt_block a STATE4 STATE0 STATE1 STATE2 STATE3 1 encrypt_block a STATE3 STATE4 STATE0 STATE1 STATE2 2 encrypt_block a STATE2 STATE3 STATE4 STATE0 STATE1 3 encrypt_block a STATE1 STATE2 STATE3 STATE4 STATE0 4 add $0x50, SRC add $0x50, DST jmp .Lenc_a_loop .align 8 .Lenc_u_loop: encrypt_block u STATE0 STATE1 STATE2 STATE3 STATE4 0 encrypt_block u STATE4 STATE0 STATE1 STATE2 STATE3 1 encrypt_block u STATE3 STATE4 STATE0 STATE1 STATE2 2 encrypt_block u STATE2 STATE3 STATE4 STATE0 STATE1 3 encrypt_block u STATE1 STATE2 STATE3 STATE4 STATE0 4 add $0x50, SRC add $0x50, DST jmp .Lenc_u_loop /* store the state: */ .Lenc_out_0: movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END ret .Lenc_out_1: movdqu STATE3, 0x00(STATEP) movdqu STATE4, 0x10(STATEP) movdqu STATE0, 0x20(STATEP) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END ret .Lenc_out_2: movdqu STATE2, 0x00(STATEP) movdqu STATE3, 0x10(STATEP) movdqu STATE4, 0x20(STATEP) movdqu 
STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END ret .Lenc_out_3: movdqu STATE1, 0x00(STATEP) movdqu STATE2, 0x10(STATEP) movdqu STATE3, 0x20(STATEP) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END ret .Lenc_out_4: movdqu STATE0, 0x00(STATEP) movdqu STATE1, 0x10(STATEP) movdqu STATE2, 0x20(STATEP) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END ret .Lenc_out: FRAME_END ret ENDPROC(crypto_aegis128_aesni_enc) /* * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length, * const void *src, void *dst); */ ENTRY(crypto_aegis128_aesni_enc_tail) FRAME_BEGIN /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 /* encrypt message: */ call __load_partial movdqa MSG, T0 pxor STATE1, T0 pxor STATE4, T0 movdqa STATE2, T1 pand STATE3, T1 pxor T1, T0 call __store_partial aegis128_update pxor MSG, STATE4 /* store the state: */ movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END ret ENDPROC(crypto_aegis128_aesni_enc_tail) .macro decrypt_block a s0 s1 s2 s3 s4 i movdq\a (\i * 0x10)(SRC), MSG pxor \s1, MSG pxor \s4, MSG movdqa \s2, T1 pand \s3, T1 pxor T1, MSG movdq\a MSG, (\i * 0x10)(DST) aegis128_update pxor MSG, \s4 sub $0x10, LEN cmp $0x10, LEN jl .Ldec_out_\i .endm /* * void crypto_aegis128_aesni_dec(void *state, unsigned int length, * const void *src, void *dst); */ ENTRY(crypto_aegis128_aesni_dec) FRAME_BEGIN cmp $0x10, LEN jb .Ldec_out /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 mov SRC, %r8 or DST, %r8 and $0xF, %r8 jnz .Ldec_u_loop .align 8 .Ldec_a_loop: decrypt_block a STATE0 STATE1 STATE2 STATE3 STATE4 0 decrypt_block a STATE4 STATE0 STATE1 STATE2 STATE3 1 decrypt_block a STATE3 STATE4 STATE0 
STATE1 STATE2 2 decrypt_block a STATE2 STATE3 STATE4 STATE0 STATE1 3 decrypt_block a STATE1 STATE2 STATE3 STATE4 STATE0 4 add $0x50, SRC add $0x50, DST jmp .Ldec_a_loop .align 8 .Ldec_u_loop: decrypt_block u STATE0 STATE1 STATE2 STATE3 STATE4 0 decrypt_block u STATE4 STATE0 STATE1 STATE2 STATE3 1 decrypt_block u STATE3 STATE4 STATE0 STATE1 STATE2 2 decrypt_block u STATE2 STATE3 STATE4 STATE0 STATE1 3 decrypt_block u STATE1 STATE2 STATE3 STATE4 STATE0 4 add $0x50, SRC add $0x50, DST jmp .Ldec_u_loop /* store the state: */ .Ldec_out_0: movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END ret .Ldec_out_1: movdqu STATE3, 0x00(STATEP) movdqu STATE4, 0x10(STATEP) movdqu STATE0, 0x20(STATEP) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END ret .Ldec_out_2: movdqu STATE2, 0x00(STATEP) movdqu STATE3, 0x10(STATEP) movdqu STATE4, 0x20(STATEP) movdqu STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END ret .Ldec_out_3: movdqu STATE1, 0x00(STATEP) movdqu STATE2, 0x10(STATEP) movdqu STATE3, 0x20(STATEP) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END ret .Ldec_out_4: movdqu STATE0, 0x00(STATEP) movdqu STATE1, 0x10(STATEP) movdqu STATE2, 0x20(STATEP) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END ret .Ldec_out: FRAME_END ret ENDPROC(crypto_aegis128_aesni_dec) /* * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length, * const void *src, void *dst); */ ENTRY(crypto_aegis128_aesni_dec_tail) FRAME_BEGIN /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 /* decrypt message: */ call __load_partial pxor STATE1, MSG pxor STATE4, MSG movdqa STATE2, T1 pand STATE3, T1 pxor T1, MSG movdqa MSG, T0 call __store_partial /* mask with byte count: */ movq LEN, T0 punpcklbw T0, T0 punpcklbw T0, T0 punpcklbw T0, T0 
punpcklbw T0, T0 movdqa .Laegis128_counter, T1 pcmpgtb T1, T0 pand T0, MSG aegis128_update pxor MSG, STATE4 /* store the state: */ movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END ret ENDPROC(crypto_aegis128_aesni_dec_tail) /* * void crypto_aegis128_aesni_final(void *state, void *tag_xor, * u64 assoclen, u64 cryptlen); */ ENTRY(crypto_aegis128_aesni_final) FRAME_BEGIN /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 /* prepare length block: */ movq %rdx, MSG movq %rcx, T0 pslldq $8, T0 pxor T0, MSG psllq $3, MSG /* multiply by 8 (to get bit count) */ pxor STATE3, MSG /* update state: */ aegis128_update; pxor MSG, STATE4 aegis128_update; pxor MSG, STATE3 aegis128_update; pxor MSG, STATE2 aegis128_update; pxor MSG, STATE1 aegis128_update; pxor MSG, STATE0 aegis128_update; pxor MSG, STATE4 aegis128_update; pxor MSG, STATE3 /* xor tag: */ movdqu (%rsi), MSG pxor STATE0, MSG pxor STATE1, MSG pxor STATE2, MSG pxor STATE3, MSG pxor STATE4, MSG movdqu MSG, (%rsi) FRAME_END ret ENDPROC(crypto_aegis128_aesni_final)
AirFortressIlikara/LS2K0300-linux-4.19
13,306
arch/x86/crypto/sha512-avx-asm.S
######################################################################## # Implement fast SHA-512 with AVX instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # David Cote <david.m.cote@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-512 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## #ifdef CONFIG_AS_AVX #include <linux/linkage.h> .text # Virtual Registers # ARG1 digest = %rdi # ARG2 msg = %rsi # ARG3 msglen = %rdx T1 = %rcx T2 = %r8 a_64 = %r9 b_64 = %r10 c_64 = %r11 d_64 = %r12 e_64 = %r13 f_64 = %r14 g_64 = %r15 h_64 = %rbx tmp0 = %rax # Local variables (stack frame) # Message Schedule W_SIZE = 80*8 # W[t] + K[t] | W[t+1] + K[t+1] WK_SIZE = 2*8 RSPSAVE_SIZE = 1*8 GPRSAVE_SIZE = 5*8 frame_W = 0 frame_WK = frame_W + W_SIZE frame_RSPSAVE = frame_WK + WK_SIZE frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE frame_size = frame_GPRSAVE + GPRSAVE_SIZE # Useful QWORD "arrays" for simpler memory references # MSG, DIGEST, K_t, W_t are arrays # WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even # Input message (arg1) #define MSG(i) 8*i(msg) # Output Digest (arg2) #define DIGEST(i) 8*i(digest) # SHA Constants (static mem) #define K_t(i) 8*i+K512(%rip) # Message Schedule (stack frame) #define W_t(i) 8*i+frame_W(%rsp) # W[t]+K[t] (stack frame) #define WK_2(i) 8*((i%2))+frame_WK(%rsp) .macro RotateState # Rotate symbols a..h right TMP = h_64 h_64 = g_64 g_64 = f_64 f_64 = e_64 e_64 = d_64 d_64 = c_64 c_64 = b_64 b_64 = a_64 a_64 = TMP .endm .macro RORQ p1 p2 # shld is faster than ror on Sandybridge shld $(64-\p2), \p1, \p1 .endm .macro SHA512_Round rnd # Compute Round %%t mov f_64, T1 # T1 = f mov e_64, tmp0 # tmp = e xor g_64, T1 # T1 = f ^ g RORQ tmp0, 23 # 41 # tmp = e ror 23 and e_64, T1 # T1 = (f ^ g) & e xor e_64, tmp0 # tmp = (e ror 23) ^ e xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g) idx = \rnd add WK_2(idx), T1 # W[t] + K[t] from message scheduler RORQ tmp0, 4 # 18 # tmp = ((e ror 23) ^ e) ror 4 xor e_64, tmp0 # tmp = (((e ror 23) ^ e) ror 4) ^ e mov a_64, T2 # T2 = a add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h RORQ tmp0, 14 # 14 # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e) mov a_64, tmp0 # tmp 
= a xor c_64, T2 # T2 = a ^ c and c_64, tmp0 # tmp = a & c and b_64, T2 # T2 = (a ^ c) & b xor tmp0, T2 # T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) mov a_64, tmp0 # tmp = a RORQ tmp0, 5 # 39 # tmp = a ror 5 xor a_64, tmp0 # tmp = (a ror 5) ^ a add T1, d_64 # e(next_state) = d + T1 RORQ tmp0, 6 # 34 # tmp = ((a ror 5) ^ a) ror 6 xor a_64, tmp0 # tmp = (((a ror 5) ^ a) ror 6) ^ a lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c) RORQ tmp0, 28 # 28 # tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) add tmp0, h_64 # a(next_state) = T1 + Maj(a,b,c) S0(a) RotateState .endm .macro SHA512_2Sched_2Round_avx rnd # Compute rounds t-2 and t-1 # Compute message schedule QWORDS t and t+1 # Two rounds are computed based on the values for K[t-2]+W[t-2] and # K[t-1]+W[t-1] which were previously stored at WK_2 by the message # scheduler. # The two new schedule QWORDS are stored at [W_t(t)] and [W_t(t+1)]. # They are then added to their respective SHA512 constants at # [K_t(t)] and [K_t(t+1)] and stored at dqword [WK_2(t)] # For brievity, the comments following vectored instructions only refer to # the first of a pair of QWORDS. # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]} # The computation of the message schedule and the rounds are tightly # stitched to take advantage of instruction-level parallelism. 
idx = \rnd - 2 vmovdqa W_t(idx), %xmm4 # XMM4 = W[t-2] idx = \rnd - 15 vmovdqu W_t(idx), %xmm5 # XMM5 = W[t-15] mov f_64, T1 vpsrlq $61, %xmm4, %xmm0 # XMM0 = W[t-2]>>61 mov e_64, tmp0 vpsrlq $1, %xmm5, %xmm6 # XMM6 = W[t-15]>>1 xor g_64, T1 RORQ tmp0, 23 # 41 vpsrlq $19, %xmm4, %xmm1 # XMM1 = W[t-2]>>19 and e_64, T1 xor e_64, tmp0 vpxor %xmm1, %xmm0, %xmm0 # XMM0 = W[t-2]>>61 ^ W[t-2]>>19 xor g_64, T1 idx = \rnd add WK_2(idx), T1# vpsrlq $8, %xmm5, %xmm7 # XMM7 = W[t-15]>>8 RORQ tmp0, 4 # 18 vpsrlq $6, %xmm4, %xmm2 # XMM2 = W[t-2]>>6 xor e_64, tmp0 mov a_64, T2 add h_64, T1 vpxor %xmm7, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8 RORQ tmp0, 14 # 14 add tmp0, T1 vpsrlq $7, %xmm5, %xmm8 # XMM8 = W[t-15]>>7 mov a_64, tmp0 xor c_64, T2 vpsllq $(64-61), %xmm4, %xmm3 # XMM3 = W[t-2]<<3 and c_64, tmp0 and b_64, T2 vpxor %xmm3, %xmm2, %xmm2 # XMM2 = W[t-2]>>6 ^ W[t-2]<<3 xor tmp0, T2 mov a_64, tmp0 vpsllq $(64-1), %xmm5, %xmm9 # XMM9 = W[t-15]<<63 RORQ tmp0, 5 # 39 vpxor %xmm9, %xmm8, %xmm8 # XMM8 = W[t-15]>>7 ^ W[t-15]<<63 xor a_64, tmp0 add T1, d_64 RORQ tmp0, 6 # 34 xor a_64, tmp0 vpxor %xmm8, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^ # W[t-15]>>7 ^ W[t-15]<<63 lea (T1, T2), h_64 RORQ tmp0, 28 # 28 vpsllq $(64-19), %xmm4, %xmm4 # XMM4 = W[t-2]<<25 add tmp0, h_64 RotateState vpxor %xmm4, %xmm0, %xmm0 # XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^ # W[t-2]<<25 mov f_64, T1 vpxor %xmm2, %xmm0, %xmm0 # XMM0 = s1(W[t-2]) mov e_64, tmp0 xor g_64, T1 idx = \rnd - 16 vpaddq W_t(idx), %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16] idx = \rnd - 7 vmovdqu W_t(idx), %xmm1 # XMM1 = W[t-7] RORQ tmp0, 23 # 41 and e_64, T1 xor e_64, tmp0 xor g_64, T1 vpsllq $(64-8), %xmm5, %xmm5 # XMM5 = W[t-15]<<56 idx = \rnd + 1 add WK_2(idx), T1 vpxor %xmm5, %xmm6, %xmm6 # XMM6 = s0(W[t-15]) RORQ tmp0, 4 # 18 vpaddq %xmm6, %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15]) xor e_64, tmp0 vpaddq %xmm1, %xmm0, %xmm0 # XMM0 = W[t] = s1(W[t-2]) + W[t-7] + # s0(W[t-15]) + W[t-16] mov a_64, T2 add h_64, T1 
RORQ tmp0, 14 # 14 add tmp0, T1 idx = \rnd vmovdqa %xmm0, W_t(idx) # Store W[t] vpaddq K_t(idx), %xmm0, %xmm0 # Compute W[t]+K[t] vmovdqa %xmm0, WK_2(idx) # Store W[t]+K[t] for next rounds mov a_64, tmp0 xor c_64, T2 and c_64, tmp0 and b_64, T2 xor tmp0, T2 mov a_64, tmp0 RORQ tmp0, 5 # 39 xor a_64, tmp0 add T1, d_64 RORQ tmp0, 6 # 34 xor a_64, tmp0 lea (T1, T2), h_64 RORQ tmp0, 28 # 28 add tmp0, h_64 RotateState .endm ######################################################################## # void sha512_transform_avx(void* D, const void* M, u64 L) # Purpose: Updates the SHA512 digest stored at D with the message stored in M. # The size of the message pointed to by M must be an integer multiple of SHA512 # message blocks. # L is the message length in SHA512 blocks ######################################################################## ENTRY(sha512_transform_avx) cmp $0, msglen je nowork # Allocate Stack Space mov %rsp, %rax sub $frame_size, %rsp and $~(0x20 - 1), %rsp mov %rax, frame_RSPSAVE(%rsp) # Save GPRs mov %rbx, frame_GPRSAVE(%rsp) mov %r12, frame_GPRSAVE +8*1(%rsp) mov %r13, frame_GPRSAVE +8*2(%rsp) mov %r14, frame_GPRSAVE +8*3(%rsp) mov %r15, frame_GPRSAVE +8*4(%rsp) updateblock: # Load state variables mov DIGEST(0), a_64 mov DIGEST(1), b_64 mov DIGEST(2), c_64 mov DIGEST(3), d_64 mov DIGEST(4), e_64 mov DIGEST(5), f_64 mov DIGEST(6), g_64 mov DIGEST(7), h_64 t = 0 .rept 80/2 + 1 # (80 rounds) / (2 rounds/iteration) + (1 iteration) # +1 iteration because the scheduler leads hashing by 1 iteration .if t < 2 # BSWAP 2 QWORDS vmovdqa XMM_QWORD_BSWAP(%rip), %xmm1 vmovdqu MSG(t), %xmm0 vpshufb %xmm1, %xmm0, %xmm0 # BSWAP vmovdqa %xmm0, W_t(t) # Store Scheduled Pair vpaddq K_t(t), %xmm0, %xmm0 # Compute W[t]+K[t] vmovdqa %xmm0, WK_2(t) # Store into WK for rounds .elseif t < 16 # BSWAP 2 QWORDS# Compute 2 Rounds vmovdqu MSG(t), %xmm0 vpshufb %xmm1, %xmm0, %xmm0 # BSWAP SHA512_Round t-2 # Round t-2 vmovdqa %xmm0, W_t(t) # Store Scheduled Pair vpaddq K_t(t), 
%xmm0, %xmm0 # Compute W[t]+K[t] SHA512_Round t-1 # Round t-1 vmovdqa %xmm0, WK_2(t)# Store W[t]+K[t] into WK .elseif t < 79 # Schedule 2 QWORDS# Compute 2 Rounds SHA512_2Sched_2Round_avx t .else # Compute 2 Rounds SHA512_Round t-2 SHA512_Round t-1 .endif t = t+2 .endr # Update digest add a_64, DIGEST(0) add b_64, DIGEST(1) add c_64, DIGEST(2) add d_64, DIGEST(3) add e_64, DIGEST(4) add f_64, DIGEST(5) add g_64, DIGEST(6) add h_64, DIGEST(7) # Advance to next message block add $16*8, msg dec msglen jnz updateblock # Restore GPRs mov frame_GPRSAVE(%rsp), %rbx mov frame_GPRSAVE +8*1(%rsp), %r12 mov frame_GPRSAVE +8*2(%rsp), %r13 mov frame_GPRSAVE +8*3(%rsp), %r14 mov frame_GPRSAVE +8*4(%rsp), %r15 # Restore Stack Pointer mov frame_RSPSAVE(%rsp), %rsp nowork: ret ENDPROC(sha512_transform_avx) ######################################################################## ### Binary Data .section .rodata.cst16.XMM_QWORD_BSWAP, "aM", @progbits, 16 .align 16 # Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. XMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 # Mergeable 640-byte rodata section. This allows linker to merge the table # with other, exactly the same 640-byte fragment of another rodata section # (if such section exists). 
.section .rodata.cst640.K512, "aM", @progbits, 640 .align 64 # K[t] used in SHA512 hashing K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 #endif
AirFortressIlikara/LS2K0300-linux-4.19
10,654
arch/x86/crypto/aes-i586-asm_32.S
// ------------------------------------------------------------------------- // Copyright (c) 2001, Dr Brian Gladman < >, Worcester, UK. // All rights reserved. // // LICENSE TERMS // // The free distribution and use of this software in both source and binary // form is allowed (with or without changes) provided that: // // 1. distributions of this source code include the above copyright // notice, this list of conditions and the following disclaimer// // // 2. distributions in binary form include the above copyright // notice, this list of conditions and the following disclaimer // in the documentation and/or other associated materials// // // 3. the copyright holder's name is not used to endorse products // built using this software without specific written permission. // // // ALTERNATIVELY, provided that this notice is retained in full, this product // may be distributed under the terms of the GNU General Public License (GPL), // in which case the provisions of the GPL apply INSTEAD OF those given above. // // Copyright (c) 2004 Linus Torvalds <torvalds@osdl.org> // Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> // DISCLAIMER // // This software is provided 'as is' with no explicit or implied warranties // in respect of its properties including, but not limited to, correctness // and fitness for purpose. 
// ------------------------------------------------------------------------- // Issue Date: 29/07/2002 .file "aes-i586-asm.S" .text #include <linux/linkage.h> #include <asm/asm-offsets.h> #define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words) /* offsets to parameters with one register pushed onto stack */ #define ctx 8 #define out_blk 12 #define in_blk 16 /* offsets in crypto_aes_ctx structure */ #define klen (480) #define ekey (0) #define dkey (240) // register mapping for encrypt and decrypt subroutines #define r0 eax #define r1 ebx #define r2 ecx #define r3 edx #define r4 esi #define r5 edi #define eaxl al #define eaxh ah #define ebxl bl #define ebxh bh #define ecxl cl #define ecxh ch #define edxl dl #define edxh dh #define _h(reg) reg##h #define h(reg) _h(reg) #define _l(reg) reg##l #define l(reg) _l(reg) // This macro takes a 32-bit word representing a column and uses // each of its four bytes to index into four tables of 256 32-bit // words to obtain values that are then xored into the appropriate // output registers r0, r1, r4 or r5. 
// Parameters: // table table base address // %1 out_state[0] // %2 out_state[1] // %3 out_state[2] // %4 out_state[3] // idx input register for the round (destroyed) // tmp scratch register for the round // sched key schedule #define do_col(table, a1,a2,a3,a4, idx, tmp) \ movzx %l(idx),%tmp; \ xor table(,%tmp,4),%a1; \ movzx %h(idx),%tmp; \ shr $16,%idx; \ xor table+tlen(,%tmp,4),%a2; \ movzx %l(idx),%tmp; \ movzx %h(idx),%idx; \ xor table+2*tlen(,%tmp,4),%a3; \ xor table+3*tlen(,%idx,4),%a4; // initialise output registers from the key schedule // NB1: original value of a3 is in idx on exit // NB2: original values of a1,a2,a4 aren't used #define do_fcol(table, a1,a2,a3,a4, idx, tmp, sched) \ mov 0 sched,%a1; \ movzx %l(idx),%tmp; \ mov 12 sched,%a2; \ xor table(,%tmp,4),%a1; \ mov 4 sched,%a4; \ movzx %h(idx),%tmp; \ shr $16,%idx; \ xor table+tlen(,%tmp,4),%a2; \ movzx %l(idx),%tmp; \ movzx %h(idx),%idx; \ xor table+3*tlen(,%idx,4),%a4; \ mov %a3,%idx; \ mov 8 sched,%a3; \ xor table+2*tlen(,%tmp,4),%a3; // initialise output registers from the key schedule // NB1: original value of a3 is in idx on exit // NB2: original values of a1,a2,a4 aren't used #define do_icol(table, a1,a2,a3,a4, idx, tmp, sched) \ mov 0 sched,%a1; \ movzx %l(idx),%tmp; \ mov 4 sched,%a2; \ xor table(,%tmp,4),%a1; \ mov 12 sched,%a4; \ movzx %h(idx),%tmp; \ shr $16,%idx; \ xor table+tlen(,%tmp,4),%a2; \ movzx %l(idx),%tmp; \ movzx %h(idx),%idx; \ xor table+3*tlen(,%idx,4),%a4; \ mov %a3,%idx; \ mov 8 sched,%a3; \ xor table+2*tlen(,%tmp,4),%a3; // original Gladman had conditional saves to MMX regs. #define save(a1, a2) \ mov %a2,4*a1(%esp) #define restore(a1, a2) \ mov 4*a2(%esp),%a1 // These macros perform a forward encryption cycle. They are entered with // the first previous round column values in r0,r1,r4,r5 and // exit with the final values in the same registers, using stack // for temporary storage. 
// round column values // on entry: r0,r1,r4,r5 // on exit: r2,r1,r4,r5 #define fwd_rnd1(arg, table) \ save (0,r1); \ save (1,r5); \ \ /* compute new column values */ \ do_fcol(table, r2,r5,r4,r1, r0,r3, arg); /* idx=r0 */ \ do_col (table, r4,r1,r2,r5, r0,r3); /* idx=r4 */ \ restore(r0,0); \ do_col (table, r1,r2,r5,r4, r0,r3); /* idx=r1 */ \ restore(r0,1); \ do_col (table, r5,r4,r1,r2, r0,r3); /* idx=r5 */ // round column values // on entry: r2,r1,r4,r5 // on exit: r0,r1,r4,r5 #define fwd_rnd2(arg, table) \ save (0,r1); \ save (1,r5); \ \ /* compute new column values */ \ do_fcol(table, r0,r5,r4,r1, r2,r3, arg); /* idx=r2 */ \ do_col (table, r4,r1,r0,r5, r2,r3); /* idx=r4 */ \ restore(r2,0); \ do_col (table, r1,r0,r5,r4, r2,r3); /* idx=r1 */ \ restore(r2,1); \ do_col (table, r5,r4,r1,r0, r2,r3); /* idx=r5 */ // These macros performs an inverse encryption cycle. They are entered with // the first previous round column values in r0,r1,r4,r5 and // exit with the final values in the same registers, using stack // for temporary storage // round column values // on entry: r0,r1,r4,r5 // on exit: r2,r1,r4,r5 #define inv_rnd1(arg, table) \ save (0,r1); \ save (1,r5); \ \ /* compute new column values */ \ do_icol(table, r2,r1,r4,r5, r0,r3, arg); /* idx=r0 */ \ do_col (table, r4,r5,r2,r1, r0,r3); /* idx=r4 */ \ restore(r0,0); \ do_col (table, r1,r4,r5,r2, r0,r3); /* idx=r1 */ \ restore(r0,1); \ do_col (table, r5,r2,r1,r4, r0,r3); /* idx=r5 */ // round column values // on entry: r2,r1,r4,r5 // on exit: r0,r1,r4,r5 #define inv_rnd2(arg, table) \ save (0,r1); \ save (1,r5); \ \ /* compute new column values */ \ do_icol(table, r0,r1,r4,r5, r2,r3, arg); /* idx=r2 */ \ do_col (table, r4,r5,r0,r1, r2,r3); /* idx=r4 */ \ restore(r2,0); \ do_col (table, r1,r4,r5,r0, r2,r3); /* idx=r1 */ \ restore(r2,1); \ do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */ // AES (Rijndael) Encryption Subroutine /* void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */ .extern 
crypto_ft_tab .extern crypto_fl_tab ENTRY(aes_enc_blk) push %ebp mov ctx(%esp),%ebp // CAUTION: the order and the values used in these assigns // rely on the register mappings 1: push %ebx mov in_blk+4(%esp),%r2 push %esi mov klen(%ebp),%r3 // key size push %edi #if ekey != 0 lea ekey(%ebp),%ebp // key pointer #endif // input four columns and xor in first round key mov (%r2),%r0 mov 4(%r2),%r1 mov 8(%r2),%r4 mov 12(%r2),%r5 xor (%ebp),%r0 xor 4(%ebp),%r1 xor 8(%ebp),%r4 xor 12(%ebp),%r5 sub $8,%esp // space for register saves on stack add $16,%ebp // increment to next round key cmp $24,%r3 jb 4f // 10 rounds for 128-bit key lea 32(%ebp),%ebp je 3f // 12 rounds for 192-bit key lea 32(%ebp),%ebp 2: fwd_rnd1( -64(%ebp), crypto_ft_tab) // 14 rounds for 256-bit key fwd_rnd2( -48(%ebp), crypto_ft_tab) 3: fwd_rnd1( -32(%ebp), crypto_ft_tab) // 12 rounds for 192-bit key fwd_rnd2( -16(%ebp), crypto_ft_tab) 4: fwd_rnd1( (%ebp), crypto_ft_tab) // 10 rounds for 128-bit key fwd_rnd2( +16(%ebp), crypto_ft_tab) fwd_rnd1( +32(%ebp), crypto_ft_tab) fwd_rnd2( +48(%ebp), crypto_ft_tab) fwd_rnd1( +64(%ebp), crypto_ft_tab) fwd_rnd2( +80(%ebp), crypto_ft_tab) fwd_rnd1( +96(%ebp), crypto_ft_tab) fwd_rnd2(+112(%ebp), crypto_ft_tab) fwd_rnd1(+128(%ebp), crypto_ft_tab) fwd_rnd2(+144(%ebp), crypto_fl_tab) // last round uses a different table // move final values to the output array. 
CAUTION: the // order of these assigns rely on the register mappings add $8,%esp mov out_blk+12(%esp),%ebp mov %r5,12(%ebp) pop %edi mov %r4,8(%ebp) pop %esi mov %r1,4(%ebp) pop %ebx mov %r0,(%ebp) pop %ebp ret ENDPROC(aes_enc_blk) // AES (Rijndael) Decryption Subroutine /* void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */ .extern crypto_it_tab .extern crypto_il_tab ENTRY(aes_dec_blk) push %ebp mov ctx(%esp),%ebp // CAUTION: the order and the values used in these assigns // rely on the register mappings 1: push %ebx mov in_blk+4(%esp),%r2 push %esi mov klen(%ebp),%r3 // key size push %edi #if dkey != 0 lea dkey(%ebp),%ebp // key pointer #endif // input four columns and xor in first round key mov (%r2),%r0 mov 4(%r2),%r1 mov 8(%r2),%r4 mov 12(%r2),%r5 xor (%ebp),%r0 xor 4(%ebp),%r1 xor 8(%ebp),%r4 xor 12(%ebp),%r5 sub $8,%esp // space for register saves on stack add $16,%ebp // increment to next round key cmp $24,%r3 jb 4f // 10 rounds for 128-bit key lea 32(%ebp),%ebp je 3f // 12 rounds for 192-bit key lea 32(%ebp),%ebp 2: inv_rnd1( -64(%ebp), crypto_it_tab) // 14 rounds for 256-bit key inv_rnd2( -48(%ebp), crypto_it_tab) 3: inv_rnd1( -32(%ebp), crypto_it_tab) // 12 rounds for 192-bit key inv_rnd2( -16(%ebp), crypto_it_tab) 4: inv_rnd1( (%ebp), crypto_it_tab) // 10 rounds for 128-bit key inv_rnd2( +16(%ebp), crypto_it_tab) inv_rnd1( +32(%ebp), crypto_it_tab) inv_rnd2( +48(%ebp), crypto_it_tab) inv_rnd1( +64(%ebp), crypto_it_tab) inv_rnd2( +80(%ebp), crypto_it_tab) inv_rnd1( +96(%ebp), crypto_it_tab) inv_rnd2(+112(%ebp), crypto_it_tab) inv_rnd1(+128(%ebp), crypto_it_tab) inv_rnd2(+144(%ebp), crypto_il_tab) // last round uses a different table // move final values to the output array. CAUTION: the // order of these assigns rely on the register mappings add $8,%esp mov out_blk+12(%esp),%ebp mov %r5,12(%ebp) pop %edi mov %r4,8(%ebp) pop %esi mov %r1,4(%ebp) pop %ebx mov %r0,(%ebp) pop %ebp ret ENDPROC(aes_dec_blk)
AirFortressIlikara/LS2K0300-linux-4.19
34,446
arch/x86/crypto/camellia-aesni-avx-asm_64.S
/* * x86_64/AVX/AES-NI assembler implementation of Camellia * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ /* * Version licensed under 2-clause BSD License is available at: * http://koti.mbnet.fi/axh/crypto/camellia-BSD-1.2.0-aesni1.tar.xz */ #include <linux/linkage.h> #include <asm/frame.h> #include <asm/nospec-branch.h> #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct camellia_ctx: */ #define key_table 0 #define key_length CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi /********************************************************************** 16-way camellia **********************************************************************/ #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /* * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vmovdqa .Linv_shift_row, t4; \ vbroadcastss .L0f0f0f0f, t7; \ vmovdqa .Lpre_tf_lo_s1, t0; \ vmovdqa .Lpre_tf_hi_s1, t1; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ \ /* prefilter sboxes 1, 2 and 3 */ \ vmovdqa .Lpre_tf_lo_s4, t2; \ vmovdqa .Lpre_tf_hi_s4, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x1, t0, t1, t7, t6); \ filter_8bit(x4, t0, t1, t7, t6); \ 
filter_8bit(x2, t0, t1, t7, t6); \ filter_8bit(x5, t0, t1, t7, t6); \ \ /* prefilter sbox 4 */ \ vpxor t4, t4, t4; \ filter_8bit(x3, t2, t3, t7, t6); \ filter_8bit(x6, t2, t3, t7, t6); \ \ /* AES subbytes + AES shift rows */ \ vmovdqa .Lpost_tf_lo_s1, t0; \ vmovdqa .Lpost_tf_hi_s1, t1; \ vaesenclast t4, x0, x0; \ vaesenclast t4, x7, x7; \ vaesenclast t4, x1, x1; \ vaesenclast t4, x4, x4; \ vaesenclast t4, x2, x2; \ vaesenclast t4, x5, x5; \ vaesenclast t4, x3, x3; \ vaesenclast t4, x6, x6; \ \ /* postfilter sboxes 1 and 4 */ \ vmovdqa .Lpost_tf_lo_s3, t2; \ vmovdqa .Lpost_tf_hi_s3, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vmovdqa .Lpost_tf_lo_s2, t4; \ vmovdqa .Lpost_tf_hi_s2, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpxor t6, t6, t6; \ vmovq key, t0; \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ \ vpsrldq $5, t0, t5; \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpsrldq $3, t0, t3; \ vpsrldq $4, t0, t4; \ vpshufb t6, t0, t0; \ vpshufb t6, t1, t1; \ vpshufb t6, t2, t2; \ vpshufb t6, t3, t3; \ vpshufb t6, t4, t4; \ vpsrldq $2, t5, t7; \ vpshufb t6, t7, t7; \ \ /* \ * P-function \ */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* \ * Add key material and result to CD (x becomes new CD) \ */ \ \ vpxor t3, x4, x4; \ vpxor 0 * 16(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 16(mem_cd), x5, x5; \ \ vpsrldq $1, t5, t3; \ vpshufb t6, t5, t5; \ vpshufb t6, t3, t6; \ \ vpxor t1, x6, x6; \ vpxor 2 * 16(mem_cd), x6, x6; \ \ vpxor 
t0, x7, x7; \ vpxor 3 * 16(mem_cd), x7, x7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 16(mem_cd), x0, x0; \ \ vpxor t6, x1, x1; \ vpxor 5 * 16(mem_cd), x1, x1; \ \ vpxor t5, x2, x2; \ vpxor 6 * 16(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 16(mem_cd), x3, x3; /* * Size optimization... with inlined roundsm16, binary would be over 5 times * larger and would only be 0.5% faster (on sandy-bridge). */ .align 8 roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rcx, (%r9)); ret; ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, %rax, (%r9)); ret; ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ leaq (key_table + (i) * 8)(CTX), %r9; \ call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \ \ vmovdqu x4, 0 * 16(mem_cd); \ vmovdqu x5, 1 * 16(mem_cd); \ vmovdqu x6, 2 * 16(mem_cd); \ vmovdqu x7, 3 * 16(mem_cd); \ vmovdqu x0, 4 * 16(mem_cd); \ vmovdqu x1, 5 * 16(mem_cd); \ vmovdqu x2, 6 * 16(mem_cd); \ vmovdqu x3, 7 * 16(mem_cd); \ \ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \ call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu 
x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_16(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls16(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpxor tt0, tt0, tt0; \ vmovd kll, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ 
vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vmovdqu l4, 4 * 16(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 16(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 16(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 16(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vmovd krr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 16(r), t0, t0; \ vpor 5 * 16(r), t1, t1; \ vpor 6 * 16(r), t2, t2; \ vpor 7 * 16(r), t3, t3; \ \ vpxor 0 * 16(r), t0, t0; \ vpxor 1 * 16(r), t1, t1; \ vpxor 2 * 16(r), t2, t2; \ vpxor 3 * 16(r), t3, t3; \ vmovdqu t0, 0 * 16(r); \ vmovdqu t1, 1 * 16(r); \ vmovdqu t2, 2 * 16(r); \ vmovdqu t3, 3 * 16(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vmovd krl, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 16(r), t0, t0; \ vpand 1 * 16(r), t1, t1; \ vpand 2 * 16(r), t2, t2; \ vpand 3 * 16(r), t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 16(r), t0, t0; \ vpxor 5 * 16(r), t1, t1; \ vpxor 6 * 16(r), t2, t2; \ vpxor 7 * 16(r), t3, t3; \ vmovdqu t0, 4 * 16(r); \ vmovdqu t1, 5 * 16(r); \ vmovdqu t2, 6 * 16(r); \ vmovdqu t3, 7 * 16(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vmovd klr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 16(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 16(l); \ vpxor l2, 
t2, l2; \ vmovdqu l2, 2 * 16(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 16(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \ b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vmovdqu .Lshufb_16x16b, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vmovq key, x0; \ vpshufb .Lpack_bswap, x0, x0; \ \ vpxor 0 * 16(rio), x0, y7; \ vpxor 1 * 16(rio), x0, y6; \ vpxor 2 * 16(rio), x0, y5; \ vpxor 3 * 16(rio), x0, y4; \ vpxor 4 * 16(rio), x0, y3; \ vpxor 5 * 16(rio), x0, y2; \ vpxor 6 * 16(rio), x0, y1; \ vpxor 7 * 16(rio), x0, y0; \ vpxor 8 * 16(rio), x0, x7; \ vpxor 9 * 16(rio), x0, 
x6; \ vpxor 10 * 16(rio), x0, x5; \ vpxor 11 * 16(rio), x0, x4; \ vpxor 12 * 16(rio), x0, x3; \ vpxor 13 * 16(rio), x0, x2; \ vpxor 14 * 16(rio), x0, x1; \ vpxor 15 * 16(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack16_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); \ vmovdqu y0, 0 * 16(mem_cd); \ vmovdqu y1, 1 * 16(mem_cd); \ vmovdqu y2, 2 * 16(mem_cd); \ vmovdqu y3, 3 * 16(mem_cd); \ vmovdqu y4, 4 * 16(mem_cd); \ vmovdqu y5, 5 * 16(mem_cd); \ vmovdqu y6, 6 * 16(mem_cd); \ vmovdqu y7, 7 * 16(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, y3, \ y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vmovq key, x0; \ vpshufb .Lpack_bswap, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 16(rio); \ vmovdqu x1, 1 * 16(rio); \ vmovdqu x2, 2 * 16(rio); \ vmovdqu x3, 3 * 16(rio); \ vmovdqu x4, 4 * 16(rio); \ vmovdqu x5, 5 * 16(rio); \ vmovdqu x6, 6 * 16(rio); \ vmovdqu x7, 7 * 16(rio); \ vmovdqu y0, 8 * 16(rio); \ vmovdqu y1, 9 * 16(rio); \ 
vmovdqu y2, 10 * 16(rio); \ vmovdqu y3, 11 * 16(rio); \ vmovdqu y4, 12 * 16(rio); \ vmovdqu y5, 13 * 16(rio); \ vmovdqu y6, 14 * 16(rio); \ vmovdqu y7, 15 * 16(rio); /* NB: section is mergeable, all elements must be aligned 16-byte blocks */ .section .rodata.cst16, "aM", @progbits, 16 .align 16 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3); .Lpack_bswap: .long 0x00010203 .long 0x04050607 .long 0x80808080 .long 0x80808080 /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* For XTS mode IV generation */ .Lxts_gf128mul_and_shl1_mask: .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 
0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* 4-bit mask */ .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 .align 4 .L0f0f0f0f: .long 0x0f0f0f0f .text .align 8 __camellia_enc_blk16: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %xmm0..%xmm15: 16 plaintext blocks * output: * %xmm0..%xmm15: 16 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ FRAME_BEGIN leaq 8 * 16(%rax), %rcx; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, 
%rax, %rcx, 0); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX), ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 8); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX), ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 16); movl $24, %r8d; cmpl $16, key_length(CTX); jne .Lenc_max32; .Lenc_done: /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); FRAME_END ret; .align 8 .Lenc_max32: movl $32, %r8d; fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX), ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 24); jmp .Lenc_done; ENDPROC(__camellia_enc_blk16) .align 8 __camellia_dec_blk16: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * 
%r8d: 24 for 16 byte key, 32 for larger * %xmm0..%xmm15: 16 encrypted blocks * output: * %xmm0..%xmm15: 16 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ FRAME_BEGIN leaq 8 * 16(%rax), %rcx; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); cmpl $32, %r8d; je .Ldec_max32; .Ldec_max24: dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 16); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX), ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX)); dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 8); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX), ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX)); dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 0); /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); FRAME_END ret; .align 8 .Ldec_max32: dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, 
%xmm13, %xmm14, %xmm15, %rax, %rcx, 24); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX), ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX)); jmp .Ldec_max24; ENDPROC(__camellia_dec_blk16) ENTRY(camellia_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) */ FRAME_BEGIN inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX)); /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; call __camellia_enc_blk16; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END ret; ENDPROC(camellia_ecb_enc_16way) ENTRY(camellia_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) */ FRAME_BEGIN cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX, %r8, 8)); /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; call __camellia_dec_blk16; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END ret; ENDPROC(camellia_ecb_dec_16way) ENTRY(camellia_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) */ FRAME_BEGIN cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX, %r8, 8)); /* * dst 
might still be in-use (in case dst == src), so use stack for * temporary storage. */ subq $(16 * 16), %rsp; movq %rsp, %rax; call __camellia_dec_blk16; addq $(16 * 16), %rsp; vpxor (0 * 16)(%rdx), %xmm6, %xmm6; vpxor (1 * 16)(%rdx), %xmm5, %xmm5; vpxor (2 * 16)(%rdx), %xmm4, %xmm4; vpxor (3 * 16)(%rdx), %xmm3, %xmm3; vpxor (4 * 16)(%rdx), %xmm2, %xmm2; vpxor (5 * 16)(%rdx), %xmm1, %xmm1; vpxor (6 * 16)(%rdx), %xmm0, %xmm0; vpxor (7 * 16)(%rdx), %xmm15, %xmm15; vpxor (8 * 16)(%rdx), %xmm14, %xmm14; vpxor (9 * 16)(%rdx), %xmm13, %xmm13; vpxor (10 * 16)(%rdx), %xmm12, %xmm12; vpxor (11 * 16)(%rdx), %xmm11, %xmm11; vpxor (12 * 16)(%rdx), %xmm10, %xmm10; vpxor (13 * 16)(%rdx), %xmm9, %xmm9; vpxor (14 * 16)(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END ret; ENDPROC(camellia_cbc_dec_16way) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; ENTRY(camellia_ctr_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (little endian, 128bit) */ FRAME_BEGIN subq $(16 * 16), %rsp; movq %rsp, %rax; vmovdqa .Lbswap128_mask, %xmm14; /* load IV and byteswap */ vmovdqu (%rcx), %xmm0; vpshufb %xmm14, %xmm0, %xmm15; vmovdqu %xmm15, 15 * 16(%rax); vpcmpeqd %xmm15, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; /* low: -1, high: 0 */ /* construct IVs */ inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm13; vmovdqu %xmm13, 14 * 16(%rax); inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm13; vmovdqu %xmm13, 13 * 16(%rax); inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm12; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm11; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm10; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm9; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, 
%xmm0, %xmm8; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm7; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm6; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm5; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm4; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm3; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm2; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm1; inc_le128(%xmm0, %xmm15, %xmm13); vmovdqa %xmm0, %xmm13; vpshufb %xmm14, %xmm0, %xmm0; inc_le128(%xmm13, %xmm15, %xmm14); vmovdqu %xmm13, (%rcx); /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor 13 * 16(%rax), %xmm15, %xmm13; vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; addq $(16 * 16), %rsp; vpxor 0 * 16(%rdx), %xmm7, %xmm7; vpxor 1 * 16(%rdx), %xmm6, %xmm6; vpxor 2 * 16(%rdx), %xmm5, %xmm5; vpxor 3 * 16(%rdx), %xmm4, %xmm4; vpxor 4 * 16(%rdx), %xmm3, %xmm3; vpxor 5 * 16(%rdx), %xmm2, %xmm2; vpxor 6 * 16(%rdx), %xmm1, %xmm1; vpxor 7 * 16(%rdx), %xmm0, %xmm0; vpxor 8 * 16(%rdx), %xmm15, %xmm15; vpxor 9 * 16(%rdx), %xmm14, %xmm14; vpxor 10 * 16(%rdx), %xmm13, %xmm13; vpxor 11 * 16(%rdx), %xmm12, %xmm12; vpxor 12 * 16(%rdx), %xmm11, %xmm11; vpxor 13 * 16(%rdx), %xmm10, %xmm10; vpxor 14 * 16(%rdx), %xmm9, %xmm9; vpxor 15 * 16(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END ret; ENDPROC(camellia_ctr_16way) #define gf128mul_x_ble(iv, 
mask, tmp) \ vpsrad $31, iv, tmp; \ vpaddq iv, iv, iv; \ vpshufd $0x13, tmp, tmp; \ vpand mask, tmp, tmp; \ vpxor tmp, iv, iv; .align 8 camellia_xts_crypt_16way: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) * %r8: index for input whitening key * %r9: pointer to __camellia_enc_blk16 or __camellia_dec_blk16 */ FRAME_BEGIN subq $(16 * 16), %rsp; movq %rsp, %rax; vmovdqa .Lxts_gf128mul_and_shl1_mask, %xmm14; /* load IV */ vmovdqu (%rcx), %xmm0; vpxor 0 * 16(%rdx), %xmm0, %xmm15; vmovdqu %xmm15, 15 * 16(%rax); vmovdqu %xmm0, 0 * 16(%rsi); /* construct IVs */ gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 1 * 16(%rdx), %xmm0, %xmm15; vmovdqu %xmm15, 14 * 16(%rax); vmovdqu %xmm0, 1 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 2 * 16(%rdx), %xmm0, %xmm13; vmovdqu %xmm0, 2 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 3 * 16(%rdx), %xmm0, %xmm12; vmovdqu %xmm0, 3 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 4 * 16(%rdx), %xmm0, %xmm11; vmovdqu %xmm0, 4 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 5 * 16(%rdx), %xmm0, %xmm10; vmovdqu %xmm0, 5 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 6 * 16(%rdx), %xmm0, %xmm9; vmovdqu %xmm0, 6 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 7 * 16(%rdx), %xmm0, %xmm8; vmovdqu %xmm0, 7 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 8 * 16(%rdx), %xmm0, %xmm7; vmovdqu %xmm0, 8 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 9 * 16(%rdx), %xmm0, %xmm6; vmovdqu %xmm0, 9 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 10 * 16(%rdx), %xmm0, %xmm5; vmovdqu %xmm0, 10 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 11 * 16(%rdx), %xmm0, %xmm4; vmovdqu %xmm0, 11 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 12 * 16(%rdx), %xmm0, %xmm3; vmovdqu %xmm0, 12 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 13 * 16(%rdx), %xmm0, %xmm2; vmovdqu %xmm0, 13 * 16(%rsi); 
gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 14 * 16(%rdx), %xmm0, %xmm1; vmovdqu %xmm0, 14 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vpxor 15 * 16(%rdx), %xmm0, %xmm15; vmovdqu %xmm15, 0 * 16(%rax); vmovdqu %xmm0, 15 * 16(%rsi); gf128mul_x_ble(%xmm0, %xmm14, %xmm15); vmovdqu %xmm0, (%rcx); /* inpack16_pre: */ vmovq (key_table)(CTX, %r8, 8), %xmm15; vpshufb .Lpack_bswap, %xmm15, %xmm15; vpxor 0 * 16(%rax), %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; CALL_NOSPEC %r9; addq $(16 * 16), %rsp; vpxor 0 * 16(%rsi), %xmm7, %xmm7; vpxor 1 * 16(%rsi), %xmm6, %xmm6; vpxor 2 * 16(%rsi), %xmm5, %xmm5; vpxor 3 * 16(%rsi), %xmm4, %xmm4; vpxor 4 * 16(%rsi), %xmm3, %xmm3; vpxor 5 * 16(%rsi), %xmm2, %xmm2; vpxor 6 * 16(%rsi), %xmm1, %xmm1; vpxor 7 * 16(%rsi), %xmm0, %xmm0; vpxor 8 * 16(%rsi), %xmm15, %xmm15; vpxor 9 * 16(%rsi), %xmm14, %xmm14; vpxor 10 * 16(%rsi), %xmm13, %xmm13; vpxor 11 * 16(%rsi), %xmm12, %xmm12; vpxor 12 * 16(%rsi), %xmm11, %xmm11; vpxor 13 * 16(%rsi), %xmm10, %xmm10; vpxor 14 * 16(%rsi), %xmm9, %xmm9; vpxor 15 * 16(%rsi), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END ret; ENDPROC(camellia_xts_crypt_16way) ENTRY(camellia_xts_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ xorl %r8d, %r8d; /* input whitening key, 0 for enc */ leaq __camellia_enc_blk16, %r9; jmp camellia_xts_crypt_16way; ENDPROC(camellia_xts_enc_16way) ENTRY(camellia_xts_dec_16way) /* input: * 
%rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* input whitening key, last for dec */ leaq __camellia_dec_blk16, %r9; jmp camellia_xts_crypt_16way; ENDPROC(camellia_xts_dec_16way)
AirFortressIlikara/LS2K0300-linux-4.19
5,346
arch/x86/crypto/glue_helper-asm-avx2.S
/* * Shared glue code for 128bit block ciphers, AVX2 assembler macros * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \ vmovdqu (0*32)(src), x0; \ vmovdqu (1*32)(src), x1; \ vmovdqu (2*32)(src), x2; \ vmovdqu (3*32)(src), x3; \ vmovdqu (4*32)(src), x4; \ vmovdqu (5*32)(src), x5; \ vmovdqu (6*32)(src), x6; \ vmovdqu (7*32)(src), x7; #define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vmovdqu x0, (0*32)(dst); \ vmovdqu x1, (1*32)(dst); \ vmovdqu x2, (2*32)(dst); \ vmovdqu x3, (3*32)(dst); \ vmovdqu x4, (4*32)(dst); \ vmovdqu x5, (5*32)(dst); \ vmovdqu x6, (6*32)(dst); \ vmovdqu x7, (7*32)(dst); #define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \ vpxor t0, t0, t0; \ vinserti128 $1, (src), t0, t0; \ vpxor t0, x0, x0; \ vpxor (0*32+16)(src), x1, x1; \ vpxor (1*32+16)(src), x2, x2; \ vpxor (2*32+16)(src), x3, x3; \ vpxor (3*32+16)(src), x4, x4; \ vpxor (4*32+16)(src), x5, x5; \ vpxor (5*32+16)(src), x6, x6; \ vpxor (6*32+16)(src), x7, x7; \ store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7); #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; #define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \ vpcmpeqq minus_one, x, tmp1; \ vpcmpeqq minus_two, x, tmp2; \ vpsubq minus_two, x, x; \ vpor tmp2, tmp1, tmp1; \ vpslldq $8, tmp1, tmp1; \ vpsubq tmp1, x, x; #define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \ t1x, t2, t2x, t3, t3x, t4, t5) \ vpcmpeqd t0, t0, t0; \ vpsrldq $8, t0, t0; /* ab: -1:0 ; cd: -1:0 */ \ vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */\ \ /* load IV and byteswap */ \ vmovdqu (iv), t2x; \ 
vmovdqa t2x, t3x; \ inc_le128(t2x, t0x, t1x); \ vbroadcasti128 bswap, t1; \ vinserti128 $1, t2x, t3, t2; /* ab: le0 ; cd: le1 */ \ vpshufb t1, t2, x0; \ \ /* construct IVs */ \ add2_le128(t2, t0, t4, t3, t5); /* ab: le2 ; cd: le3 */ \ vpshufb t1, t2, x1; \ add2_le128(t2, t0, t4, t3, t5); \ vpshufb t1, t2, x2; \ add2_le128(t2, t0, t4, t3, t5); \ vpshufb t1, t2, x3; \ add2_le128(t2, t0, t4, t3, t5); \ vpshufb t1, t2, x4; \ add2_le128(t2, t0, t4, t3, t5); \ vpshufb t1, t2, x5; \ add2_le128(t2, t0, t4, t3, t5); \ vpshufb t1, t2, x6; \ add2_le128(t2, t0, t4, t3, t5); \ vpshufb t1, t2, x7; \ vextracti128 $1, t2, t2x; \ inc_le128(t2x, t0x, t3x); \ vmovdqu t2x, (iv); #define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vpxor (0*32)(src), x0, x0; \ vpxor (1*32)(src), x1, x1; \ vpxor (2*32)(src), x2, x2; \ vpxor (3*32)(src), x3, x3; \ vpxor (4*32)(src), x4, x4; \ vpxor (5*32)(src), x5, x5; \ vpxor (6*32)(src), x6, x6; \ vpxor (7*32)(src), x7, x7; \ store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7); #define gf128mul_x_ble(iv, mask, tmp) \ vpsrad $31, iv, tmp; \ vpaddq iv, iv, iv; \ vpshufd $0x13, tmp, tmp; \ vpand mask, tmp, tmp; \ vpxor tmp, iv, iv; #define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \ vpsrad $31, iv, tmp0; \ vpaddq iv, iv, tmp1; \ vpsllq $2, iv, iv; \ vpshufd $0x13, tmp0, tmp0; \ vpsrad $31, tmp1, tmp1; \ vpand mask2, tmp0, tmp0; \ vpshufd $0x13, tmp1, tmp1; \ vpxor tmp0, iv, iv; \ vpand mask1, tmp1, tmp1; \ vpxor tmp1, iv, iv; #define load_xts_16way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, \ tivx, t0, t0x, t1, t1x, t2, t2x, t3, \ xts_gf128mul_and_shl1_mask_0, \ xts_gf128mul_and_shl1_mask_1) \ vbroadcasti128 xts_gf128mul_and_shl1_mask_0, t1; \ \ /* load IV and construct second IV */ \ vmovdqu (iv), tivx; \ vmovdqa tivx, t0x; \ gf128mul_x_ble(tivx, t1x, t2x); \ vbroadcasti128 xts_gf128mul_and_shl1_mask_1, t2; \ vinserti128 $1, tivx, t0, tiv; \ vpxor (0*32)(src), tiv, x0; \ vmovdqu tiv, (0*32)(dst); \ \ /* construct and store IVs, 
also xor with source */ \ gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ vpxor (1*32)(src), tiv, x1; \ vmovdqu tiv, (1*32)(dst); \ \ gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ vpxor (2*32)(src), tiv, x2; \ vmovdqu tiv, (2*32)(dst); \ \ gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ vpxor (3*32)(src), tiv, x3; \ vmovdqu tiv, (3*32)(dst); \ \ gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ vpxor (4*32)(src), tiv, x4; \ vmovdqu tiv, (4*32)(dst); \ \ gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ vpxor (5*32)(src), tiv, x5; \ vmovdqu tiv, (5*32)(dst); \ \ gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ vpxor (6*32)(src), tiv, x6; \ vmovdqu tiv, (6*32)(dst); \ \ gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ vpxor (7*32)(src), tiv, x7; \ vmovdqu tiv, (7*32)(dst); \ \ vextracti128 $1, tiv, tivx; \ gf128mul_x_ble(tivx, t1x, t2x); \ vmovdqu tivx, (iv); #define store_xts_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vpxor (0*32)(dst), x0, x0; \ vpxor (1*32)(dst), x1, x1; \ vpxor (2*32)(dst), x2, x2; \ vpxor (3*32)(dst), x3, x3; \ vpxor (4*32)(dst), x4, x4; \ vpxor (5*32)(dst), x5, x5; \ vpxor (6*32)(dst), x6, x6; \ vpxor (7*32)(dst), x7, x7; \ store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
AirFortressIlikara/LS2K0300-linux-4.19
12,928
arch/x86/crypto/chacha20-avx2-x86_64.S
/* * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions * * Copyright (C) 2015 Martin Willi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> .section .rodata.cst32.ROT8, "aM", @progbits, 32 .align 32 ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 .octa 0x0e0d0c0f0a09080b0605040702010003 .section .rodata.cst32.ROT16, "aM", @progbits, 32 .align 32 ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 .octa 0x0d0c0f0e09080b0a0504070601000302 .section .rodata.cst32.CTRINC, "aM", @progbits, 32 .align 32 CTRINC: .octa 0x00000003000000020000000100000000 .octa 0x00000007000000060000000500000004 .text ENTRY(chacha20_8block_xor_avx2) # %rdi: Input state matrix, s # %rsi: 8 data blocks output, o # %rdx: 8 data blocks input, i # This function encrypts eight consecutive ChaCha20 blocks by loading # the state matrix in AVX registers eight times. As we need some # scratch registers, we save the first four registers on the stack. The # algorithm performs each operation on the corresponding word of each # state matrix, hence requires no word shuffling. For final XORing step # we transpose the matrix by interleaving 32-, 64- and then 128-bit # words, which allows us to do XOR in AVX registers. 8/16-bit word # rotation is done with the slightly better performing byte shuffling, # 7/12-bit word rotation uses traditional shift+OR. 
vzeroupper # 4 * 32 byte stack, 32-byte aligned lea 8(%rsp),%r10 and $~31, %rsp sub $0x80, %rsp # x0..15[0-7] = s[0..15] vpbroadcastd 0x00(%rdi),%ymm0 vpbroadcastd 0x04(%rdi),%ymm1 vpbroadcastd 0x08(%rdi),%ymm2 vpbroadcastd 0x0c(%rdi),%ymm3 vpbroadcastd 0x10(%rdi),%ymm4 vpbroadcastd 0x14(%rdi),%ymm5 vpbroadcastd 0x18(%rdi),%ymm6 vpbroadcastd 0x1c(%rdi),%ymm7 vpbroadcastd 0x20(%rdi),%ymm8 vpbroadcastd 0x24(%rdi),%ymm9 vpbroadcastd 0x28(%rdi),%ymm10 vpbroadcastd 0x2c(%rdi),%ymm11 vpbroadcastd 0x30(%rdi),%ymm12 vpbroadcastd 0x34(%rdi),%ymm13 vpbroadcastd 0x38(%rdi),%ymm14 vpbroadcastd 0x3c(%rdi),%ymm15 # x0..3 on stack vmovdqa %ymm0,0x00(%rsp) vmovdqa %ymm1,0x20(%rsp) vmovdqa %ymm2,0x40(%rsp) vmovdqa %ymm3,0x60(%rsp) vmovdqa CTRINC(%rip),%ymm1 vmovdqa ROT8(%rip),%ymm2 vmovdqa ROT16(%rip),%ymm3 # x12 += counter values 0-3 vpaddd %ymm1,%ymm12,%ymm12 mov $10,%ecx .Ldoubleround8: # x0 += x4, x12 = rotl32(x12 ^ x0, 16) vpaddd 0x00(%rsp),%ymm4,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm3,%ymm12,%ymm12 # x1 += x5, x13 = rotl32(x13 ^ x1, 16) vpaddd 0x20(%rsp),%ymm5,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpxor %ymm0,%ymm13,%ymm13 vpshufb %ymm3,%ymm13,%ymm13 # x2 += x6, x14 = rotl32(x14 ^ x2, 16) vpaddd 0x40(%rsp),%ymm6,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpxor %ymm0,%ymm14,%ymm14 vpshufb %ymm3,%ymm14,%ymm14 # x3 += x7, x15 = rotl32(x15 ^ x3, 16) vpaddd 0x60(%rsp),%ymm7,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpxor %ymm0,%ymm15,%ymm15 vpshufb %ymm3,%ymm15,%ymm15 # x8 += x12, x4 = rotl32(x4 ^ x8, 12) vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $12,%ymm4,%ymm0 vpsrld $20,%ymm4,%ymm4 vpor %ymm0,%ymm4,%ymm4 # x9 += x13, x5 = rotl32(x5 ^ x9, 12) vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $12,%ymm5,%ymm0 vpsrld $20,%ymm5,%ymm5 vpor %ymm0,%ymm5,%ymm5 # x10 += x14, x6 = rotl32(x6 ^ x10, 12) vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $12,%ymm6,%ymm0 vpsrld $20,%ymm6,%ymm6 vpor %ymm0,%ymm6,%ymm6 # x11 += x15, x7 = rotl32(x7 ^ x11, 12) 
vpaddd %ymm15,%ymm11,%ymm11 vpxor %ymm11,%ymm7,%ymm7 vpslld $12,%ymm7,%ymm0 vpsrld $20,%ymm7,%ymm7 vpor %ymm0,%ymm7,%ymm7 # x0 += x4, x12 = rotl32(x12 ^ x0, 8) vpaddd 0x00(%rsp),%ymm4,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm2,%ymm12,%ymm12 # x1 += x5, x13 = rotl32(x13 ^ x1, 8) vpaddd 0x20(%rsp),%ymm5,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpxor %ymm0,%ymm13,%ymm13 vpshufb %ymm2,%ymm13,%ymm13 # x2 += x6, x14 = rotl32(x14 ^ x2, 8) vpaddd 0x40(%rsp),%ymm6,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpxor %ymm0,%ymm14,%ymm14 vpshufb %ymm2,%ymm14,%ymm14 # x3 += x7, x15 = rotl32(x15 ^ x3, 8) vpaddd 0x60(%rsp),%ymm7,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpxor %ymm0,%ymm15,%ymm15 vpshufb %ymm2,%ymm15,%ymm15 # x8 += x12, x4 = rotl32(x4 ^ x8, 7) vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm0 vpsrld $25,%ymm4,%ymm4 vpor %ymm0,%ymm4,%ymm4 # x9 += x13, x5 = rotl32(x5 ^ x9, 7) vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm0 vpsrld $25,%ymm5,%ymm5 vpor %ymm0,%ymm5,%ymm5 # x10 += x14, x6 = rotl32(x6 ^ x10, 7) vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm0 vpsrld $25,%ymm6,%ymm6 vpor %ymm0,%ymm6,%ymm6 # x11 += x15, x7 = rotl32(x7 ^ x11, 7) vpaddd %ymm15,%ymm11,%ymm11 vpxor %ymm11,%ymm7,%ymm7 vpslld $7,%ymm7,%ymm0 vpsrld $25,%ymm7,%ymm7 vpor %ymm0,%ymm7,%ymm7 # x0 += x5, x15 = rotl32(x15 ^ x0, 16) vpaddd 0x00(%rsp),%ymm5,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpxor %ymm0,%ymm15,%ymm15 vpshufb %ymm3,%ymm15,%ymm15 # x1 += x6, x12 = rotl32(x12 ^ x1, 16)%ymm0 vpaddd 0x20(%rsp),%ymm6,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm3,%ymm12,%ymm12 # x2 += x7, x13 = rotl32(x13 ^ x2, 16) vpaddd 0x40(%rsp),%ymm7,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpxor %ymm0,%ymm13,%ymm13 vpshufb %ymm3,%ymm13,%ymm13 # x3 += x4, x14 = rotl32(x14 ^ x3, 16) vpaddd 0x60(%rsp),%ymm4,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpxor %ymm0,%ymm14,%ymm14 vpshufb %ymm3,%ymm14,%ymm14 # x10 += x15, x5 = rotl32(x5 ^ x10, 12) vpaddd 
%ymm15,%ymm10,%ymm10 vpxor %ymm10,%ymm5,%ymm5 vpslld $12,%ymm5,%ymm0 vpsrld $20,%ymm5,%ymm5 vpor %ymm0,%ymm5,%ymm5 # x11 += x12, x6 = rotl32(x6 ^ x11, 12) vpaddd %ymm12,%ymm11,%ymm11 vpxor %ymm11,%ymm6,%ymm6 vpslld $12,%ymm6,%ymm0 vpsrld $20,%ymm6,%ymm6 vpor %ymm0,%ymm6,%ymm6 # x8 += x13, x7 = rotl32(x7 ^ x8, 12) vpaddd %ymm13,%ymm8,%ymm8 vpxor %ymm8,%ymm7,%ymm7 vpslld $12,%ymm7,%ymm0 vpsrld $20,%ymm7,%ymm7 vpor %ymm0,%ymm7,%ymm7 # x9 += x14, x4 = rotl32(x4 ^ x9, 12) vpaddd %ymm14,%ymm9,%ymm9 vpxor %ymm9,%ymm4,%ymm4 vpslld $12,%ymm4,%ymm0 vpsrld $20,%ymm4,%ymm4 vpor %ymm0,%ymm4,%ymm4 # x0 += x5, x15 = rotl32(x15 ^ x0, 8) vpaddd 0x00(%rsp),%ymm5,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpxor %ymm0,%ymm15,%ymm15 vpshufb %ymm2,%ymm15,%ymm15 # x1 += x6, x12 = rotl32(x12 ^ x1, 8) vpaddd 0x20(%rsp),%ymm6,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm2,%ymm12,%ymm12 # x2 += x7, x13 = rotl32(x13 ^ x2, 8) vpaddd 0x40(%rsp),%ymm7,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpxor %ymm0,%ymm13,%ymm13 vpshufb %ymm2,%ymm13,%ymm13 # x3 += x4, x14 = rotl32(x14 ^ x3, 8) vpaddd 0x60(%rsp),%ymm4,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpxor %ymm0,%ymm14,%ymm14 vpshufb %ymm2,%ymm14,%ymm14 # x10 += x15, x5 = rotl32(x5 ^ x10, 7) vpaddd %ymm15,%ymm10,%ymm10 vpxor %ymm10,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm0 vpsrld $25,%ymm5,%ymm5 vpor %ymm0,%ymm5,%ymm5 # x11 += x12, x6 = rotl32(x6 ^ x11, 7) vpaddd %ymm12,%ymm11,%ymm11 vpxor %ymm11,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm0 vpsrld $25,%ymm6,%ymm6 vpor %ymm0,%ymm6,%ymm6 # x8 += x13, x7 = rotl32(x7 ^ x8, 7) vpaddd %ymm13,%ymm8,%ymm8 vpxor %ymm8,%ymm7,%ymm7 vpslld $7,%ymm7,%ymm0 vpsrld $25,%ymm7,%ymm7 vpor %ymm0,%ymm7,%ymm7 # x9 += x14, x4 = rotl32(x4 ^ x9, 7) vpaddd %ymm14,%ymm9,%ymm9 vpxor %ymm9,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm0 vpsrld $25,%ymm4,%ymm4 vpor %ymm0,%ymm4,%ymm4 dec %ecx jnz .Ldoubleround8 # x0..15[0-3] += s[0..15] vpbroadcastd 0x00(%rdi),%ymm0 vpaddd 0x00(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpbroadcastd 0x04(%rdi),%ymm0 vpaddd 
0x20(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpbroadcastd 0x08(%rdi),%ymm0 vpaddd 0x40(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpbroadcastd 0x0c(%rdi),%ymm0 vpaddd 0x60(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpbroadcastd 0x10(%rdi),%ymm0 vpaddd %ymm0,%ymm4,%ymm4 vpbroadcastd 0x14(%rdi),%ymm0 vpaddd %ymm0,%ymm5,%ymm5 vpbroadcastd 0x18(%rdi),%ymm0 vpaddd %ymm0,%ymm6,%ymm6 vpbroadcastd 0x1c(%rdi),%ymm0 vpaddd %ymm0,%ymm7,%ymm7 vpbroadcastd 0x20(%rdi),%ymm0 vpaddd %ymm0,%ymm8,%ymm8 vpbroadcastd 0x24(%rdi),%ymm0 vpaddd %ymm0,%ymm9,%ymm9 vpbroadcastd 0x28(%rdi),%ymm0 vpaddd %ymm0,%ymm10,%ymm10 vpbroadcastd 0x2c(%rdi),%ymm0 vpaddd %ymm0,%ymm11,%ymm11 vpbroadcastd 0x30(%rdi),%ymm0 vpaddd %ymm0,%ymm12,%ymm12 vpbroadcastd 0x34(%rdi),%ymm0 vpaddd %ymm0,%ymm13,%ymm13 vpbroadcastd 0x38(%rdi),%ymm0 vpaddd %ymm0,%ymm14,%ymm14 vpbroadcastd 0x3c(%rdi),%ymm0 vpaddd %ymm0,%ymm15,%ymm15 # x12 += counter values 0-3 vpaddd %ymm1,%ymm12,%ymm12 # interleave 32-bit words in state n, n+1 vmovdqa 0x00(%rsp),%ymm0 vmovdqa 0x20(%rsp),%ymm1 vpunpckldq %ymm1,%ymm0,%ymm2 vpunpckhdq %ymm1,%ymm0,%ymm1 vmovdqa %ymm2,0x00(%rsp) vmovdqa %ymm1,0x20(%rsp) vmovdqa 0x40(%rsp),%ymm0 vmovdqa 0x60(%rsp),%ymm1 vpunpckldq %ymm1,%ymm0,%ymm2 vpunpckhdq %ymm1,%ymm0,%ymm1 vmovdqa %ymm2,0x40(%rsp) vmovdqa %ymm1,0x60(%rsp) vmovdqa %ymm4,%ymm0 vpunpckldq %ymm5,%ymm0,%ymm4 vpunpckhdq %ymm5,%ymm0,%ymm5 vmovdqa %ymm6,%ymm0 vpunpckldq %ymm7,%ymm0,%ymm6 vpunpckhdq %ymm7,%ymm0,%ymm7 vmovdqa %ymm8,%ymm0 vpunpckldq %ymm9,%ymm0,%ymm8 vpunpckhdq %ymm9,%ymm0,%ymm9 vmovdqa %ymm10,%ymm0 vpunpckldq %ymm11,%ymm0,%ymm10 vpunpckhdq %ymm11,%ymm0,%ymm11 vmovdqa %ymm12,%ymm0 vpunpckldq %ymm13,%ymm0,%ymm12 vpunpckhdq %ymm13,%ymm0,%ymm13 vmovdqa %ymm14,%ymm0 vpunpckldq %ymm15,%ymm0,%ymm14 vpunpckhdq %ymm15,%ymm0,%ymm15 # interleave 64-bit words in state n, n+2 vmovdqa 0x00(%rsp),%ymm0 vmovdqa 0x40(%rsp),%ymm2 vpunpcklqdq %ymm2,%ymm0,%ymm1 vpunpckhqdq %ymm2,%ymm0,%ymm2 vmovdqa %ymm1,0x00(%rsp) vmovdqa %ymm2,0x40(%rsp) vmovdqa 
0x20(%rsp),%ymm0 vmovdqa 0x60(%rsp),%ymm2 vpunpcklqdq %ymm2,%ymm0,%ymm1 vpunpckhqdq %ymm2,%ymm0,%ymm2 vmovdqa %ymm1,0x20(%rsp) vmovdqa %ymm2,0x60(%rsp) vmovdqa %ymm4,%ymm0 vpunpcklqdq %ymm6,%ymm0,%ymm4 vpunpckhqdq %ymm6,%ymm0,%ymm6 vmovdqa %ymm5,%ymm0 vpunpcklqdq %ymm7,%ymm0,%ymm5 vpunpckhqdq %ymm7,%ymm0,%ymm7 vmovdqa %ymm8,%ymm0 vpunpcklqdq %ymm10,%ymm0,%ymm8 vpunpckhqdq %ymm10,%ymm0,%ymm10 vmovdqa %ymm9,%ymm0 vpunpcklqdq %ymm11,%ymm0,%ymm9 vpunpckhqdq %ymm11,%ymm0,%ymm11 vmovdqa %ymm12,%ymm0 vpunpcklqdq %ymm14,%ymm0,%ymm12 vpunpckhqdq %ymm14,%ymm0,%ymm14 vmovdqa %ymm13,%ymm0 vpunpcklqdq %ymm15,%ymm0,%ymm13 vpunpckhqdq %ymm15,%ymm0,%ymm15 # interleave 128-bit words in state n, n+4 vmovdqa 0x00(%rsp),%ymm0 vperm2i128 $0x20,%ymm4,%ymm0,%ymm1 vperm2i128 $0x31,%ymm4,%ymm0,%ymm4 vmovdqa %ymm1,0x00(%rsp) vmovdqa 0x20(%rsp),%ymm0 vperm2i128 $0x20,%ymm5,%ymm0,%ymm1 vperm2i128 $0x31,%ymm5,%ymm0,%ymm5 vmovdqa %ymm1,0x20(%rsp) vmovdqa 0x40(%rsp),%ymm0 vperm2i128 $0x20,%ymm6,%ymm0,%ymm1 vperm2i128 $0x31,%ymm6,%ymm0,%ymm6 vmovdqa %ymm1,0x40(%rsp) vmovdqa 0x60(%rsp),%ymm0 vperm2i128 $0x20,%ymm7,%ymm0,%ymm1 vperm2i128 $0x31,%ymm7,%ymm0,%ymm7 vmovdqa %ymm1,0x60(%rsp) vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 vmovdqa %ymm0,%ymm8 vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 vmovdqa %ymm0,%ymm9 vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 vmovdqa %ymm0,%ymm10 vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 vmovdqa %ymm0,%ymm11 # xor with corresponding input, write to output vmovdqa 0x00(%rsp),%ymm0 vpxor 0x0000(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0000(%rsi) vmovdqa 0x20(%rsp),%ymm0 vpxor 0x0080(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0080(%rsi) vmovdqa 0x40(%rsp),%ymm0 vpxor 0x0040(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0040(%rsi) vmovdqa 0x60(%rsp),%ymm0 vpxor 0x00c0(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x00c0(%rsi) vpxor 0x0100(%rdx),%ymm4,%ymm4 vmovdqu %ymm4,0x0100(%rsi) vpxor 
0x0180(%rdx),%ymm5,%ymm5 vmovdqu %ymm5,0x00180(%rsi) vpxor 0x0140(%rdx),%ymm6,%ymm6 vmovdqu %ymm6,0x0140(%rsi) vpxor 0x01c0(%rdx),%ymm7,%ymm7 vmovdqu %ymm7,0x01c0(%rsi) vpxor 0x0020(%rdx),%ymm8,%ymm8 vmovdqu %ymm8,0x0020(%rsi) vpxor 0x00a0(%rdx),%ymm9,%ymm9 vmovdqu %ymm9,0x00a0(%rsi) vpxor 0x0060(%rdx),%ymm10,%ymm10 vmovdqu %ymm10,0x0060(%rsi) vpxor 0x00e0(%rdx),%ymm11,%ymm11 vmovdqu %ymm11,0x00e0(%rsi) vpxor 0x0120(%rdx),%ymm12,%ymm12 vmovdqu %ymm12,0x0120(%rsi) vpxor 0x01a0(%rdx),%ymm13,%ymm13 vmovdqu %ymm13,0x01a0(%rsi) vpxor 0x0160(%rdx),%ymm14,%ymm14 vmovdqu %ymm14,0x0160(%rsi) vpxor 0x01e0(%rdx),%ymm15,%ymm15 vmovdqu %ymm15,0x01e0(%rsi) vzeroupper lea -8(%r10),%rsp ret ENDPROC(chacha20_8block_xor_avx2)
AirFortressIlikara/LS2K0300-linux-4.19
7,879
arch/x86/crypto/sha1_ni_asm.S
/* * Intel SHA Extensions optimized implementation of a SHA-1 update function * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Sean Gulley <sean.m.gulley@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2015 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/linkage.h> #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ #define NUM_BLKS %rdx /* 3rd arg */ #define RSPSAVE %rax /* gcc conversion */ #define FRAME_SIZE 32 /* space for 2x16 bytes */ #define ABCD %xmm0 #define E0 %xmm1 /* Need two E's b/c they ping pong */ #define E1 %xmm2 #define MSG0 %xmm3 #define MSG1 %xmm4 #define MSG2 %xmm5 #define MSG3 %xmm6 #define SHUF_MASK %xmm7 /* * Intel SHA Extensions optimized implementation of a SHA-1 update function * * The function takes a pointer to the current hash values, a pointer to the * input data, and a number of 64 byte blocks to process. Once all blocks have * been processed, the digest pointer is updated with the resulting hash value. * The function only processes complete blocks, there is no functionality to * store partial blocks. All message padding and hash value initialization must * be done outside the update function. * * The indented lines in the loop are instructions related to rounds processing. * The non-indented lines are instructions related to the message schedule. 
* * void sha1_ni_transform(uint32_t *digest, const void *data, uint32_t numBlocks) * digest : pointer to digest * data: pointer to input data * numBlocks: Number of blocks to process */ .text .align 32 ENTRY(sha1_ni_transform) mov %rsp, RSPSAVE sub $FRAME_SIZE, %rsp and $~0xF, %rsp shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash add DATA_PTR, NUM_BLKS /* pointer to end of data */ /* load initial hash values */ pinsrd $3, 1*16(DIGEST_PTR), E0 movdqu 0*16(DIGEST_PTR), ABCD pand UPPER_WORD_MASK(%rip), E0 pshufd $0x1B, ABCD, ABCD movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), SHUF_MASK .Lloop0: /* Save hash values for addition after rounds */ movdqa E0, (0*16)(%rsp) movdqa ABCD, (1*16)(%rsp) /* Rounds 0-3 */ movdqu 0*16(DATA_PTR), MSG0 pshufb SHUF_MASK, MSG0 paddd MSG0, E0 movdqa ABCD, E1 sha1rnds4 $0, E0, ABCD /* Rounds 4-7 */ movdqu 1*16(DATA_PTR), MSG1 pshufb SHUF_MASK, MSG1 sha1nexte MSG1, E1 movdqa ABCD, E0 sha1rnds4 $0, E1, ABCD sha1msg1 MSG1, MSG0 /* Rounds 8-11 */ movdqu 2*16(DATA_PTR), MSG2 pshufb SHUF_MASK, MSG2 sha1nexte MSG2, E0 movdqa ABCD, E1 sha1rnds4 $0, E0, ABCD sha1msg1 MSG2, MSG1 pxor MSG2, MSG0 /* Rounds 12-15 */ movdqu 3*16(DATA_PTR), MSG3 pshufb SHUF_MASK, MSG3 sha1nexte MSG3, E1 movdqa ABCD, E0 sha1msg2 MSG3, MSG0 sha1rnds4 $0, E1, ABCD sha1msg1 MSG3, MSG2 pxor MSG3, MSG1 /* Rounds 16-19 */ sha1nexte MSG0, E0 movdqa ABCD, E1 sha1msg2 MSG0, MSG1 sha1rnds4 $0, E0, ABCD sha1msg1 MSG0, MSG3 pxor MSG0, MSG2 /* Rounds 20-23 */ sha1nexte MSG1, E1 movdqa ABCD, E0 sha1msg2 MSG1, MSG2 sha1rnds4 $1, E1, ABCD sha1msg1 MSG1, MSG0 pxor MSG1, MSG3 /* Rounds 24-27 */ sha1nexte MSG2, E0 movdqa ABCD, E1 sha1msg2 MSG2, MSG3 sha1rnds4 $1, E0, ABCD sha1msg1 MSG2, MSG1 pxor MSG2, MSG0 /* Rounds 28-31 */ sha1nexte MSG3, E1 movdqa ABCD, E0 sha1msg2 MSG3, MSG0 sha1rnds4 $1, E1, ABCD sha1msg1 MSG3, MSG2 pxor MSG3, MSG1 /* Rounds 32-35 */ sha1nexte MSG0, E0 movdqa ABCD, E1 sha1msg2 MSG0, MSG1 sha1rnds4 $1, E0, ABCD sha1msg1 MSG0, MSG3 pxor MSG0, MSG2 /* Rounds 36-39 */ 
sha1nexte MSG1, E1 movdqa ABCD, E0 sha1msg2 MSG1, MSG2 sha1rnds4 $1, E1, ABCD sha1msg1 MSG1, MSG0 pxor MSG1, MSG3 /* Rounds 40-43 */ sha1nexte MSG2, E0 movdqa ABCD, E1 sha1msg2 MSG2, MSG3 sha1rnds4 $2, E0, ABCD sha1msg1 MSG2, MSG1 pxor MSG2, MSG0 /* Rounds 44-47 */ sha1nexte MSG3, E1 movdqa ABCD, E0 sha1msg2 MSG3, MSG0 sha1rnds4 $2, E1, ABCD sha1msg1 MSG3, MSG2 pxor MSG3, MSG1 /* Rounds 48-51 */ sha1nexte MSG0, E0 movdqa ABCD, E1 sha1msg2 MSG0, MSG1 sha1rnds4 $2, E0, ABCD sha1msg1 MSG0, MSG3 pxor MSG0, MSG2 /* Rounds 52-55 */ sha1nexte MSG1, E1 movdqa ABCD, E0 sha1msg2 MSG1, MSG2 sha1rnds4 $2, E1, ABCD sha1msg1 MSG1, MSG0 pxor MSG1, MSG3 /* Rounds 56-59 */ sha1nexte MSG2, E0 movdqa ABCD, E1 sha1msg2 MSG2, MSG3 sha1rnds4 $2, E0, ABCD sha1msg1 MSG2, MSG1 pxor MSG2, MSG0 /* Rounds 60-63 */ sha1nexte MSG3, E1 movdqa ABCD, E0 sha1msg2 MSG3, MSG0 sha1rnds4 $3, E1, ABCD sha1msg1 MSG3, MSG2 pxor MSG3, MSG1 /* Rounds 64-67 */ sha1nexte MSG0, E0 movdqa ABCD, E1 sha1msg2 MSG0, MSG1 sha1rnds4 $3, E0, ABCD sha1msg1 MSG0, MSG3 pxor MSG0, MSG2 /* Rounds 68-71 */ sha1nexte MSG1, E1 movdqa ABCD, E0 sha1msg2 MSG1, MSG2 sha1rnds4 $3, E1, ABCD pxor MSG1, MSG3 /* Rounds 72-75 */ sha1nexte MSG2, E0 movdqa ABCD, E1 sha1msg2 MSG2, MSG3 sha1rnds4 $3, E0, ABCD /* Rounds 76-79 */ sha1nexte MSG3, E1 movdqa ABCD, E0 sha1rnds4 $3, E1, ABCD /* Add current hash values with previously saved */ sha1nexte (0*16)(%rsp), E0 paddd (1*16)(%rsp), ABCD /* Increment data pointer and loop if more to process */ add $64, DATA_PTR cmp NUM_BLKS, DATA_PTR jne .Lloop0 /* Write hash values back in the correct order */ pshufd $0x1B, ABCD, ABCD movdqu ABCD, 0*16(DIGEST_PTR) pextrd $3, E0, 1*16(DIGEST_PTR) .Ldone_hash: mov RSPSAVE, %rsp ret ENDPROC(sha1_ni_transform) .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x000102030405060708090a0b0c0d0e0f .section .rodata.cst16.UPPER_WORD_MASK, "aM", @progbits, 16 .align 16 UPPER_WORD_MASK: .octa 
0xFFFFFFFF000000000000000000000000
AirFortressIlikara/LS2K0300-linux-4.19
23,713
arch/x86/crypto/sha256-avx2-asm.S
######################################################################## # Implement fast SHA-256 with AVX2 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-256 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## # This code schedules 2 blocks at a time, with 4 lanes per block ######################################################################## #ifdef CONFIG_AS_AVX2 #include <linux/linkage.h> ## assume buffers not aligned #define VMOVDQ vmovdqu ################################ Define Macros # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm ################################ X0 = %ymm4 X1 = %ymm5 X2 = %ymm6 X3 = %ymm7 # XMM versions of above XWORD0 = %xmm4 XWORD1 = %xmm5 XWORD2 = %xmm6 XWORD3 = %xmm7 XTMP0 = %ymm0 XTMP1 = %ymm1 XTMP2 = %ymm2 XTMP3 = %ymm3 XTMP4 = %ymm8 XFER = %ymm9 XTMP5 = %ymm11 SHUF_00BA = %ymm10 # shuffle xBxA -> 00BA SHUF_DC00 = %ymm12 # shuffle xDxC -> DC00 BYTE_FLIP_MASK = %ymm13 X_BYTE_FLIP_MASK = %xmm13 # XMM version of BYTE_FLIP_MASK NUM_BLKS = %rdx # 3rd arg INP = %rsi # 2nd arg CTX = %rdi # 1st arg c = %ecx d = %r8d e = %edx # clobbers NUM_BLKS y3 = %esi # clobbers INP SRND = CTX # SRND is same register as CTX a = %eax b = %ebx f = %r9d g = %r10d h = %r11d old_h = %r11d T1 = %r12d y0 = %r13d y1 = %r14d y2 = %r15d _XFER_SIZE = 2*64*4 # 2 blocks, 64 rounds, 4 bytes/round _XMM_SAVE_SIZE = 0 _INP_END_SIZE = 8 _INP_SIZE = 8 _CTX_SIZE = 8 _RSP_SIZE = 8 _XFER = 0 _XMM_SAVE = _XFER + _XFER_SIZE _INP_END = _XMM_SAVE + _XMM_SAVE_SIZE _INP = _INP_END + _INP_END_SIZE _CTX = _INP + _INP_SIZE _RSP = _CTX + _CTX_SIZE STACK_SIZE = _RSP + _RSP_SIZE # rotate_Xs # Rotate values of symbols X0...X3 .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm # ROTATE_ARGS # Rotate values of symbols a...h .macro ROTATE_ARGS old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED disp ################################### RND N + 0 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B addl \disp(%rsp, SRND), h 
# h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7] mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor g, y2 # y2 = f^g # CH vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]# y1 = (e >> 6)# S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15] xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpsrld $7, XTMP1, XTMP2 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpslld $(32-7), XTMP1, XTMP3 or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 vpsrld $18, XTMP1, XTMP2 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- ROTATE_ARGS ################################### RND N + 1 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B offset = \disp + 1*4 addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3 mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor g, y2 # y2 = f^g # CH rorx $6, e, y1 # y1 = (e >> 6) # S1 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- vpslld $(32-18), XTMP1, XTMP1 and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 vpxor XTMP1, XTMP3, XTMP3 rorx $2, a, T1 # T1 = (a >> 2) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH 
vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA} or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA} ROTATE_ARGS ################################### RND N + 2 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A offset = \disp + 2*4 addl offset(%rsp, SRND), h # h = k + w + h # -- vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA} rorx $11, e, y1 # y1 = e >> 11 # S1B or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH xor g, y2 # y2 = f^g # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA} and e, y2 # y2 = (f^g)&e # CH rorx $6, e, y1 # y1 = (e >> 6) # S1 vpxor XTMP3, XTMP2, XTMP2 add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA} xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA} xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a ,T1 # T1 = (a >> 2) # S0 vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]} xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC} or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1,h # h = k + w + h + S0 # -- add y2,d # d = k + w + h + d + S1 + CH = d + t1 # -- add 
y2,h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3,h # h = t1 + S0 + MAJ # -- ROTATE_ARGS ################################### RND N + 3 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B offset = \disp + 3*4 addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC} mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor g, y2 # y2 = f^g # CH vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC} rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC} xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpxor XTMP3, XTMP2, XTMP2 rorx $22, a, y1 # y1 = a >> 22 # S0A add y0, y2 # y2 = S1 + CH # -- vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 {xDxC} xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- rorx $2, a, T1 # T1 = (a >> 2) # S0 vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00} vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]} xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- ROTATE_ARGS rotate_Xs .endm .macro DO_4ROUNDS disp ################################### RND N + 0 ########################### mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = 
((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 addl \disp(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 1 ########################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*1 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 2 ############################## add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, 
e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*2 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 3 ########################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*3 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- 
ROTATE_ARGS .endm ######################################################################## ## void sha256_transform_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) ## arg 1 : pointer to digest ## arg 2 : pointer to input data ## arg 3 : Num blocks ######################################################################## .text ENTRY(sha256_transform_rorx) .align 32 pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 mov %rsp, %rax subq $STACK_SIZE, %rsp and $-32, %rsp # align rsp to 32 byte boundary mov %rax, _RSP(%rsp) shl $6, NUM_BLKS # convert to bytes jz done_hash lea -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block mov NUM_BLKS, _INP_END(%rsp) cmp NUM_BLKS, INP je only_one_block ## load initial digest mov (CTX), a mov 4*1(CTX), b mov 4*2(CTX), c mov 4*3(CTX), d mov 4*4(CTX), e mov 4*5(CTX), f mov 4*6(CTX), g mov 4*7(CTX), h vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK vmovdqa _SHUF_00BA(%rip), SHUF_00BA vmovdqa _SHUF_DC00(%rip), SHUF_DC00 mov CTX, _CTX(%rsp) loop0: ## Load first 16 dwords from two blocks VMOVDQ 0*32(INP),XTMP0 VMOVDQ 1*32(INP),XTMP1 VMOVDQ 2*32(INP),XTMP2 VMOVDQ 3*32(INP),XTMP3 ## byte swap data vpshufb BYTE_FLIP_MASK, XTMP0, XTMP0 vpshufb BYTE_FLIP_MASK, XTMP1, XTMP1 vpshufb BYTE_FLIP_MASK, XTMP2, XTMP2 vpshufb BYTE_FLIP_MASK, XTMP3, XTMP3 ## transpose data into high/low halves vperm2i128 $0x20, XTMP2, XTMP0, X0 vperm2i128 $0x31, XTMP2, XTMP0, X1 vperm2i128 $0x20, XTMP3, XTMP1, X2 vperm2i128 $0x31, XTMP3, XTMP1, X3 last_block_enter: add $64, INP mov INP, _INP(%rsp) ## schedule 48 input dwords, by doing 3 rounds of 12 each xor SRND, SRND .align 16 loop1: vpaddd K256+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 0*32 vpaddd K256+1*32(SRND), X0, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 1*32 vpaddd K256+2*32(SRND), X0, XFER vmovdqa XFER, 2*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 2*32 vpaddd K256+3*32(SRND), X0, XFER vmovdqa XFER, 
3*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 3*32 add $4*32, SRND cmp $3*4*32, SRND jb loop1 loop2: ## Do last 16 rounds with no scheduling vpaddd K256+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 0*32 vpaddd K256+1*32(SRND), X1, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 1*32 add $2*32, SRND vmovdqa X2, X0 vmovdqa X3, X1 cmp $4*4*32, SRND jb loop2 mov _CTX(%rsp), CTX mov _INP(%rsp), INP addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h cmp _INP_END(%rsp), INP ja done_hash #### Do second block using previously scheduled results xor SRND, SRND .align 16 loop3: DO_4ROUNDS _XFER + 0*32 + 16 DO_4ROUNDS _XFER + 1*32 + 16 add $2*32, SRND cmp $4*4*32, SRND jb loop3 mov _CTX(%rsp), CTX mov _INP(%rsp), INP add $64, INP addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h cmp _INP_END(%rsp), INP jb loop0 ja done_hash do_last_block: VMOVDQ 0*16(INP),XWORD0 VMOVDQ 1*16(INP),XWORD1 VMOVDQ 2*16(INP),XWORD2 VMOVDQ 3*16(INP),XWORD3 vpshufb X_BYTE_FLIP_MASK, XWORD0, XWORD0 vpshufb X_BYTE_FLIP_MASK, XWORD1, XWORD1 vpshufb X_BYTE_FLIP_MASK, XWORD2, XWORD2 vpshufb X_BYTE_FLIP_MASK, XWORD3, XWORD3 jmp last_block_enter only_one_block: ## load initial digest mov (4*0)(CTX),a mov (4*1)(CTX),b mov (4*2)(CTX),c mov (4*3)(CTX),d mov (4*4)(CTX),e mov (4*5)(CTX),f mov (4*6)(CTX),g mov (4*7)(CTX),h vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK vmovdqa _SHUF_00BA(%rip), SHUF_00BA vmovdqa _SHUF_DC00(%rip), SHUF_DC00 mov CTX, _CTX(%rsp) jmp do_last_block done_hash: mov _RSP(%rsp), %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx ret ENDPROC(sha256_transform_rorx) .section .rodata.cst512.K256, "aM", @progbits, 512 .align 64 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 
0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 .align 32 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203 # shuffle xBxA -> 00BA .section .rodata.cst32._SHUF_00BA, "aM", @progbits, 32 .align 32 _SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100 # shuffle xDxC -> DC00 .section .rodata.cst32._SHUF_DC00, "aM", @progbits, 32 .align 32 _SHUF_DC00: .octa 
0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF #endif
AirFortressIlikara/LS2K0300-linux-4.19
24,909
arch/x86/crypto/sha512-avx2-asm.S
######################################################################## # Implement fast SHA-512 with AVX2 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # David Cote <david.m.cote@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-512 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## # This code schedules 1 blocks at a time, with 4 lanes per block ######################################################################## #ifdef CONFIG_AS_AVX2 #include <linux/linkage.h> .text # Virtual Registers Y_0 = %ymm4 Y_1 = %ymm5 Y_2 = %ymm6 Y_3 = %ymm7 YTMP0 = %ymm0 YTMP1 = %ymm1 YTMP2 = %ymm2 YTMP3 = %ymm3 YTMP4 = %ymm8 XFER = YTMP0 BYTE_FLIP_MASK = %ymm9 # 1st arg is %rdi, which is saved to the stack and accessed later via %r12 CTX1 = %rdi CTX2 = %r12 # 2nd arg INP = %rsi # 3rd arg NUM_BLKS = %rdx c = %rcx d = %r8 e = %rdx y3 = %rsi TBL = %rdi # clobbers CTX1 a = %rax b = %rbx f = %r9 g = %r10 h = %r11 old_h = %r11 T1 = %r12 # clobbers CTX2 y0 = %r13 y1 = %r14 y2 = %r15 # Local variables (stack frame) XFER_SIZE = 4*8 SRND_SIZE = 1*8 INP_SIZE = 1*8 INPEND_SIZE = 1*8 CTX_SIZE = 1*8 RSPSAVE_SIZE = 1*8 GPRSAVE_SIZE = 5*8 frame_XFER = 0 frame_SRND = frame_XFER + XFER_SIZE frame_INP = frame_SRND + SRND_SIZE frame_INPEND = frame_INP + INP_SIZE frame_CTX = frame_INPEND + INPEND_SIZE frame_RSPSAVE = frame_CTX + CTX_SIZE frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE frame_size = frame_GPRSAVE + GPRSAVE_SIZE ## assume buffers not aligned #define VMOVDQ vmovdqu # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm # COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask # Load ymm with mem and byte swap each dword .macro COPY_YMM_AND_BSWAP p1 p2 p3 VMOVDQ \p2, \p1 vpshufb \p3, \p1, \p1 .endm # rotate_Ys # Rotate values of symbols Y0...Y3 .macro rotate_Ys Y_ = Y_0 Y_0 = Y_1 Y_1 = Y_2 Y_2 = Y_3 Y_3 = Y_ .endm # RotateState .macro RotateState # Rotate symbols a..h right old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm # macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL # YDST = {YSRC1, YSRC2} >> RVAL*8 .macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL vperm2f128 $0x3, \YSRC2, \YSRC1, \YDST # YDST = {YS1_LO, YS2_HI} vpalignr $\RVAL, 
\YSRC2, \YDST, \YDST # YDST = {YDS1, YS2} >> RVAL*8 .endm .macro FOUR_ROUNDS_AND_SCHED ################################### RND N + 0 ######################################### # Extract w[t-7] MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7] # Calculate w[t-16] + w[t-7] vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16] # Extract w[t-15] MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15] # Calculate sigma0 # Calculate w[t-15] ror 1 vpsrlq $1, YTMP1, YTMP2 vpsllq $(64-1), YTMP1, YTMP3 vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 # Calculate w[t-15] shr 7 vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7 mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add frame_XFER(%rsp),h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 1 ######################################### # Calculate w[t-15] ror 8 vpsrlq $8, YTMP1, YTMP2 vpsllq $(64-8), YTMP1, YTMP1 vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8 # XOR the three components vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7 vpxor YTMP1, YTMP3, YTMP1 # YTMP1 = s0 # Add three components, w[t-16], w[t-7] 
and sigma0 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 # Move to appropriate lanes for calculating w[16] and w[17] vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA} # Move to appropriate lanes for calculating w[18] and w[19] vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00} # Calculate w[16] and w[17] in both 128 bit lanes # Calculate sigma1 for w[16] and w[17] on both 128 bit lanes vperm2f128 $0x11, Y_3, Y_3, YTMP2 # YTMP2 = W[-2] {BABA} vpsrlq $6, YTMP2, YTMP4 # YTMP4 = W[-2] >> 6 {BABA} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add 1*8+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 2 ######################################### vpsrlq $19, YTMP2, YTMP3 # YTMP3 = W[-2] >> 19 {BABA} vpsllq $(64-19), YTMP2, YTMP1 # YTMP1 = W[-2] << 19 {BABA} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {BABA} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA} vpsrlq $61, YTMP2, YTMP3 # YTMP3 = W[-2] >> 61 {BABA} vpsllq $(64-61), YTMP2, YTMP1 # YTMP1 = W[-2] << 61 
{BABA} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {BABA} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^ # (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} # Add sigma1 to the other compunents to get w[16] and w[17] vpaddq YTMP4, Y_0, Y_0 # Y_0 = {W[1], W[0], W[1], W[0]} # Calculate sigma1 for w[18] and w[19] for upper 128 bit lane vpsrlq $6, Y_0, YTMP4 # YTMP4 = W[-2] >> 6 {DC--} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A add 2*8+frame_XFER(%rsp), h # h = k + w + h # -- rorx $18, e, y1 # y1 = e >> 18 # S1B or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH xor g, y2 # y2 = f^g # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 and e, y2 # y2 = (f^g)&e # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 3 ######################################### vpsrlq $19, Y_0, YTMP3 # YTMP3 = W[-2] >> 19 {DC--} vpsllq $(64-19), Y_0, YTMP1 # YTMP1 = W[-2] << 19 {DC--} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {DC--} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--} vpsrlq $61, Y_0, YTMP3 # YTMP3 = W[-2] >> 61 {DC--} vpsllq $(64-61), Y_0, YTMP1 # YTMP1 = W[-2] << 61 {DC--} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {DC--} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^ # (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} # Add the 
sigma0 + w[t-7] + w[t-16] for w[18] and w[19] # to newly calculated sigma1 to get w[18] and w[19] vpaddq YTMP4, YTMP0, YTMP2 # YTMP2 = {W[3], W[2], --, --} # Form w[19, w[18], w17], w[16] vpblendd $0xF0, YTMP2, Y_0, Y_0 # Y_0 = {W[3], W[2], W[1], W[0]} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add 3*8+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A add y0, y2 # y2 = S1 + CH # -- xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- rorx $28, a, T1 # T1 = (a >> 28) # S0 xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState rotate_Ys .endm .macro DO_4ROUNDS ################################### RND N + 0 ######################################### mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ 
(a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 1 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*1+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 2 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 
= a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*2+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 3 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*3+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState .endm ######################################################################## # void sha512_transform_rorx(void* D, const void* M, uint64_t L)# # Purpose: Updates the SHA512 digest stored at D with the message stored in M. 
# The size of the message pointed to by M must be an integer multiple of SHA512
#   message blocks.
# L is the message length in SHA512 blocks
########################################################################
# NOTE: CTX1/CTX2, INP, NUM_BLKS, TBL, a..h, Y_0..Y_3, XFER,
# BYTE_FLIP_MASK and the frame_* offsets are register/stack aliases
# #defined earlier in this file, as are the COPY_YMM_AND_BSWAP,
# FOUR_ROUNDS_AND_SCHED, DO_4ROUNDS and addm macros.
ENTRY(sha512_transform_rorx)
	# Allocate Stack Space
	mov	%rsp, %rax
	sub	$frame_size, %rsp
	and	$~(0x20 - 1), %rsp	# 32-byte align for vmovdqa spills
	mov	%rax, frame_RSPSAVE(%rsp)

	# Save GPRs
	mov	%rbx, 8*0+frame_GPRSAVE(%rsp)
	mov	%r12, 8*1+frame_GPRSAVE(%rsp)
	mov	%r13, 8*2+frame_GPRSAVE(%rsp)
	mov	%r14, 8*3+frame_GPRSAVE(%rsp)
	mov	%r15, 8*4+frame_GPRSAVE(%rsp)

	shl	$7, NUM_BLKS	# convert to bytes
	jz	done_hash
	add	INP, NUM_BLKS	# pointer to end of data
	mov	NUM_BLKS, frame_INPEND(%rsp)

	## load initial digest
	mov	8*0(CTX1), a
	mov	8*1(CTX1), b
	mov	8*2(CTX1), c
	mov	8*3(CTX1), d
	mov	8*4(CTX1), e
	mov	8*5(CTX1), f
	mov	8*6(CTX1), g
	mov	8*7(CTX1), h

	# save %rdi (CTX) before it gets clobbered
	mov	%rdi, frame_CTX(%rsp)

	vmovdqa	PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK

loop0:				# one iteration per 128-byte message block
	lea	K512(%rip), TBL

	## byte swap first 16 dwords
	COPY_YMM_AND_BSWAP	Y_0, (INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_1, 1*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_2, 2*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_3, 3*32(INP), BYTE_FLIP_MASK

	mov	INP, frame_INP(%rsp)

	## schedule 64 input dwords, by doing 12 rounds of 4 each
	movq	$4, frame_SRND(%rsp)

.align 16
loop1:
	# XFER = w[t] + K[t] for the next four rounds; the round macros
	# pick it up from frame_XFER on the stack
	vpaddq	(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	1*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	2*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	3*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(4*32), TBL
	FOUR_ROUNDS_AND_SCHED

	subq	$1, frame_SRND(%rsp)
	jne	loop1

	# last 16 rounds: no more message scheduling needed
	movq	$2, frame_SRND(%rsp)
loop2:
	vpaddq	(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	DO_4ROUNDS
	vpaddq	1*32(TBL), Y_1, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(2*32), TBL
	DO_4ROUNDS

	vmovdqa	Y_2, Y_0
	vmovdqa	Y_3, Y_1

	subq	$1, frame_SRND(%rsp)
	jne	loop2

	# add the working variables back into the digest
	mov	frame_CTX(%rsp), CTX2
	addm	8*0(CTX2), a
	addm	8*1(CTX2), b
	addm	8*2(CTX2), c
	addm	8*3(CTX2), d
	addm	8*4(CTX2), e
	addm	8*5(CTX2), f
	addm	8*6(CTX2), g
	addm	8*7(CTX2), h

	mov	frame_INP(%rsp), INP
	add	$128, INP
	cmp	frame_INPEND(%rsp), INP
	jne	loop0

done_hash:

	# Restore GPRs
	mov	8*0+frame_GPRSAVE(%rsp), %rbx
	mov	8*1+frame_GPRSAVE(%rsp), %r12
	mov	8*2+frame_GPRSAVE(%rsp), %r13
	mov	8*3+frame_GPRSAVE(%rsp), %r14
	mov	8*4+frame_GPRSAVE(%rsp), %r15

	# Restore Stack Pointer
	mov	frame_RSPSAVE(%rsp), %rsp
	ret
ENDPROC(sha512_transform_rorx)

########################################################################
### Binary Data

# Mergeable 640-byte rodata section. This allows linker to merge the table
# with other, exactly the same 640-byte fragment of another rodata section
# (if such section exists).
.section	.rodata.cst640.K512, "aM", @progbits, 640
.align 64
# K[t] used in SHA512 hashing
K512:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817

.section	.rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
.align 32
# Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
PSHUFFLE_BYTE_FLIP_MASK:
	.octa 0x08090a0b0c0d0e0f0001020304050607
	.octa 0x18191a1b1c1d1e1f1011121314151617

.section	.rodata.cst32.MASK_YMM_LO, "aM", @progbits, 32
.align 32
MASK_YMM_LO:
	.octa 0x00000000000000000000000000000000
	.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF

#endif
AirFortressIlikara/LS2K0300-linux-4.19
2,906
arch/x86/crypto/ghash-clmulni-intel_asm.S
/*
 * Accelerated GHASH implementation with Intel PCLMULQDQ-NI
 * instructions. This file contains accelerated part of ghash
 * implementation. More information about PCLMULQDQ can be found at:
 *
 * http://software.intel.com/en-us/articles/carry-less-multiplication-and-its-usage-for-computing-the-gcm-mode/
 *
 * Copyright (c) 2009 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *           Vinodh Gopal
 *           Erdinc Ozturk
 *           Deniz Karakoyunlu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/inst.h>
#include <asm/frame.h>

/* pshufb index pattern that reverses all 16 bytes of an XMM register;
 * used to convert blocks between memory order and GHASH bit order. */
.section	.rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.octa 0x000102030405060708090a0b0c0d0e0f

/* Fixed register roles shared by every routine in this file. */
#define DATA	%xmm0
#define SHASH	%xmm1
#define T1	%xmm2
#define T2	%xmm3
#define T3	%xmm4
#define BSWAP	%xmm5
#define IN1	%xmm6

.text

/*
 * __clmul_gf128mul_ble:	internal ABI
 * input:
 *	DATA:			operand1
 *	SHASH:			operand2, hash_key << 1 mod poly
 * output:
 *	DATA:			operand1 * operand2 mod poly
 * changed:
 *	T1
 *	T2
 *	T3
 *
 * Karatsuba-style multiply: three PCLMULQDQs build the 256-bit
 * carry-less product <T1:DATA>, which is then reduced modulo the
 * GF(2^128) polynomial used by GHASH (see the Intel article cited in
 * the file header for the derivation of the shift amounts).
 */
__clmul_gf128mul_ble:
	movaps DATA, T1
	pshufd $0b01001110, DATA, T2	# swap qwords of operand1
	pshufd $0b01001110, SHASH, T3	# swap qwords of operand2
	pxor DATA, T2			# T2 = a1 + a0 (carry-less)
	pxor SHASH, T3			# T3 = b1 + b0 (carry-less)

	PCLMULQDQ 0x00 SHASH DATA	# DATA = a0 * b0
	PCLMULQDQ 0x11 SHASH T1	# T1 = a1 * b1
	PCLMULQDQ 0x00 T3 T2	# T2 = (a1 + a0) * (b1 + b0)
	pxor DATA, T2
	pxor T1, T2	# T2 = a0 * b1 + a1 * b0

	# split the middle partial product across the low (DATA) and
	# high (T1) halves of the 256-bit result
	movaps T2, T3
	pslldq $8, T3
	psrldq $8, T2
	pxor T3, DATA
	pxor T2, T1	# <T1:DATA> is result of
			# carry-less multiplication

	# first phase of the reduction
	movaps DATA, T3
	psllq $1, T3
	pxor DATA, T3
	psllq $5, T3
	pxor DATA, T3
	psllq $57, T3
	movaps T3, T2
	pslldq $8, T2
	psrldq $8, T3
	pxor T2, DATA
	pxor T3, T1

	# second phase of the reduction
	movaps DATA, T2
	psrlq $5, T2
	pxor DATA, T2
	psrlq $1, T2
	pxor DATA, T2
	psrlq $1, T2
	pxor T2, T1
	pxor T1, DATA
	ret
ENDPROC(__clmul_gf128mul_ble)

/* void clmul_ghash_mul(char *dst, const u128 *shash) */
/*
 * In-place GHASH multiply of one 16-byte block.
 * SysV args: %rdi = dst (block, updated in place), %rsi = shash (hash key).
 * Blocks are byte-swapped on load and swapped back on store because
 * __clmul_gf128mul_ble works in GHASH bit order.
 */
ENTRY(clmul_ghash_mul)
	FRAME_BEGIN
	movups (%rdi), DATA		# movups: dst may be unaligned
	movups (%rsi), SHASH
	movaps .Lbswap_mask, BSWAP
	PSHUFB_XMM BSWAP DATA		# to GHASH byte order
	call __clmul_gf128mul_ble
	PSHUFB_XMM BSWAP DATA		# back to memory order
	movups DATA, (%rdi)
	FRAME_END
	ret
ENDPROC(clmul_ghash_mul)

/*
 * void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
 *			   const u128 *shash);
 *
 * Folds whole 16-byte blocks of src into the digest at dst:
 *	digest = (digest ^ block) * shash	for each block.
 * SysV args: %rdi = dst, %rsi = src, %rdx = srclen, %rcx = shash.
 * Any tail of srclen % 16 bytes is left for the caller to handle.
 * The digest stays byte-swapped for the whole loop and is converted
 * back only once at the end.
 */
ENTRY(clmul_ghash_update)
	FRAME_BEGIN
	cmp $16, %rdx
	jb .Lupdate_just_ret	# check length
	movaps .Lbswap_mask, BSWAP
	movups (%rdi), DATA
	movups (%rcx), SHASH
	PSHUFB_XMM BSWAP DATA
.align 4
.Lupdate_loop:
	movups (%rsi), IN1
	PSHUFB_XMM BSWAP IN1
	pxor IN1, DATA		# xor in the next source block
	call __clmul_gf128mul_ble
	sub $16, %rdx
	add $16, %rsi
	cmp $16, %rdx
	jge .Lupdate_loop	# srclen is a u32, so signed compare is safe
	PSHUFB_XMM BSWAP DATA
	movups DATA, (%rdi)
.Lupdate_just_ret:
	FRAME_END
	ret
ENDPROC(clmul_ghash_update)
AirFortressIlikara/LS2K0300-linux-4.19
4,042
arch/x86/math-emu/polynom_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  polynomial_Xsig.S                                                        |
 |                                                                           |
 | Fixed point arithmetic polynomial evaluation.                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void polynomial_Xsig(Xsig *accum, unsigned long long x,                 |
 |                        unsigned long long terms[], int n)                 |
 |                                                                           |
 | Computes:                                                                 |
 | terms[0] + (terms[1] + (terms[2] + ... + (terms[n-1]*x)*x)*x)*x) ... )*x  |
 | and adds the result to the 12 byte Xsig.                                  |
 | The terms[] are each 8 bytes, but all computation is performed to 12 byte |
 | precision.                                                                |
 |                                                                           |
 | This function must be used carefully: most overflow of intermediate      |
 | results is controlled, but overflow of the result is not.                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/

	.file	"polynomial_Xsig.S"

#include "fpu_emu.h"

/* Local stack frame layout (relative to %ebp):
 * SUM_*   -- 96-bit running Horner sum (ms:middle:ls)
 * ACCUM_* -- 96-bit product accumulator for sum * x
 * OVERFLOWED -- carry flag propagated from the previous term addition
 */
#define	TERM_SIZE	$8
#define	SUM_MS		-20(%ebp)	/* sum ms long */
#define	SUM_MIDDLE	-24(%ebp)	/* sum middle long */
#define	SUM_LS		-28(%ebp)	/* sum ls long */
#define	ACCUM_MS	-4(%ebp)	/* accum ms long */
#define	ACCUM_MIDDLE	-8(%ebp)	/* accum middle long */
#define	ACCUM_LS	-12(%ebp)	/* accum ls long */
#define	OVERFLOWED	-16(%ebp)	/* addition overflow flag */

.text
ENTRY(polynomial_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	subl	$32,%esp		/* local frame described above */
	pushl	%esi
	pushl	%edi
	pushl	%ebx

	movl	PARAM2,%esi		/* x */
	movl	PARAM3,%edi		/* terms */

	/* %edi = &terms[n]: evaluation walks the table backwards
	   (Horner's scheme, highest-order coefficient first) */
	movl	TERM_SIZE,%eax
	mull	PARAM4			/* n */
	addl	%eax,%edi

	/* initial sum = terms[n], placed in the two ms longs */
	movl	4(%edi),%edx		/* terms[n] */
	movl	%edx,SUM_MS
	movl	(%edi),%edx		/* terms[n] */
	movl	%edx,SUM_MIDDLE
	xor	%eax,%eax
	movl	%eax,SUM_LS
	movb	%al,OVERFLOWED

	subl	TERM_SIZE,%edi
	decl	PARAM4
	js	L_accum_done

L_accum_loop:
	/* ACCUM = upper 96 bits of (SUM_MS:SUM_MIDDLE) * x,
	   built from the four 32x32 partial products */
	xor	%eax,%eax
	movl	%eax,ACCUM_MS
	movl	%eax,ACCUM_MIDDLE

	movl	SUM_MIDDLE,%eax
	mull	(%esi)			/* x ls long */
	movl	%edx,ACCUM_LS

	movl	SUM_MIDDLE,%eax
	mull	4(%esi)			/* x ms long */
	addl	%eax,ACCUM_LS
	adcl	%edx,ACCUM_MIDDLE
	adcl	$0,ACCUM_MS

	movl	SUM_MS,%eax
	mull	(%esi)			/* x ls long */
	addl	%eax,ACCUM_LS
	adcl	%edx,ACCUM_MIDDLE
	adcl	$0,ACCUM_MS

	movl	SUM_MS,%eax
	mull	4(%esi)			/* x ms long */
	addl	%eax,ACCUM_MIDDLE
	adcl	%edx,ACCUM_MS

	/* If the previous term addition carried out of the 96 bits, the
	   lost bit is worth 1.0, so its product contribution is x itself. */
	testb	$0xff,OVERFLOWED
	jz	L_no_overflow

	movl	(%esi),%eax
	addl	%eax,ACCUM_MIDDLE
	movl	4(%esi),%eax
	adcl	%eax,ACCUM_MS		/* This could overflow too */

L_no_overflow:
	/*
	 * Now put the sum of next term and the accumulator
	 * into the sum register
	 */
	/* NOTE(review): (%edi) (the term's ls long) is added at both the
	 * ls and middle positions below, while 4(%edi) goes to ms --
	 * verify this alignment against the upstream math-emu source. */
	movl	ACCUM_LS,%eax
	addl	(%edi),%eax		/* term ls long */
	movl	%eax,SUM_LS
	movl	ACCUM_MIDDLE,%eax
	adcl	(%edi),%eax		/* term ls long */
	movl	%eax,SUM_MIDDLE
	movl	ACCUM_MS,%eax
	adcl	4(%edi),%eax		/* term ms long */
	movl	%eax,SUM_MS
	sbbb	%al,%al			/* %al = 0xff iff the add carried out */
	movb	%al,OVERFLOWED		/* Used in the next iteration */

	subl	TERM_SIZE,%edi
	decl	PARAM4
	jns	L_accum_loop

L_accum_done:
	/* accumulate the 96-bit result into *accum (PARAM1) */
	movl	PARAM1,%edi		/* accum */
	movl	SUM_LS,%eax
	addl	%eax,(%edi)
	movl	SUM_MIDDLE,%eax
	adcl	%eax,4(%edi)
	movl	SUM_MS,%eax
	adcl	%eax,8(%edi)

	popl	%ebx
	popl	%edi
	popl	%esi
	leave
	ret
ENDPROC(polynomial_Xsig)
AirFortressIlikara/LS2K0300-linux-4.19
10,153
arch/x86/math-emu/div_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */
	.file	"div_Xsig.S"
/*---------------------------------------------------------------------------+
 |  div_Xsig.S                                                               |
 |                                                                           |
 | Division subroutine for 96 bit quantities                                 |
 |                                                                           |
 | Copyright (C) 1994,1995                                                   |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 |                                                                           |
 +---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------+
 |  Divide the 96 bit quantity pointed to by a, by that pointed to by b, and |
 |  put the 96 bit result at the location d.                                 |
 |                                                                           |
 |  The result may not be accurate to 96 bits. It is intended for use where  |
 |  a result better than 64 bits is required. The result should usually be   |
 |  good to at least 94 bits.                                                |
 |  The returned result is actually divided by one half. This is done to     |
 |  prevent overflow.                                                        |
 |                                                                           |
 |  .aaaaaaaaaaaaaa / .bbbbbbbbbbbbb  ->  .dddddddddddd                      |
 |                                                                           |
 |  void div_Xsig(Xsig *a, Xsig *b, Xsig *dest)                              |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "exception.h"
#include "fpu_emu.h"

/* Accessors for the three 32-bit words of an Xsig (ls..ms). */
#define	XsigLL(x)	(x)
#define	XsigL(x)	4(x)
#define	XsigH(x)	8(x)


#ifndef NON_REENTRANT_FPU
/*
	Local storage on the stack:
	Accumulator:	FPU_accum_3:FPU_accum_2:FPU_accum_1:FPU_accum_0
 */
#define FPU_accum_3	-4(%ebp)
#define FPU_accum_2	-8(%ebp)
#define FPU_accum_1	-12(%ebp)
#define FPU_accum_0	-16(%ebp)
#define FPU_result_3	-20(%ebp)
#define FPU_result_2	-24(%ebp)
#define FPU_result_1	-28(%ebp)

#else
.data
/*
	Local storage in a static area:
	Accumulator:	FPU_accum_3:FPU_accum_2:FPU_accum_1:FPU_accum_0
 */
	.align 4,0
FPU_accum_3:
	.long	0
FPU_accum_2:
	.long	0
FPU_accum_1:
	.long	0
FPU_accum_0:
	.long	0
FPU_result_3:
	.long	0
FPU_result_2:
	.long	0
FPU_result_1:
	.long	0
#endif /* NON_REENTRANT_FPU */

.text
ENTRY(div_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
#ifndef NON_REENTRANT_FPU
	subl	$28,%esp	/* accumulator + result locals */
#endif /* NON_REENTRANT_FPU */

	pushl	%esi
	pushl	%edi
	pushl	%ebx

	movl	PARAM1,%esi	/* pointer to num */
	movl	PARAM2,%ebx	/* pointer to denom */

#ifdef PARANOID
	/* the divisor must be normalized (ms bit set) */
	testl	$0x80000000, XsigH(%ebx)	/* Divisor */
	je	L_bugged
#endif /* PARANOID */

/*---------------------------------------------------------------------------+
 |  Divide:   Return arg1/arg2 to arg3.                                      |
 |                                                                           |
 |  The maximum returned value is (ignoring exponents)                       |
 |               .ffffffff ffffffff                                          |
 |               ------------------  =  1.ffffffff fffffffe                  |
 |               .80000000 00000000                                          |
 |  and the minimum is                                                       |
 |               .80000000 00000000                                          |
 |               ------------------  =  .80000000 00000001   (rounded)       |
 |               .ffffffff ffffffff                                          |
 |                                                                           |
 +---------------------------------------------------------------------------*/

	/* Save extended dividend in local register */

	/* Divide by 2 to prevent overflow */
	/* (clc + chained rcr shifts the 96-bit num right one bit into
	   the 128-bit accumulator, keeping the shifted-out bit) */
	clc
	movl	XsigH(%esi),%eax
	rcrl	%eax
	movl	%eax,FPU_accum_3
	movl	XsigL(%esi),%eax
	rcrl	%eax
	movl	%eax,FPU_accum_2
	movl	XsigLL(%esi),%eax
	rcrl	%eax
	movl	%eax,FPU_accum_1
	movl	$0,%eax
	rcrl	%eax
	movl	%eax,FPU_accum_0

	movl	FPU_accum_2,%eax	/* Get the current num */
	movl	FPU_accum_3,%edx

/*----------------------------------------------------------------------*/
/* Initialization done.
   Do the first 32 bits. */

	/* We will divide by a number which is too large */
	movl	XsigH(%ebx),%ecx
	addl	$1,%ecx
	jnc	LFirst_div_not_1

	/* here we need to divide by 100000000h,
	   i.e., no division at all.. */
	mov	%edx,%eax
	jmp	LFirst_div_done

LFirst_div_not_1:
	divl	%ecx		/* Divide the numerator by the augmented
				   denom ms dw */

LFirst_div_done:
	movl	%eax,FPU_result_3	/* Put the result in the answer */

	mull	XsigH(%ebx)	/* mul by the ms dw of the denom */

	subl	%eax,FPU_accum_2	/* Subtract from the num local reg */
	sbbl	%edx,FPU_accum_3

	movl	FPU_result_3,%eax	/* Get the result back */
	mull	XsigL(%ebx)	/* now mul the ls dw of the denom */

	subl	%eax,FPU_accum_1	/* Subtract from the num local reg */
	sbbl	%edx,FPU_accum_2
	sbbl	$0,FPU_accum_3
	je	LDo_2nd_32_bits		/* Must check for non-zero result here */

#ifdef PARANOID
	jb	L_bugged_1
#endif /* PARANOID */

	/* need to subtract another once of the denom */
	incl	FPU_result_3	/* Correct the answer */

	movl	XsigL(%ebx),%eax
	movl	XsigH(%ebx),%edx
	subl	%eax,FPU_accum_1	/* Subtract from the num local reg */
	sbbl	%edx,FPU_accum_2

#ifdef PARANOID
	sbbl	$0,FPU_accum_3
	jne	L_bugged_1	/* Must check for non-zero result here */
#endif /* PARANOID */

/*----------------------------------------------------------------------*/
/* Half of the main problem is done, there is just a reduced numerator
   to handle now.
   Work with the second 32 bits, FPU_accum_0 not used from now on */
LDo_2nd_32_bits:
	movl	FPU_accum_2,%edx	/* get the reduced num */
	movl	FPU_accum_1,%eax

	/* need to check for possible subsequent overflow */
	cmpl	XsigH(%ebx),%edx
	jb	LDo_2nd_div
	ja	LPrevent_2nd_overflow

	cmpl	XsigL(%ebx),%eax
	jb	LDo_2nd_div

LPrevent_2nd_overflow:
/* The numerator is greater or equal, would cause overflow */
	/* prevent overflow */
	subl	XsigL(%ebx),%eax
	sbbl	XsigH(%ebx),%edx
	movl	%edx,FPU_accum_2
	movl	%eax,FPU_accum_1

	incl	FPU_result_3	/* Reflect the subtraction in the answer */

#ifdef PARANOID
	je	L_bugged_2	/* Can't bump the result to 1.0 */
#endif /* PARANOID */

LDo_2nd_div:
	cmpl	$0,%ecx		/* augmented denom msw */
	jnz	LSecond_div_not_1

	/* %ecx == 0, we are dividing by 1.0 */
	mov	%edx,%eax
	jmp	LSecond_div_done

LSecond_div_not_1:
	divl	%ecx		/* Divide the numerator by the denom ms dw */

LSecond_div_done:
	movl	%eax,FPU_result_2	/* Put the result in the answer */

	mull	XsigH(%ebx)	/* mul by the ms dw of the denom */

	subl	%eax,FPU_accum_1	/* Subtract from the num local reg */
	sbbl	%edx,FPU_accum_2

#ifdef PARANOID
	jc	L_bugged_2
#endif /* PARANOID */

	movl	FPU_result_2,%eax	/* Get the result back */
	mull	XsigL(%ebx)	/* now mul the ls dw of the denom */

	subl	%eax,FPU_accum_0	/* Subtract from the num local reg */
	sbbl	%edx,FPU_accum_1	/* Subtract from the num local reg */
	sbbl	$0,FPU_accum_2

#ifdef PARANOID
	jc	L_bugged_2
#endif /* PARANOID */

	jz	LDo_3rd_32_bits

#ifdef PARANOID
	cmpl	$1,FPU_accum_2
	jne	L_bugged_2
#endif /* PARANOID */

	/* need to subtract another once of the denom */
	movl	XsigL(%ebx),%eax
	movl	XsigH(%ebx),%edx
	subl	%eax,FPU_accum_0	/* Subtract from the num local reg */
	sbbl	%edx,FPU_accum_1
	sbbl	$0,FPU_accum_2

#ifdef PARANOID
	jc	L_bugged_2
	jne	L_bugged_2
#endif /* PARANOID */

	addl	$1,FPU_result_2	/* Correct the answer */
	adcl	$0,FPU_result_3

#ifdef PARANOID
	jc	L_bugged_2	/* Must check for non-zero result here */
#endif /* PARANOID */

/*----------------------------------------------------------------------*/
/* The division is essentially finished here, we just need to perform
   tidying operations.
   Deal with the 3rd 32 bits */
LDo_3rd_32_bits:
	/* We use an approximation for the third 32 bits.
	To take account of the 3rd 32 bits of the divisor
	(call them del), we subtract  del * (a/b) */

	movl	FPU_result_3,%eax	/* a/b */
	mull	XsigLL(%ebx)	/* del */

	subl	%edx,FPU_accum_1

	/* A borrow indicates that the result is negative */
	jnb	LTest_over

	/* went negative: add the denom ms word back and decrement
	   the quotient until the remainder is non-negative again */
	movl	XsigH(%ebx),%edx
	addl	%edx,FPU_accum_1

	subl	$1,FPU_result_2	/* Adjust the answer */
	sbbl	$0,FPU_result_3

	/* The above addition might not have been enough, check again. */
	movl	FPU_accum_1,%edx	/* get the reduced num */
	cmpl	XsigH(%ebx),%edx	/* denom */
	jb	LDo_3rd_div

	movl	XsigH(%ebx),%edx
	addl	%edx,FPU_accum_1

	subl	$1,FPU_result_2	/* Adjust the answer */
	sbbl	$0,FPU_result_3
	jmp	LDo_3rd_div

LTest_over:
	movl	FPU_accum_1,%edx	/* get the reduced num */

	/* need to check for possible subsequent overflow */
	cmpl	XsigH(%ebx),%edx	/* denom */
	jb	LDo_3rd_div

	/* prevent overflow */
	subl	XsigH(%ebx),%edx
	movl	%edx,FPU_accum_1

	addl	$1,FPU_result_2	/* Reflect the subtraction in the answer */
	adcl	$0,FPU_result_3

LDo_3rd_div:
	movl	FPU_accum_0,%eax
	movl	FPU_accum_1,%edx
	divl	XsigH(%ebx)

	movl	%eax,FPU_result_1	/* Rough estimate of third word */

	movl	PARAM3,%esi		/* pointer to answer */

	movl	FPU_result_1,%eax
	movl	%eax,XsigLL(%esi)
	movl	FPU_result_2,%eax
	movl	%eax,XsigL(%esi)
	movl	FPU_result_3,%eax
	movl	%eax,XsigH(%esi)

L_exit:
	popl	%ebx
	popl	%edi
	popl	%esi

	leave
	ret


#ifdef PARANOID
/* The logic is wrong if we got here */
L_bugged:
	pushl	EX_INTERNAL|0x240
	call	EXCEPTION
	pop	%ebx
	jmp	L_exit

L_bugged_1:
	pushl	EX_INTERNAL|0x241
	call	EXCEPTION
	pop	%ebx
	jmp	L_exit

L_bugged_2:
	pushl	EX_INTERNAL|0x242
	call	EXCEPTION
	pop	%ebx
	jmp	L_exit
#endif /* PARANOID */
ENDPROC(div_Xsig)
AirFortressIlikara/LS2K0300-linux-4.19
18,063
arch/x86/math-emu/reg_round.S
/* SPDX-License-Identifier: GPL-2.0 */
	.file	"reg_round.S"
/*---------------------------------------------------------------------------+
 |  reg_round.S                                                              |
 |                                                                           |
 | Rounding/truncation/etc for FPU basic arithmetic functions.               |
 |                                                                           |
 | Copyright (C) 1993,1995,1997                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@suburbia.net               |
 |                                                                           |
 | This code has four possible entry points.                                 |
 | The following must be entered by a jmp instruction:                       |
 |   fpu_reg_round, fpu_reg_round_sqrt, and fpu_Arith_exit.                  |
 |                                                                           |
 | The FPU_round entry point is intended to be used by C code.               |
 | From C, call as:                                                          |
 |  int FPU_round(FPU_REG *arg, unsigned int extent, unsigned int control_w) |
 |                                                                           |
 | Return value is the tag of the answer, or-ed with FPU_Exception if        |
 | one was raised, or -1 on internal error.                                  |
 |                                                                           |
 | For correct "up" and "down" rounding, the argument must have the correct  |
 | sign.                                                                     |
 |                                                                           |
 +---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------+
 | Four entry points.                                                        |
 |                                                                           |
 | Needed by both the fpu_reg_round and fpu_reg_round_sqrt entry points:     |
 |  %eax:%ebx  64 bit significand                                            |
 |  %edx       32 bit extension of the significand                           |
 |  %edi       pointer to an FPU_REG for the result to be stored             |
 |  stack      calling function must have set up a C stack frame and         |
 |             pushed %esi, %edi, and %ebx                                   |
 |                                                                           |
 | Needed just for the fpu_reg_round_sqrt entry point:                       |
 |  %cx  A control word in the same format as the FPU control word.          |
 | Otherwise, PARAM4 must give such a value.                                 |
 |                                                                           |
 |                                                                           |
 | The significand and its extension are assumed to be exact in the          |
 | following sense:                                                          |
 |   If the significand by itself is the exact result then the significand   |
 |   extension (%edx) must contain 0, otherwise the significand extension    |
 |   must be non-zero.                                                       |
 |   If the significand extension is non-zero then the significand is        |
 |   smaller than the magnitude of the correct exact result by an amount     |
 |   greater than zero and less than one ls bit of the significand.          |
 |   The significand extension is only required to have three possible       |
 |   non-zero values:                                                        |
 |       less than 0x80000000  <=> the significand is less than 1/2 an ls    |
 |                                 bit smaller than the magnitude of the     |
 |                                 true exact result.                        |
 |       exactly 0x80000000    <=> the significand is exactly 1/2 an ls bit  |
 |                                 smaller than the magnitude of the true    |
 |                                 exact result.                             |
 |       greater than 0x80000000 <=> the significand is more than 1/2 an ls  |
 |                                 bit smaller than the magnitude of the     |
 |                                 true exact result.                        |
 |                                                                           |
 +---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------+
 | The code in this module has become quite complex, but it should handle    |
 | all of the FPU flags which are set at this stage of the basic arithmetic  |
 | computations.                                                             |
 | There are a few rare cases where the results are not set identically to   |
 | a real FPU. These require a bit more thought because at this stage the    |
 | results of the code here appear to be more consistent...                  |
 | This may be changed in a future version.                                  |
 +---------------------------------------------------------------------------*/

#include "fpu_emu.h"
#include "exception.h"
#include "control_w.h"

/* Flags for FPU_bits_lost */
#define	LOST_DOWN	$1
#define	LOST_UP		$2

/* Flags for FPU_denormal */
#define	DENORMAL	$1
#define	UNMASKED_UNDERFLOW $2

#ifndef NON_REENTRANT_FPU
/* Make the code re-entrant by putting
   local storage on the stack: */
#define FPU_bits_lost	(%esp)
#define FPU_denormal	1(%esp)

#else
/* Not re-entrant, so we can gain speed by putting
   local storage in a static area: */
.data
	.align 4,0
FPU_bits_lost:
	.byte	0
FPU_denormal:
	.byte	0
#endif /* NON_REENTRANT_FPU */

.text
.globl fpu_reg_round
.globl fpu_Arith_exit

/* Entry point when called from C */
ENTRY(FPU_round)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%esi
	pushl	%edi
	pushl	%ebx

	movl	PARAM1,%edi
	movl	SIGH(%edi),%eax
	movl	SIGL(%edi),%ebx
	movl	PARAM2,%edx

fpu_reg_round:			/* Normal entry point */
	movl	PARAM4,%ecx

#ifndef NON_REENTRANT_FPU
	pushl	%ebx		/* adjust the stack pointer */
#endif /* NON_REENTRANT_FPU */

#ifdef PARANOID
/* Cannot use this here yet */
/*	orl	%eax,%eax */
/*	jns	L_entry_bugged */
#endif /* PARANOID */

	cmpw	EXP_UNDER,EXP(%edi)
	jle	L_Make_denorm	/* The number is a de-normal */

	movb	$0,FPU_denormal	/* 0 -> not a de-normal */

Denorm_done:
	movb	$0,FPU_bits_lost	/* No bits yet lost in rounding */

	/* dispatch on the precision-control field of the control word */
	movl	%ecx,%esi
	andl	CW_PC,%ecx
	cmpl	PR_64_BITS,%ecx
	je	LRound_To_64

	cmpl	PR_53_BITS,%ecx
	je	LRound_To_53

	cmpl	PR_24_BITS,%ecx
	je	LRound_To_24

#ifdef PECULIAR_486
/* With the precision control bits set to 01 "(reserved)", a real 80486
   behaves as if the precision control bits were set to 11 "64 bits" */
	cmpl	PR_RESERVED_BITS,%ecx
	je	LRound_To_64
#ifdef PARANOID
	jmp	L_bugged_denorm_486
#endif /* PARANOID */
#else
#ifdef PARANOID
	jmp	L_bugged_denorm	/* There is no bug, just a bad control word */
#endif /* PARANOID */
#endif /* PECULIAR_486 */


/* Round etc to 24 bit precision */
LRound_To_24:
	/* dispatch on the rounding-control field */
	movl	%esi,%ecx
	andl	CW_RC,%ecx
	cmpl	RC_RND,%ecx
	je	LRound_nearest_24

	cmpl	RC_CHOP,%ecx
	je	LCheck_truncate_24

	cmpl	RC_UP,%ecx	/* Towards +infinity */
	je	LUp_24

	cmpl	RC_DOWN,%ecx	/* Towards -infinity */
	je	LDown_24

#ifdef PARANOID
	jmp	L_bugged_round24
#endif /* PARANOID */

LUp_24:
	cmpb	SIGN_POS,PARAM5
	jne	LCheck_truncate_24	/* If negative then up==truncate */

	jmp	LCheck_24_round_up

LDown_24:
	cmpb	SIGN_POS,PARAM5
	je	LCheck_truncate_24	/* If positive then down==truncate */

LCheck_24_round_up:
	/* any bits below the 24-bit result force a round up */
	movl	%eax,%ecx
	andl	$0x000000ff,%ecx
	orl	%ebx,%ecx
	orl	%edx,%ecx
	jnz	LDo_24_round_up
	jmp	L_Re_normalise

LRound_nearest_24:
	/* Do rounding of the 24th bit if needed (nearest or even) */
	movl	%eax,%ecx
	andl	$0x000000ff,%ecx
	cmpl	$0x00000080,%ecx
	jc	LCheck_truncate_24	/* less than half, no increment needed */

	jne	LGreater_Half_24	/* greater than half, increment needed */

	/* Possibly half, we need to check the ls bits */
	orl	%ebx,%ebx
	jnz	LGreater_Half_24	/* greater than half, increment needed */

	orl	%edx,%edx
	jnz	LGreater_Half_24	/* greater than half, increment needed */

	/* Exactly half, increment only if 24th bit is 1 (round to even) */
	testl	$0x00000100,%eax
	jz	LDo_truncate_24

LGreater_Half_24:	/* Rounding: increment at the 24th bit */
LDo_24_round_up:
	andl	$0xffffff00,%eax	/* Truncate to 24 bits */
	xorl	%ebx,%ebx
	movb	LOST_UP,FPU_bits_lost
	addl	$0x00000100,%eax
	jmp	LCheck_Round_Overflow

LCheck_truncate_24:
	movl	%eax,%ecx
	andl	$0x000000ff,%ecx
	orl	%ebx,%ecx
	orl	%edx,%ecx
	jz	L_Re_normalise	/* No truncation needed */

LDo_truncate_24:
	andl	$0xffffff00,%eax	/* Truncate to 24 bits */
	xorl	%ebx,%ebx
	movb	LOST_DOWN,FPU_bits_lost
	jmp	L_Re_normalise


/* Round etc to 53 bit precision */
LRound_To_53:
	movl	%esi,%ecx
	andl	CW_RC,%ecx
	cmpl	RC_RND,%ecx
	je	LRound_nearest_53

	cmpl	RC_CHOP,%ecx
	je	LCheck_truncate_53

	cmpl	RC_UP,%ecx	/* Towards +infinity */
	je	LUp_53

	cmpl	RC_DOWN,%ecx	/* Towards -infinity */
	je	LDown_53

#ifdef PARANOID
	jmp	L_bugged_round53
#endif /* PARANOID */

LUp_53:
	cmpb	SIGN_POS,PARAM5
	jne	LCheck_truncate_53	/* If negative then up==truncate */

	jmp	LCheck_53_round_up

LDown_53:
	cmpb	SIGN_POS,PARAM5
	je	LCheck_truncate_53	/* If positive then down==truncate */

LCheck_53_round_up:
	movl	%ebx,%ecx
	andl	$0x000007ff,%ecx
	orl	%edx,%ecx
	jnz	LDo_53_round_up
	jmp	L_Re_normalise

LRound_nearest_53:
	/* Do rounding of the 53rd bit if needed (nearest or even) */
	movl	%ebx,%ecx
	andl	$0x000007ff,%ecx
	cmpl	$0x00000400,%ecx
	jc	LCheck_truncate_53	/* less than half, no increment needed */

	jnz	LGreater_Half_53	/* greater than half, increment needed */

	/* Possibly half, we need to check the ls bits */
	orl	%edx,%edx
	jnz	LGreater_Half_53	/* greater than half, increment needed */

	/* Exactly half, increment only if 53rd bit is 1 (round to even) */
	testl	$0x00000800,%ebx
	jz	LTruncate_53

LGreater_Half_53:	/* Rounding: increment at the 53rd bit */
LDo_53_round_up:
	movb	LOST_UP,FPU_bits_lost
	andl	$0xfffff800,%ebx	/* Truncate to 53 bits */
	addl	$0x00000800,%ebx
	adcl	$0,%eax
	jmp	LCheck_Round_Overflow

LCheck_truncate_53:
	movl	%ebx,%ecx
	andl	$0x000007ff,%ecx
	orl	%edx,%ecx
	jz	L_Re_normalise

LTruncate_53:
	movb	LOST_DOWN,FPU_bits_lost
	andl	$0xfffff800,%ebx	/* Truncate to 53 bits */
	jmp	L_Re_normalise


/* Round etc to 64 bit precision */
LRound_To_64:
	movl	%esi,%ecx
	andl	CW_RC,%ecx
	cmpl	RC_RND,%ecx
	je	LRound_nearest_64

	cmpl	RC_CHOP,%ecx
	je	LCheck_truncate_64

	cmpl	RC_UP,%ecx	/* Towards +infinity */
	je	LUp_64

	cmpl	RC_DOWN,%ecx	/* Towards -infinity */
	je	LDown_64

#ifdef PARANOID
	jmp	L_bugged_round64
#endif /* PARANOID */

LUp_64:
	cmpb	SIGN_POS,PARAM5
	jne	LCheck_truncate_64	/* If negative then up==truncate */

	orl	%edx,%edx
	jnz	LDo_64_round_up
	jmp	L_Re_normalise

LDown_64:
	cmpb	SIGN_POS,PARAM5
	je	LCheck_truncate_64	/* If positive then down==truncate */

	orl	%edx,%edx
	jnz	LDo_64_round_up
	jmp	L_Re_normalise

LRound_nearest_64:
	cmpl	$0x80000000,%edx
	jc	LCheck_truncate_64

	jne	LDo_64_round_up

	/* Now test for round-to-even */
	testb	$1,%bl
	jz	LCheck_truncate_64

LDo_64_round_up:
	movb	LOST_UP,FPU_bits_lost
	addl	$1,%ebx
	adcl	$0,%eax

LCheck_Round_Overflow:
	jnc	L_Re_normalise

	/* Overflow, adjust the result (significand to 1.0) */
	rcrl	$1,%eax		/* shift the carry back in at the top */
	rcrl	$1,%ebx
	incw	EXP(%edi)
	jmp	L_Re_normalise

LCheck_truncate_64:
	orl	%edx,%edx
	jz	L_Re_normalise

LTruncate_64:
	movb	LOST_DOWN,FPU_bits_lost

L_Re_normalise:
	testb	$0xff,FPU_denormal
	jnz	Normalise_result

L_Normalised:
	movl	TAG_Valid,%edx

L_deNormalised:
	cmpb	LOST_UP,FPU_bits_lost
	je	L_precision_lost_up

	cmpb	LOST_DOWN,FPU_bits_lost
	je	L_precision_lost_down

L_no_precision_loss:
	/* store the result */

L_Store_significand:
	movl	%eax,SIGH(%edi)
	movl	%ebx,SIGL(%edi)

	cmpw	EXP_OVER,EXP(%edi)
	jge	L_overflow

	movl	%edx,%eax	/* return the tag */

	/* Convert the exponent to 80x87 form. */
	addw	EXTENDED_Ebias,EXP(%edi)
	andw	$0x7fff,EXP(%edi)

fpu_reg_round_signed_special_exit:

	cmpb	SIGN_POS,PARAM5
	je	fpu_reg_round_special_exit

	orw	$0x8000,EXP(%edi)	/* Negative sign for the result. */

fpu_reg_round_special_exit:

#ifndef NON_REENTRANT_FPU
	popl	%ebx		/* adjust the stack pointer */
#endif /* NON_REENTRANT_FPU */

fpu_Arith_exit:
	popl	%ebx
	popl	%edi
	popl	%esi
	leave
	ret


/*
 * Set the FPU status flags to represent precision loss due to
 * round-up.
 */
L_precision_lost_up:
	push	%edx
	push	%eax
	call	set_precision_flag_up
	popl	%eax
	popl	%edx
	jmp	L_no_precision_loss

/*
 * Set the FPU status flags to represent precision loss due to
 * truncation.
 */
L_precision_lost_down:
	push	%edx
	push	%eax
	call	set_precision_flag_down
	popl	%eax
	popl	%edx
	jmp	L_no_precision_loss


/*
 * The number is a denormal (which might get rounded up to a normal)
 * Shift the number right the required number of bits, which will
 * have to be undone later...
 */
L_Make_denorm:
	/* The action to be taken depends upon whether the underflow
	   exception is masked */
	testb	CW_Underflow,%cl	/* Underflow mask. */
	jz	Unmasked_underflow	/* Do not make a denormal. */

	movb	DENORMAL,FPU_denormal

	pushl	%ecx		/* Save */
	movw	EXP_UNDER+1,%cx
	subw	EXP(%edi),%cx	/* %cx = right-shift needed */

	cmpw	$64,%cx	/* shrd only works for 0..31 bits */
	jnc	Denorm_shift_more_than_63

	cmpw	$32,%cx	/* shrd only works for 0..31 bits */
	jnc	Denorm_shift_more_than_32

/*
 * We got here without jumps by assuming that the most common requirement
 *   is for a small de-normalising shift.
 * Shift by [1..31] bits
 */
	addw	%cx,EXP(%edi)
	orl	%edx,%edx	/* extension */
	setne	%ch	/* Save whether %edx is non-zero */
	xorl	%edx,%edx
	shrd	%cl,%ebx,%edx
	shrd	%cl,%eax,%ebx
	shr	%cl,%eax
	orb	%ch,%dl		/* keep stickiness of the shifted-out bits */
	popl	%ecx
	jmp	Denorm_done

/* Shift by [32..63] bits */
Denorm_shift_more_than_32:
	addw	%cx,EXP(%edi)
	subb	$32,%cl
	orl	%edx,%edx
	setne	%ch
	orb	%ch,%bl
	xorl	%edx,%edx
	shrd	%cl,%ebx,%edx
	shrd	%cl,%eax,%ebx
	shr	%cl,%eax
	orl	%edx,%edx	/* test these 32 bits */
	setne	%cl
	orb	%ch,%bl
	orb	%cl,%bl
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	popl	%ecx
	jmp	Denorm_done

/* Shift by [64..) bits */
Denorm_shift_more_than_63:
	cmpw	$64,%cx
	jne	Denorm_shift_more_than_64

/* Exactly 64 bit shift */
	addw	%cx,EXP(%edi)
	xorl	%ecx,%ecx
	orl	%edx,%edx
	setne	%cl
	orl	%ebx,%ebx
	setne	%ch
	orb	%ch,%cl
	orb	%cl,%al
	movl	%eax,%edx
	xorl	%eax,%eax
	xorl	%ebx,%ebx
	popl	%ecx
	jmp	Denorm_done

Denorm_shift_more_than_64:
	movw	EXP_UNDER+1,EXP(%edi)
/* This is easy, %eax must be non-zero, so.. */
	movl	$1,%edx		/* everything shifted out: sticky bit only */
	xorl	%eax,%eax
	xorl	%ebx,%ebx
	popl	%ecx
	jmp	Denorm_done


Unmasked_underflow:
	movb	UNMASKED_UNDERFLOW,FPU_denormal
	jmp	Denorm_done


/* Undo the de-normalisation. */
Normalise_result:
	cmpb	UNMASKED_UNDERFLOW,FPU_denormal
	je	Signal_underflow

/* The number must be a denormal if we got here. */
#ifdef PARANOID
	/* But check it... just in case. */
	cmpw	EXP_UNDER+1,EXP(%edi)
	jne	L_norm_bugged
#endif /* PARANOID */

#ifdef PECULIAR_486
	/*
	 * This implements a special feature of 80486 behaviour.
	 * Underflow will be signalled even if the number is
	 * not a denormal after rounding.
	 * This difference occurs only for masked underflow, and not
	 * in the unmasked case.
	 * Actual 80486 behaviour differs from this in some circumstances.
	 */
	orl	%eax,%eax	/* ms bits */
	js	LPseudoDenormal	/* Will be masked underflow */
#else
	orl	%eax,%eax	/* ms bits */
	js	L_Normalised	/* No longer a denormal */
#endif /* PECULIAR_486 */

	jnz	LDenormal_adj_exponent

	orl	%ebx,%ebx
	jz	L_underflow_to_zero	/* The contents are zero */

LDenormal_adj_exponent:
	decw	EXP(%edi)

LPseudoDenormal:
	testb	$0xff,FPU_bits_lost	/* bits lost == underflow */
	movl	TAG_Special,%edx
	jz	L_deNormalised

	/* There must be a masked underflow */
	push	%eax
	pushl	EX_Underflow
	call	EXCEPTION
	popl	%eax
	popl	%eax
	movl	TAG_Special,%edx
	jmp	L_deNormalised


/*
 * The operations resulted in a number too small to represent.
 * Masked response.
 */
L_underflow_to_zero:
	push	%eax
	call	set_precision_flag_down
	popl	%eax

	push	%eax
	pushl	EX_Underflow
	call	EXCEPTION
	popl	%eax
	popl	%eax

/* Reduce the exponent to EXP_UNDER */
	movw	EXP_UNDER,EXP(%edi)
	movl	TAG_Zero,%edx
	jmp	L_Store_significand


/* The operations resulted in a number too large to represent. */
L_overflow:
	addw	EXTENDED_Ebias,EXP(%edi)	/* Set for unmasked response. */
	push	%edi
	call	arith_overflow
	pop	%edi
	jmp	fpu_reg_round_signed_special_exit


Signal_underflow:
	/* The number may have been changed to a non-denormal */
	/* by the rounding operations. */
	cmpw	EXP_UNDER,EXP(%edi)
	jle	Do_unmasked_underflow

	jmp	L_Normalised

Do_unmasked_underflow:
	/* Increase the exponent by the magic number */
	addw	$(3*(1<<13)),EXP(%edi)
	push	%eax
	pushl	EX_Underflow
	call	EXCEPTION
	popl	%eax
	popl	%eax
	jmp	L_Normalised


#ifdef PARANOID
#ifdef PECULIAR_486
L_bugged_denorm_486:
	pushl	EX_INTERNAL|0x236
	call	EXCEPTION
	popl	%ebx
	jmp	L_exception_exit
#else
L_bugged_denorm:
	pushl	EX_INTERNAL|0x230
	call	EXCEPTION
	popl	%ebx
	jmp	L_exception_exit
#endif /* PECULIAR_486 */

L_bugged_round24:
	pushl	EX_INTERNAL|0x231
	call	EXCEPTION
	popl	%ebx
	jmp	L_exception_exit

L_bugged_round53:
	pushl	EX_INTERNAL|0x232
	call	EXCEPTION
	popl	%ebx
	jmp	L_exception_exit

L_bugged_round64:
	pushl	EX_INTERNAL|0x233
	call	EXCEPTION
	popl	%ebx
	jmp	L_exception_exit

L_norm_bugged:
	pushl	EX_INTERNAL|0x234
	call	EXCEPTION
	popl	%ebx
	jmp	L_exception_exit

L_entry_bugged:
	pushl	EX_INTERNAL|0x235
	call	EXCEPTION
	popl	%ebx
L_exception_exit:
	mov	$-1,%eax
	jmp	fpu_reg_round_special_exit
#endif /* PARANOID */

ENDPROC(FPU_round)
AirFortressIlikara/LS2K0300-linux-4.19
3,384
arch/x86/math-emu/round_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
	.file	"round_Xsig.S"

#include "fpu_emu.h"

.text

/*
 * int round_Xsig(Xsig *n)
 *
 * Normalize the 96-bit quantity *n (loaded as %edx:%ebx:%eax, ms word in
 * %edx), then round the top 64 bits to nearest using bit 31 of the ls word.
 * The (negative) shift count is accumulated in the stack slot -4(%ebp) and
 * returned in %eax.
 * Clobbers: %eax, %ecx, %edx; %ebx/%esi are saved and restored.
 */
ENTRY(round_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space (the slot at -4(%ebp)
				   holds the running shift count) */
	pushl	%ebx
	pushl	%esi

	movl	PARAM1,%esi

	movl	8(%esi),%edx	/* ms word of *n */
	movl	4(%esi),%ebx	/* mid word */
	movl	(%esi),%eax	/* ls word */

	movl	$0,-4(%ebp)	/* shift count starts at zero */

	orl	%edx,%edx	/* ms bits */
	js	L_round		/* Already normalized */
	jnz	L_shift_1	/* Shift left 1 - 31 bits */

	/* The ms word is zero: shift left a whole word (32 bits) first. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

/* We need to shift left by 1 - 31 bits */
L_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx
	subl	%ecx,-4(%ebp)	/* account for the extra shift */

	shld	%cl,%ebx,%edx
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_round:
	/* Round to nearest: bit 31 of the ls word decides. */
	testl	$0x80000000,%eax
	jz	L_exit

	addl	$1,%ebx
	adcl	$0,%edx
	jnz	L_exit

	/* Rounding carried all the way out of the ms word: the value is
	   now 1.000...0 * 2; renormalize and bump the shift count. */
	movl	$0x80000000,%edx
	incl	-4(%ebp)

L_exit:
	movl	%edx,8(%esi)
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax	/* return the shift count */

	popl	%esi
	popl	%ebx
	leave
	ret
ENDPROC(round_Xsig)


/*
 * int norm_Xsig(Xsig *n)
 *
 * Normalize *n without rounding.  Up to two whole-word (32-bit) shifts
 * plus one partial shift are performed; the shift count is returned in
 * %eax (negative of the number of bit positions shifted, as accumulated
 * in -4(%ebp)).
 */
ENTRY(norm_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space */
	pushl	%ebx
	pushl	%esi

	movl	PARAM1,%esi

	movl	8(%esi),%edx	/* ms word */
	movl	4(%esi),%ebx	/* mid word */
	movl	(%esi),%eax	/* ls word */

	movl	$0,-4(%ebp)

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Already normalized */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	/* ms word zero: shift left by one whole word. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Normalized now */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	/* Still zero: shift by a second whole word and stop. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	addl	$-32,-4(%ebp)
	jmp	L_n_exit	/* Might not be normalized, but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx
	subl	%ecx,-4(%ebp)

	shld	%cl,%ebx,%edx
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_n_exit:
	movl	%edx,8(%esi)
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax	/* return the shift count */

	popl	%esi
	popl	%ebx
	leave
	ret
ENDPROC(norm_Xsig)
AirFortressIlikara/LS2K0300-linux-4.19
11,026
arch/x86/math-emu/wm_sqrt.S
/* SPDX-License-Identifier: GPL-2.0 */
	.file	"wm_sqrt.S"
/*---------------------------------------------------------------------------+
 |  wm_sqrt.S                                                                |
 |                                                                           |
 | Fixed point arithmetic square root evaluation.                            |
 |                                                                           |
 | Copyright (C) 1992,1993,1995,1997                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@suburbia.net               |
 |                                                                           |
 | Call from C as:                                                           |
 |   int wm_sqrt(FPU_REG *n, unsigned int control_word)                      |
 |                                                                           |
 +---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------+
 |  wm_sqrt(FPU_REG *n, unsigned int control_word)                           |
 |    returns the square root of n in n.                                     |
 |                                                                           |
 |  Use Newton's method to compute the square root of a number, which must   |
 |  be in the range  [1.0 .. 4.0),  to 64 bits accuracy.                     |
 |  Does not check the sign or tag of the argument.                          |
 |  Sets the exponent, but not the sign or tag of the result.                |
 |                                                                           |
 |  The guess is kept in %esi:%edi                                           |
 +---------------------------------------------------------------------------*/

#include "exception.h"
#include "fpu_emu.h"

#ifndef NON_REENTRANT_FPU
/*	Local storage on the stack: */
#define FPU_accum_3	-4(%ebp)	/* ms word */
#define FPU_accum_2	-8(%ebp)
#define FPU_accum_1	-12(%ebp)
#define FPU_accum_0	-16(%ebp)

/*
 * The de-normalised argument:
 *                  sq_2                  sq_1              sq_0
 *        b b b b b b b ... b b b  b b b b b b .... b b b  b 0 0 0 ... 0
 *                 ^ binary point here
 */
#define FPU_fsqrt_arg_2	-20(%ebp)	/* ms word */
#define FPU_fsqrt_arg_1	-24(%ebp)
#define FPU_fsqrt_arg_0	-28(%ebp)	/* ls word, at most the ms bit is set */

#else
/*	Local storage in a static area: */
.data
	.align 4,0
FPU_accum_3:
	.long	0		/* ms word */
FPU_accum_2:
	.long	0
FPU_accum_1:
	.long	0
FPU_accum_0:
	.long	0

/* The de-normalised argument:
                  sq_2                  sq_1              sq_0
        b b b b b b b ... b b b  b b b b b b .... b b b  b 0 0 0 ... 0
                 ^ binary point here
 */
FPU_fsqrt_arg_2:
	.long	0		/* ms word */
FPU_fsqrt_arg_1:
	.long	0
FPU_fsqrt_arg_0:
	.long	0		/* ls word, at most the ms bit is set */
#endif /* NON_REENTRANT_FPU */

.text
ENTRY(wm_sqrt)
	pushl	%ebp
	movl	%esp,%ebp
#ifndef NON_REENTRANT_FPU
	subl	$28,%esp	/* the seven locals defined above */
#endif /* NON_REENTRANT_FPU */
	pushl	%esi
	pushl	%edi
	pushl	%ebx

	movl	PARAM1,%esi

	movl	SIGH(%esi),%eax
	movl	SIGL(%esi),%ecx
	xorl	%edx,%edx

/* We use a rough linear estimate for the first guess.. */

	cmpw	EXP_BIAS,EXP(%esi)
	jnz	sqrt_arg_ge_2

	/* arg is in the range [1.0 .. 2.0): halve it so the argument is
	   uniformly pre-scaled into [0.5 .. 2.0) with the binary point
	   one bit further left. */
	shrl	$1,%eax		/* arg is in the range [1.0 .. 2.0) */
	rcrl	$1,%ecx
	rcrl	$1,%edx

sqrt_arg_ge_2:
/* From here on, n is never accessed directly again until it is
   replaced by the answer. */

	movl	%eax,FPU_fsqrt_arg_2	/* ms word of n */
	movl	%ecx,FPU_fsqrt_arg_1
	movl	%edx,FPU_fsqrt_arg_0

/* Make a linear first estimate */
	shrl	$1,%eax
	addl	$0x40000000,%eax
	movl	$0xaaaaaaaa,%ecx	/* approx 2/3, fixed point */
	mull	%ecx
	shll	%edx			/* max result was 7fff... */
	testl	$0x80000000,%edx	/* but min was 3fff... */
	jnz	sqrt_prelim_no_adjust

	movl	$0x80000000,%edx	/* round up */

sqrt_prelim_no_adjust:
	movl	%edx,%esi	/* Our first guess */

/* We have now computed (approx) (2 + x) / 3, which forms the basis
   for a few iterations of Newton's method */

	movl	FPU_fsqrt_arg_2,%ecx	/* ms word */

/*
 * From our initial estimate, three iterations are enough to get us
 * to 30 bits or so. This will then allow two iterations at better
 * precision to complete the process.
 */

/* Compute  (g + n/g)/2  at each iteration (g is the guess). */
	shrl	%ecx		/* Doing this first will prevent a divide */
				/* overflow later. */

	movl	%ecx,%edx	/* msw of the arg / 2 */
	divl	%esi		/* current estimate */
	shrl	%esi		/* divide by 2 */
	addl	%eax,%esi	/* the new estimate */

	movl	%ecx,%edx
	divl	%esi
	shrl	%esi
	addl	%eax,%esi

	movl	%ecx,%edx
	divl	%esi
	shrl	%esi
	addl	%eax,%esi

/*
 * Now that an estimate accurate to about 30 bits has been obtained (in %esi),
 * we improve it to 60 bits or so.
 *
 * The strategy from now on is to compute new estimates from
 *      guess := guess + (n - guess^2) / (2 * guess)
 */

/* First, find the square of the guess */
	movl	%esi,%eax
	mull	%esi
/* guess^2 now in %edx:%eax */

	movl	FPU_fsqrt_arg_1,%ecx
	subl	%ecx,%eax
	movl	FPU_fsqrt_arg_2,%ecx	/* ms word of normalized n */
	sbbl	%ecx,%edx
	jnc	sqrt_stage_2_positive

/* Subtraction gives a negative result,
   negate the result before division. */
	notl	%edx
	notl	%eax
	addl	$1,%eax
	adcl	$0,%edx

	divl	%esi
	movl	%eax,%ecx

	movl	%edx,%eax
	divl	%esi
	jmp	sqrt_stage_2_finish

sqrt_stage_2_positive:
	divl	%esi
	movl	%eax,%ecx

	movl	%edx,%eax
	divl	%esi

	/* Positive difference means the guess was too large:
	   the correction must be subtracted, so negate it. */
	notl	%ecx
	notl	%eax
	addl	$1,%eax
	adcl	$0,%ecx

sqrt_stage_2_finish:
	sarl	$1,%ecx		/* divide by 2 */
	rcrl	$1,%eax

	/* Form the new estimate in %esi:%edi */
	movl	%eax,%edi
	addl	%ecx,%esi

	jnz	sqrt_stage_2_done	/* result should be [1..2) */

#ifdef PARANOID
/* It should be possible to get here only if the arg is ffff....ffff */
	cmpl	$0xffffffff,FPU_fsqrt_arg_1
	jnz	sqrt_stage_2_error
#endif /* PARANOID */

/* The best rounded result. */
	xorl	%eax,%eax
	decl	%eax
	movl	%eax,%edi
	movl	%eax,%esi
	movl	$0x7fffffff,%eax
	jmp	sqrt_round_result

#ifdef PARANOID
sqrt_stage_2_error:
	pushl	EX_INTERNAL|0x213
	call	EXCEPTION
#endif /* PARANOID */

sqrt_stage_2_done:
/* Now the square root has been computed to better than 60 bits. */

/* Find the square of the guess. */
	movl	%edi,%eax	/* ls word of guess */
	mull	%edi
	movl	%edx,FPU_accum_1

	movl	%esi,%eax
	mull	%esi
	movl	%edx,FPU_accum_3
	movl	%eax,FPU_accum_2

	movl	%edi,%eax
	mull	%esi
	addl	%eax,FPU_accum_1
	adcl	%edx,FPU_accum_2
	adcl	$0,FPU_accum_3

/* The cross term counts twice; %eax:%edx still hold edi*esi,
   so just add it in again rather than re-multiplying: */
/*	movl	%esi,%eax */
/*	mull	%edi */
	addl	%eax,FPU_accum_1
	adcl	%edx,FPU_accum_2
	adcl	$0,FPU_accum_3

/* guess^2 now in FPU_accum_3:FPU_accum_2:FPU_accum_1 */

	movl	FPU_fsqrt_arg_0,%eax	/* get normalized n */
	subl	%eax,FPU_accum_1
	movl	FPU_fsqrt_arg_1,%eax
	sbbl	%eax,FPU_accum_2
	movl	FPU_fsqrt_arg_2,%eax	/* ms word of normalized n */
	sbbl	%eax,FPU_accum_3
	jnc	sqrt_stage_3_positive

/* Subtraction gives a negative result,
   negate the result before division */
	notl	FPU_accum_1
	notl	FPU_accum_2
	notl	FPU_accum_3
	addl	$1,FPU_accum_1
	adcl	$0,FPU_accum_2

#ifdef PARANOID
	adcl	$0,FPU_accum_3	/* This must be zero */
	jz	sqrt_stage_3_no_error

sqrt_stage_3_error:
	pushl	EX_INTERNAL|0x207
	call	EXCEPTION

sqrt_stage_3_no_error:
#endif /* PARANOID */

	movl	FPU_accum_2,%edx
	movl	FPU_accum_1,%eax
	divl	%esi
	movl	%eax,%ecx

	movl	%edx,%eax
	divl	%esi

	sarl	$1,%ecx		/* divide by 2 */
	rcrl	$1,%eax

	/* prepare to round the result */

	addl	%ecx,%edi
	adcl	$0,%esi

	jmp	sqrt_stage_3_finished

sqrt_stage_3_positive:
	movl	FPU_accum_2,%edx
	movl	FPU_accum_1,%eax
	divl	%esi
	movl	%eax,%ecx

	movl	%edx,%eax
	divl	%esi

	sarl	$1,%ecx		/* divide by 2 */
	rcrl	$1,%eax

	/* prepare to round the result */

	notl	%eax		/* Negate the correction term */
	notl	%ecx
	addl	$1,%eax
	adcl	$0,%ecx		/* carry here ==> correction == 0 */
	adcl	$0xffffffff,%esi

	addl	%ecx,%edi
	adcl	$0,%esi

sqrt_stage_3_finished:

/*
 * The result in %esi:%edi (rounding information in %eax) should be
 * good to about 90 bits here, and the rounding information here does
 * not have sufficient accuracy in a few rare cases.
 */
	cmpl	$0xffffffe0,%eax
	ja	sqrt_near_exact_x

	cmpl	$0x00000020,%eax
	jb	sqrt_near_exact

	cmpl	$0x7fffffe0,%eax
	jb	sqrt_round_result

	cmpl	$0x80000020,%eax
	jb	sqrt_get_more_precision

sqrt_round_result:
/* Set up for rounding operations */
	movl	%eax,%edx
	movl	%esi,%eax
	movl	%edi,%ebx
	movl	PARAM1,%edi
	movw	EXP_BIAS,EXP(%edi)	/* Result is in [1.0 .. 2.0) */
	jmp	fpu_reg_round

sqrt_near_exact_x:
/* First, the estimate must be rounded up. */
	addl	$1,%edi
	adcl	$0,%esi

sqrt_near_exact:
/*
 * This is an easy case because x^1/2 is monotonic.
 * We need just find the square of our estimate, compare it
 * with the argument, and deduce whether our estimate is
 * above, below, or exact. We use the fact that the estimate
 * is known to be accurate to about 90 bits.
 */
	movl	%edi,%eax	/* ls word of guess */
	mull	%edi
	movl	%edx,%ebx	/* 2nd ls word of square */
	movl	%eax,%ecx	/* ls word of square */

	movl	%edi,%eax
	mull	%esi
	addl	%eax,%ebx
	addl	%eax,%ebx	/* cross term counts twice */

#ifdef PARANOID
	cmp	$0xffffffb0,%ebx
	jb	sqrt_near_exact_ok

	cmp	$0x00000050,%ebx
	ja	sqrt_near_exact_ok

	pushl	EX_INTERNAL|0x214
	call	EXCEPTION

sqrt_near_exact_ok:
#endif /* PARANOID */

	or	%ebx,%ebx
	js	sqrt_near_exact_small

	jnz	sqrt_near_exact_large

	or	%ebx,%edx
	jnz	sqrt_near_exact_large

/* Our estimate is exactly the right answer */
	xorl	%eax,%eax
	jmp	sqrt_round_result

sqrt_near_exact_small:
/* Our estimate is too small */
	movl	$0x000000ff,%eax
	jmp	sqrt_round_result

sqrt_near_exact_large:
/* Our estimate is too large, we need to decrement it */
	subl	$1,%edi
	sbbl	$0,%esi
	movl	$0xffffff00,%eax
	jmp	sqrt_round_result

sqrt_get_more_precision:
/* This case is almost the same as the above, except we start
   with an extra bit of precision in the estimate. */
	stc			/* The extra bit. */
	rcll	$1,%edi		/* Shift the estimate left one bit */
	rcll	$1,%esi

	movl	%edi,%eax	/* ls word of guess */
	mull	%edi
	movl	%edx,%ebx	/* 2nd ls word of square */
	movl	%eax,%ecx	/* ls word of square */

	movl	%edi,%eax
	mull	%esi
	addl	%eax,%ebx
	addl	%eax,%ebx	/* cross term counts twice */

/* Put our estimate back to its original value */
	stc			/* The ms bit. */
	rcrl	$1,%esi		/* Shift the estimate left one bit */
	rcrl	$1,%edi

#ifdef PARANOID
	cmp	$0xffffff60,%ebx
	jb	sqrt_more_prec_ok

	cmp	$0x000000a0,%ebx
	ja	sqrt_more_prec_ok

	pushl	EX_INTERNAL|0x215
	call	EXCEPTION

sqrt_more_prec_ok:
#endif /* PARANOID */

	or	%ebx,%ebx
	js	sqrt_more_prec_small

	jnz	sqrt_more_prec_large

	or	%ebx,%ecx
	jnz	sqrt_more_prec_large

/* Our estimate is exactly the right answer */
	movl	$0x80000000,%eax
	jmp	sqrt_round_result

sqrt_more_prec_small:
/* Our estimate is too small */
	movl	$0x800000ff,%eax
	jmp	sqrt_round_result

sqrt_more_prec_large:
/* Our estimate is too large */
	movl	$0x7fffff00,%eax
	jmp	sqrt_round_result
ENDPROC(wm_sqrt)
AirFortressIlikara/LS2K0300-linux-4.19
2,514
arch/x86/math-emu/shr_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */
	.file	"shr_Xsig.S"
/*---------------------------------------------------------------------------+
 |  shr_Xsig.S                                                               |
 |                                                                           |
 | 12 byte right shift function                                              |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void shr_Xsig(Xsig *arg, unsigned nr)                                   |
 |                                                                           |
 | Extended shift right function.                                            |
 | Fastest for small shifts.                                                 |
 | Shifts the 12 byte quantity pointed to by the first arg (arg)             |
 | right by the number of bits specified by the second arg (nr).             |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "fpu_emu.h"

.text
ENTRY(shr_Xsig)
	push	%ebp
	movl	%esp,%ebp
	pushl	%esi
	movl	PARAM2,%ecx	/* shift count */
	movl	PARAM1,%esi	/* pointer to the Xsig */
	cmpl	$32,%ecx	/* shrd only works for 0..31 bits */
	jnc	L_more_than_31

/* less than 32 bits: shift all three words with shrd chains */
	pushl	%ebx
	movl	(%esi),%eax	/* lsl */
	movl	4(%esi),%ebx	/* midl */
	movl	8(%esi),%edx	/* msl */
	shrd	%cl,%ebx,%eax
	shrd	%cl,%edx,%ebx
	shr	%cl,%edx
	movl	%eax,(%esi)
	movl	%ebx,4(%esi)
	movl	%edx,8(%esi)
	popl	%ebx
	popl	%esi
	leave
	ret

L_more_than_31:
	cmpl	$64,%ecx
	jnc	L_more_than_63

	/* 32..63 bits: the ls word is discarded, shift the rest. */
	subb	$32,%cl
	movl	4(%esi),%eax	/* midl */
	movl	8(%esi),%edx	/* msl */
	shrd	%cl,%edx,%eax
	shr	%cl,%edx
	movl	%eax,(%esi)
	movl	%edx,4(%esi)
	movl	$0,8(%esi)
	popl	%esi
	leave
	ret

L_more_than_63:
	cmpl	$96,%ecx
	jnc	L_more_than_95

	/* 64..95 bits: only the ms word survives. */
	subb	$64,%cl
	movl	8(%esi),%eax	/* msl */
	shr	%cl,%eax
	xorl	%edx,%edx
	movl	%eax,(%esi)
	movl	%edx,4(%esi)
	movl	%edx,8(%esi)
	popl	%esi
	leave
	ret

L_more_than_95:
	/* 96 bits or more: the result is zero. */
	xorl	%eax,%eax
	movl	%eax,(%esi)
	movl	%eax,4(%esi)
	movl	%eax,8(%esi)
	popl	%esi
	leave
	ret
ENDPROC(shr_Xsig)
AirFortressIlikara/LS2K0300-linux-4.19
3,701
arch/x86/math-emu/reg_u_mul.S
/* SPDX-License-Identifier: GPL-2.0 */
	.file	"reg_u_mul.S"
/*---------------------------------------------------------------------------+
 |  reg_u_mul.S                                                              |
 |                                                                           |
 | Core multiplication routine                                               |
 |                                                                           |
 | Copyright (C) 1992,1993,1995,1997                                         |
 |                  W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
 |                  E-mail billm@suburbia.net                                |
 |                                                                           |
 |                                                                           |
 +---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------+
 |   Basic multiplication routine.                                           |
 |   Does not check the resulting exponent for overflow/underflow            |
 |                                                                           |
 |   FPU_u_mul(FPU_REG *a, FPU_REG *b, FPU_REG *c, unsigned int cw);         |
 |                                                                           |
 |   Internal working is at approx 128 bits.                                 |
 |   Result is rounded to nearest 53 or 64 bits, using "nearest or even".    |
 +---------------------------------------------------------------------------*/

#include "exception.h"
#include "fpu_emu.h"
#include "control_w.h"

#ifndef NON_REENTRANT_FPU
/* Local storage on the stack: */
/* Only the two ls words of the 128-bit product are kept in memory;
   the two ms words live in %ecx:%ebx. */
#define FPU_accum_0	-4(%ebp)	/* ms word */
#define FPU_accum_1	-8(%ebp)

#else
/* Local storage in a static area: */
.data
	.align 4,0
FPU_accum_0:
	.long	0
FPU_accum_1:
	.long	0
#endif /* NON_REENTRANT_FPU */

.text
ENTRY(FPU_u_mul)
	pushl	%ebp
	movl	%esp,%ebp
#ifndef NON_REENTRANT_FPU
	subl	$8,%esp
#endif /* NON_REENTRANT_FPU */

	pushl	%esi
	pushl	%edi
	pushl	%ebx

	movl	PARAM1,%esi
	movl	PARAM2,%edi

#ifdef PARANOID
	/* Both significands must be normalized (ms bit set). */
	testl	$0x80000000,SIGH(%esi)
	jz	L_bugged
	testl	$0x80000000,SIGH(%edi)
	jz	L_bugged
#endif /* PARANOID */

	/* 64x64 -> 128 bit multiply via four 32x32 partial products.
	   Accumulator layout: %ecx:%ebx:FPU_accum_1:FPU_accum_0 */
	xorl	%ecx,%ecx
	xorl	%ebx,%ebx

	movl	SIGL(%esi),%eax
	mull	SIGL(%edi)
	movl	%eax,FPU_accum_0
	movl	%edx,FPU_accum_1

	movl	SIGL(%esi),%eax
	mull	SIGH(%edi)
	addl	%eax,FPU_accum_1
	adcl	%edx,%ebx
/*	adcl	$0,%ecx		// overflow here is not possible */

	movl	SIGH(%esi),%eax
	mull	SIGL(%edi)
	addl	%eax,FPU_accum_1
	adcl	%edx,%ebx
	adcl	$0,%ecx

	movl	SIGH(%esi),%eax
	mull	SIGH(%edi)
	addl	%eax,%ebx
	adcl	%edx,%ecx

	/* Get the sum of the exponents. */
	movl	PARAM6,%eax
	subl	EXP_BIAS-1,%eax

	/* Two denormals can cause an exponent underflow */
	cmpl	EXP_WAY_UNDER,%eax
	jg	Exp_not_underflow

	/* Set to a really low value allow correct handling */
	movl	EXP_WAY_UNDER,%eax

Exp_not_underflow:

/* Have now finished with the sources */
	movl	PARAM3,%edi	/* Point to the destination */
	movw	%ax,EXP(%edi)

/* Now make sure that the result is normalized */
	testl	$0x80000000,%ecx
	jnz	LResult_Normalised

	/* Normalize by shifting left one bit (the product of two
	   normalized numbers is at least 0.25, so one shift suffices). */
	shll	$1,FPU_accum_0
	rcll	$1,FPU_accum_1
	rcll	$1,%ebx
	rcll	$1,%ecx
	decw	EXP(%edi)

LResult_Normalised:
	movl	FPU_accum_0,%eax
	movl	FPU_accum_1,%edx
	orl	%eax,%eax
	jz	L_extent_zero

	/* Sticky bit: any set bit in the ls word is folded into the
	   extension so rounding sees it. */
	orl	$1,%edx

L_extent_zero:
	movl	%ecx,%eax
	jmp	fpu_reg_round

#ifdef PARANOID
L_bugged:
	pushl	EX_INTERNAL|0x205
	call	EXCEPTION
	pop	%ebx
	jmp	L_exit

L_exit:
	popl	%ebx
	popl	%edi
	popl	%esi
	leave
	ret
#endif /* PARANOID */
ENDPROC(FPU_u_mul)
AirFortressIlikara/LS2K0300-linux-4.19
4,279
arch/x86/math-emu/mul_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  mul_Xsig.S                                                               |
 |                                                                           |
 | Multiply a 12 byte fixed point number by another fixed point number.      |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void mul32_Xsig(Xsig *x, unsigned b)                                    |
 |                                                                           |
 |   void mul64_Xsig(Xsig *x, unsigned long long *b)                         |
 |                                                                           |
 |   void mul_Xsig_Xsig(Xsig *x, unsigned *b)                                |
 |                                                                           |
 | The result is neither rounded nor normalized, and the ls bit or so may    |
 | be wrong.                                                                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
	.file	"mul_Xsig.S"


#include "fpu_emu.h"

.text
/*
 * Multiply the 96-bit Xsig *x by the 32-bit value b, keeping the top
 * 96 bits of the product in *x.  The partial products are accumulated
 * in the three stack slots -12/-8/-4(%ebp) (ls..ms).
 */
ENTRY(mul32_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull %ecx		/* b */
	movl %edx,-12(%ebp)	/* keep only the high half; the low half
				   is below the retained 96 bits */

	movl 4(%esi),%eax	/* midl of Xsig */
	mull %ecx		/* b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull %ecx		/* b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	ret
ENDPROC(mul32_Xsig)


/*
 * Multiply the 96-bit Xsig *x by the 64-bit value *b (ls word at (b),
 * ms word at 4(b)), keeping the top 96 bits of the product in *x.
 * Partial products whose weight falls entirely below the retained
 * window contribute only their high halves (or just a carry).
 */
ENTRY(mul64_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull 4(%ecx)            /* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	ret
ENDPROC(mul64_Xsig)



/*
 * Multiply the 96-bit Xsig *x by the 96-bit quantity at *b, keeping the
 * top 96 bits of the product in *x.  Partial products with weight below
 * the retained window contribute only their high halves; the lowest
 * cross terms are dropped entirely (hence "the ls bit or so may be
 * wrong" in the file header).
 */
ENTRY(mul_Xsig_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull 8(%ecx)            /* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%edx
	movl %edx,(%esi)
	movl -8(%ebp),%edx
	movl %edx,4(%esi)
	movl -4(%ebp),%edx
	movl %edx,8(%esi)

	popl %esi
	leave
	ret
ENDPROC(mul_Xsig_Xsig)
AirFortressIlikara/LS2K0300-linux-4.19
6,175
arch/x86/math-emu/reg_u_sub.S
/* SPDX-License-Identifier: GPL-2.0 */
	.file	"reg_u_sub.S"
/*---------------------------------------------------------------------------+
 |  reg_u_sub.S                                                              |
 |                                                                           |
 | Core floating point subtraction routine.                                  |
 |                                                                           |
 | Copyright (C) 1992,1993,1995,1997                                         |
 |                  W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
 |                  E-mail billm@suburbia.net                                |
 |                                                                           |
 | Call from C as:                                                           |
 |   int FPU_u_sub(FPU_REG *arg1, FPU_REG *arg2, FPU_REG *answ,              |
 |                 int control_w)                                            |
 |    Return value is the tag of the answer, or-ed with FPU_Exception if     |
 |    one was raised, or -1 on internal error.                               |
 |                                                                           |
 +---------------------------------------------------------------------------*/

/*
 |    Kernel subtraction routine FPU_u_sub(reg *arg1, reg *arg2, reg *answ).
 |    Takes two valid reg f.p. numbers (TAG_Valid), which are
 |    treated as unsigned numbers,
 |    and returns their difference as a TAG_Valid or TAG_Zero f.p.
 |    number.
 |    The first number (arg1) must be the larger.
 |    The returned number is normalized.
 |    Basic checks are performed if PARANOID is defined.
 */

#include "exception.h"
#include "fpu_emu.h"
#include "control_w.h"

.text
ENTRY(FPU_u_sub)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%esi
	pushl	%edi
	pushl	%ebx

	movl	PARAM1,%esi	/* source 1 */
	movl	PARAM2,%edi	/* source 2 */

	movl	PARAM6,%ecx
	subl	PARAM7,%ecx	/* exp1 - exp2 */

#ifdef PARANOID
	/* source 2 is always smaller than source 1 */
	js	L_bugged_1

	testl	$0x80000000,SIGH(%edi)	/* The args are assumed to be be normalized */
	je	L_bugged_2

	testl	$0x80000000,SIGH(%esi)
	je	L_bugged_2
#endif /* PARANOID */

/*--------------------------------------+
 |	Form a register holding the     |
 |	smaller number                  |
 +--------------------------------------*/
	movl	SIGH(%edi),%eax	/* register ms word */
	movl	SIGL(%edi),%ebx	/* register ls word */

	movl	PARAM3,%edi	/* destination */
	movl	PARAM6,%edx
	movw	%dx,EXP(%edi)	/* Copy exponent to destination */

	xorl	%edx,%edx	/* register extension */

/*--------------------------------------+
 |	Shift the temporary register    |
 |	right the required number of    |
 |	places.                         |
 +--------------------------------------*/

	cmpw	$32,%cx		/* shrd only works for 0..31 bits */
	jnc	L_more_than_31

/* less than 32 bits */
	shrd	%cl,%ebx,%edx
	shrd	%cl,%eax,%ebx
	shr	%cl,%eax
	jmp	L_shift_done

L_more_than_31:
	cmpw	$64,%cx
	jnc	L_more_than_63

	subb	$32,%cl
	jz	L_exactly_32

	shrd	%cl,%eax,%edx
	shr	%cl,%eax
	orl	%ebx,%ebx
	jz	L_more_31_no_low	/* none of the lowest bits is set */

	orl	$1,%edx			/* record the fact in the extension */

L_more_31_no_low:
	movl	%eax,%ebx
	xorl	%eax,%eax
	jmp	L_shift_done

L_exactly_32:
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	jmp	L_shift_done

L_more_than_63:
	cmpw	$65,%cx
	jnc	L_more_than_64

	/* Shift right by 64 bits */
	movl	%eax,%edx
	orl	%ebx,%ebx
	jz	L_more_63_no_low

	orl	$1,%edx
	jmp	L_more_63_no_low

L_more_than_64:
	jne	L_more_than_65	/* flags still valid from the cmpw above */

	/* Shift right by 65 bits */
	/* Carry is clear if we get here */
	movl	%eax,%edx
	rcrl	%edx
	jnc	L_shift_65_nc

	orl	$1,%edx
	jmp	L_more_63_no_low

L_shift_65_nc:
	orl	%ebx,%ebx
	jz	L_more_63_no_low

	orl	$1,%edx
	jmp	L_more_63_no_low

L_more_than_65:
	movl	$1,%edx		/* The shifted nr always at least one '1' */

L_more_63_no_low:
	xorl	%ebx,%ebx
	xorl	%eax,%eax

L_shift_done:
L_subtr:
/*------------------------------+
 |	Do the subtraction      |
 +------------------------------*/
	/* 96-bit subtract: (sig1:0) - (%eax:%ebx:%edx); the borrow chain
	   must run from the extension word upwards, hence the order. */
	xorl	%ecx,%ecx
	subl	%edx,%ecx
	movl	%ecx,%edx
	movl	SIGL(%esi),%ecx
	sbbl	%ebx,%ecx
	movl	%ecx,%ebx
	movl	SIGH(%esi),%ecx
	sbbl	%eax,%ecx
	movl	%ecx,%eax

#ifdef PARANOID
	/* We can never get a borrow */
	jc	L_bugged
#endif /* PARANOID */

/*--------------------------------------+
 |	Normalize the result            |
 +--------------------------------------*/
	testl	$0x80000000,%eax
	jnz	L_round		/* no shifting needed */

	orl	%eax,%eax
	jnz	L_shift_1	/* shift left 1 - 31 bits */

	orl	%ebx,%ebx
	jnz	L_shift_32	/* shift left 32 - 63 bits */

/*
 *	A rare case, the only one which is non-zero if we got here
 *	is:       1000000 .... 0000
 *	         -0111111 .... 1111 1
 *	         --------------------
 *	          0000000 .... 0000 1
 */
	cmpl	$0x80000000,%edx
	jnz	L_must_be_zero

	/* Shift left 64 bits */
	subw	$64,EXP(%edi)
	xchg	%edx,%eax
	jmp	fpu_reg_round

L_must_be_zero:
#ifdef PARANOID
	orl	%edx,%edx
	jnz	L_bugged_3
#endif /* PARANOID */

	/* The result is zero */
	movw	$0,EXP(%edi)	/* exponent */
	movl	$0,SIGL(%edi)
	movl	$0,SIGH(%edi)
	movl	TAG_Zero,%eax
	jmp	L_exit

L_shift_32:
	movl	%ebx,%eax
	movl	%edx,%ebx
	movl	$0,%edx
	subw	$32,EXP(%edi)	/* Can get underflow here */

/* We need to shift left by 1 - 31 bits */
L_shift_1:
	bsrl	%eax,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx
	shld	%cl,%ebx,%eax
	shld	%cl,%edx,%ebx
	shl	%cl,%edx
	subw	%cx,EXP(%edi)	/* Can get underflow here */

L_round:
	jmp	fpu_reg_round	/* Round the result */

#ifdef PARANOID
L_bugged_1:
	pushl	EX_INTERNAL|0x206
	call	EXCEPTION
	pop	%ebx
	jmp	L_error_exit

L_bugged_2:
	pushl	EX_INTERNAL|0x209
	call	EXCEPTION
	pop	%ebx
	jmp	L_error_exit

L_bugged_3:
	pushl	EX_INTERNAL|0x210
	call	EXCEPTION
	pop	%ebx
	jmp	L_error_exit

L_bugged_4:
	pushl	EX_INTERNAL|0x211
	call	EXCEPTION
	pop	%ebx
	jmp	L_error_exit

L_bugged:
	pushl	EX_INTERNAL|0x212
	call	EXCEPTION
	pop	%ebx
	jmp	L_error_exit

L_error_exit:
	movl	$-1,%eax

#endif /* PARANOID */

L_exit:
	popl	%ebx
	popl	%edi
	popl	%esi
	leave
	ret
ENDPROC(FPU_u_sub)
AirFortressIlikara/LS2K0300-linux-4.19
1,607
arch/x86/math-emu/div_small.S
/* SPDX-License-Identifier: GPL-2.0 */
	.file	"div_small.S"
/*---------------------------------------------------------------------------+
 |  div_small.S                                                              |
 |                                                                           |
 | Divide a 64 bit integer by a 32 bit integer & return remainder.           |
 |                                                                           |
 | Copyright (C) 1992,1995                                                   |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 |                                                                           |
 +---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------+
 |    unsigned long FPU_div_small(unsigned long long *x, unsigned long y)    |
 +---------------------------------------------------------------------------*/

#include "fpu_emu.h"

.text
/*
 * Long division in two 32-bit steps: divide the ms word first, then
 * feed its remainder (left in %edx by divl) into the ls-word divide.
 * The 64-bit quotient replaces *x; the final remainder is returned.
 */
ENTRY(FPU_div_small)
	pushl	%ebp
	movl	%esp,%ebp

	pushl	%esi

	movl	PARAM1,%esi	/* pointer to num */
	movl	PARAM2,%ecx	/* The denominator */

	movl	4(%esi),%eax	/* Get the current num msw */
	xorl	%edx,%edx	/* clear %edx before the first divl */
	divl	%ecx

	movl	%eax,4(%esi)

	movl	(%esi),%eax	/* Get the num lsw */
	divl	%ecx		/* %edx:%eax = remainder:lsw */

	movl	%eax,(%esi)

	movl	%edx,%eax	/* Return the remainder in eax */

	popl	%esi

	leave
	ret
ENDPROC(FPU_div_small)
AirFortressIlikara/LS2K0300-linux-4.19
4,030
arch/x86/math-emu/reg_u_add.S
/* SPDX-License-Identifier: GPL-2.0 */
	.file	"reg_u_add.S"
/*---------------------------------------------------------------------------+
 |  reg_u_add.S                                                              |
 |                                                                           |
 |  Add two valid (TAG_Valid) FPU_REG numbers, of the same sign, and put the |
 |  result in a destination FPU_REG.                                         |
 |                                                                           |
 | Copyright (C) 1992,1993,1995,1997                                         |
 |                  W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
 |                  E-mail billm@suburbia.net                                |
 |                                                                           |
 | Call from C as:                                                           |
 |   int FPU_u_add(FPU_REG *arg1, FPU_REG *arg2, FPU_REG *answ,              |
 |                 int control_w)                                            |
 |    Return value is the tag of the answer, or-ed with FPU_Exception if     |
 |    one was raised, or -1 on internal error.                               |
 |                                                                           |
 +---------------------------------------------------------------------------*/

/*
 |    Kernel addition routine FPU_u_add(reg *arg1, reg *arg2, reg *answ).
 |    Takes two valid reg f.p. numbers (TAG_Valid), which are
 |    treated as unsigned numbers,
 |    and returns their sum as a TAG_Valid or TAG_Special f.p. number.
 |    The returned number is normalized.
 |    Basic checks are performed if PARANOID is defined.
 */

#include "exception.h"
#include "fpu_emu.h"
#include "control_w.h"

.text
ENTRY(FPU_u_add)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%esi
	pushl	%edi
	pushl	%ebx

	movl	PARAM1,%esi	/* source 1 */
	movl	PARAM2,%edi	/* source 2 */

	movl	PARAM6,%ecx
	movl	%ecx,%edx
	subl	PARAM7,%ecx	/* exp1 - exp2 */
	jge	L_arg1_larger

	/* num1 is smaller: swap roles, so that %esi always points to
	   the number with the larger exponent. */
	movl	SIGL(%esi),%ebx
	movl	SIGH(%esi),%eax

	movl	%edi,%esi
	movl	PARAM7,%edx
	negw	%cx
	jmp	L_accum_loaded

L_arg1_larger:
	/* num1 has larger or equal exponent */
	movl	SIGL(%edi),%ebx
	movl	SIGH(%edi),%eax

L_accum_loaded:
	movl	PARAM3,%edi	/* destination */
	movw	%dx,EXP(%edi)	/* Copy exponent to destination */

	xorl	%edx,%edx	/* clear the extension */

#ifdef PARANOID
	testl	$0x80000000,%eax
	je	L_bugged

	testl	$0x80000000,SIGH(%esi)
	je	L_bugged
#endif /* PARANOID */

/* The number to be shifted is in %eax:%ebx:%edx */
	cmpw	$32,%cx		/* shrd only works for 0..31 bits */
	jnc	L_more_than_31

/* less than 32 bits */
	shrd	%cl,%ebx,%edx
	shrd	%cl,%eax,%ebx
	shr	%cl,%eax
	jmp	L_shift_done

L_more_than_31:
	cmpw	$64,%cx
	jnc	L_more_than_63

	subb	$32,%cl
	jz	L_exactly_32

	shrd	%cl,%eax,%edx
	shr	%cl,%eax
	orl	%ebx,%ebx
	jz	L_more_31_no_low	/* none of the lowest bits is set */

	orl	$1,%edx			/* record the fact in the extension */

L_more_31_no_low:
	movl	%eax,%ebx
	xorl	%eax,%eax
	jmp	L_shift_done

L_exactly_32:
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	jmp	L_shift_done

L_more_than_63:
	cmpw	$65,%cx
	jnc	L_more_than_64

	movl	%eax,%edx
	orl	%ebx,%ebx
	jz	L_more_63_no_low

	orl	$1,%edx
	jmp	L_more_63_no_low

L_more_than_64:
	movl	$1,%edx		/* The shifted nr always at least one '1' */

L_more_63_no_low:
	xorl	%ebx,%ebx
	xorl	%eax,%eax

L_shift_done:
	/* Now do the addition */
	addl	SIGL(%esi),%ebx
	adcl	SIGH(%esi),%eax
	jnc	L_round_the_result

	/* Overflow, adjust the result: shift right one bit, keeping
	   any lost bit as a sticky bit in the extension. */
	rcrl	$1,%eax
	rcrl	$1,%ebx
	rcrl	$1,%edx
	jnc	L_no_bit_lost

	orl	$1,%edx

L_no_bit_lost:
	incw	EXP(%edi)

L_round_the_result:
	jmp	fpu_reg_round	/* Round the result */

#ifdef PARANOID
/* If we ever get here then we have problems! */
L_bugged:
	pushl	EX_INTERNAL|0x201
	call	EXCEPTION
	pop	%ebx
	movl	$-1,%eax
	jmp	L_exit

L_exit:
	popl	%ebx
	popl	%edi
	popl	%esi
	leave
	ret
#endif /* PARANOID */
ENDPROC(FPU_u_add)
AirFortressIlikara/LS2K0300-linux-4.19
3,681
arch/x86/math-emu/reg_norm.S
/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  reg_norm.S                                                               |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995,1997                                    |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@suburbia.net               |
 |                                                                           |
 | Normalize the value in a FPU_REG.                                         |
 |                                                                           |
 | Call from C as:                                                           |
 |   int FPU_normalize(FPU_REG *n)                                           |
 |                                                                           |
 |   int FPU_normalize_nuo(FPU_REG *n)                                       |
 |                                                                           |
 |    Return value is the tag of the answer, or-ed with FPU_Exception if     |
 |    one was raised, or -1 on internal error.                               |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "fpu_emu.h"

.text
ENTRY(FPU_normalize)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx

	movl	PARAM1,%ebx

	movl	SIGH(%ebx),%edx
	movl	SIGL(%ebx),%eax

	orl	%edx,%edx	/* ms bits */
	js	L_done		/* Already normalized */
	jnz	L_shift_1	/* Shift left 1 - 31 bits */

	orl	%eax,%eax
	jz	L_zero		/* The contents are zero */

	/* ms word zero: shift left by one whole word first. */
	movl	%eax,%edx
	xorl	%eax,%eax
	subw	$32,EXP(%ebx)	/* This can cause an underflow */

/* We need to shift left by 1 - 31 bits */
L_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx
	shld	%cl,%eax,%edx
	shl	%cl,%eax
	subw	%cx,EXP(%ebx)	/* This can cause an underflow */

	movl	%edx,SIGH(%ebx)
	movl	%eax,SIGL(%ebx)

L_done:
	cmpw	EXP_OVER,EXP(%ebx)
	jge	L_overflow

	cmpw	EXP_UNDER,EXP(%ebx)
	jle	L_underflow

L_exit_valid:
	movl	TAG_Valid,%eax

	/* Convert the exponent to 80x87 form. */
	addw	EXTENDED_Ebias,EXP(%ebx)
	andw	$0x7fff,EXP(%ebx)

L_exit:
	popl	%ebx
	leave
	ret

L_zero:
	movw	$0,EXP(%ebx)
	movl	TAG_Zero,%eax
	jmp	L_exit

L_underflow:
	/* Convert the exponent to 80x87 form. */
	addw	EXTENDED_Ebias,EXP(%ebx)
	push	%ebx
	call	arith_underflow
	pop	%ebx
	jmp	L_exit

L_overflow:
	/* Convert the exponent to 80x87 form. */
	addw	EXTENDED_Ebias,EXP(%ebx)
	push	%ebx
	call	arith_overflow
	pop	%ebx
	jmp	L_exit
ENDPROC(FPU_normalize)



/* Normalise without reporting underflow or overflow */
ENTRY(FPU_normalize_nuo)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx

	movl	PARAM1,%ebx

	movl	SIGH(%ebx),%edx
	movl	SIGL(%ebx),%eax

	orl	%edx,%edx	/* ms bits */
	js	L_exit_nuo_valid	/* Already normalized */
	jnz	L_nuo_shift_1	/* Shift left 1 - 31 bits */

	orl	%eax,%eax
	jz	L_exit_nuo_zero	/* The contents are zero */

	/* ms word zero: shift left by one whole word first. */
	movl	%eax,%edx
	xorl	%eax,%eax
	subw	$32,EXP(%ebx)	/* This can cause an underflow */

/* We need to shift left by 1 - 31 bits */
L_nuo_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx
	shld	%cl,%eax,%edx
	shl	%cl,%eax
	subw	%cx,EXP(%ebx)	/* This can cause an underflow */

	movl	%edx,SIGH(%ebx)
	movl	%eax,SIGL(%ebx)

L_exit_nuo_valid:
	movl	TAG_Valid,%eax

	popl	%ebx
	leave
	ret

L_exit_nuo_zero:
	movl	TAG_Zero,%eax
	movw	EXP_UNDER,EXP(%ebx)

	popl	%ebx
	leave
	ret
ENDPROC(FPU_normalize_nuo)
AirFortressIlikara/LS2K0300-linux-4.19
6,284
arch/x86/math-emu/wm_shrx.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "wm_shrx.S" /*---------------------------------------------------------------------------+ | wm_shrx.S | | | | 64 bit right shift functions | | | | Copyright (C) 1992,1995 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@jacobi.maths.monash.edu.au | | | | Call from C as: | | unsigned FPU_shrx(void *arg1, unsigned arg2) | | and | | unsigned FPU_shrxs(void *arg1, unsigned arg2) | | | +---------------------------------------------------------------------------*/ #include "fpu_emu.h" .text /*---------------------------------------------------------------------------+ | unsigned FPU_shrx(void *arg1, unsigned arg2) | | | | Extended shift right function. | | Fastest for small shifts. | | Shifts the 64 bit quantity pointed to by the first arg (arg1) | | right by the number of bits specified by the second arg (arg2). | | Forms a 96 bit quantity from the 64 bit arg and eax: | | [ 64 bit arg ][ eax ] | | shift right ---------> | | The eax register is initialized to 0 before the shifting. | | Results returned in the 64 bit arg and eax. 
| +---------------------------------------------------------------------------*/ ENTRY(FPU_shrx) push %ebp movl %esp,%ebp pushl %esi movl PARAM2,%ecx movl PARAM1,%esi cmpl $32,%ecx /* shrd only works for 0..31 bits */ jnc L_more_than_31 /* less than 32 bits */ pushl %ebx movl (%esi),%ebx /* lsl */ movl 4(%esi),%edx /* msl */ xorl %eax,%eax /* extension */ shrd %cl,%ebx,%eax shrd %cl,%edx,%ebx shr %cl,%edx movl %ebx,(%esi) movl %edx,4(%esi) popl %ebx popl %esi leave ret L_more_than_31: cmpl $64,%ecx jnc L_more_than_63 subb $32,%cl movl (%esi),%eax /* lsl */ movl 4(%esi),%edx /* msl */ shrd %cl,%edx,%eax shr %cl,%edx movl %edx,(%esi) movl $0,4(%esi) popl %esi leave ret L_more_than_63: cmpl $96,%ecx jnc L_more_than_95 subb $64,%cl movl 4(%esi),%eax /* msl */ shr %cl,%eax xorl %edx,%edx movl %edx,(%esi) movl %edx,4(%esi) popl %esi leave ret L_more_than_95: xorl %eax,%eax movl %eax,(%esi) movl %eax,4(%esi) popl %esi leave ret ENDPROC(FPU_shrx) /*---------------------------------------------------------------------------+ | unsigned FPU_shrxs(void *arg1, unsigned arg2) | | | | Extended shift right function (optimized for small floating point | | integers). | | Shifts the 64 bit quantity pointed to by the first arg (arg1) | | right by the number of bits specified by the second arg (arg2). | | Forms a 96 bit quantity from the 64 bit arg and eax: | | [ 64 bit arg ][ eax ] | | shift right ---------> | | The eax register is initialized to 0 before the shifting. | | The lower 8 bits of eax are lost and replaced by a flag which is | | set (to 0x01) if any bit, apart from the first one, is set in the | | part which has been shifted out of the arg. | | Results returned in the 64 bit arg and eax. 
| +---------------------------------------------------------------------------*/ ENTRY(FPU_shrxs) push %ebp movl %esp,%ebp pushl %esi pushl %ebx movl PARAM2,%ecx movl PARAM1,%esi cmpl $64,%ecx /* shrd only works for 0..31 bits */ jnc Ls_more_than_63 cmpl $32,%ecx /* shrd only works for 0..31 bits */ jc Ls_less_than_32 /* We got here without jumps by assuming that the most common requirement is for small integers */ /* Shift by [32..63] bits */ subb $32,%cl movl (%esi),%eax /* lsl */ movl 4(%esi),%edx /* msl */ xorl %ebx,%ebx shrd %cl,%eax,%ebx shrd %cl,%edx,%eax shr %cl,%edx orl %ebx,%ebx /* test these 32 bits */ setne %bl test $0x7fffffff,%eax /* and 31 bits here */ setne %bh orw %bx,%bx /* Any of the 63 bit set ? */ setne %al movl %edx,(%esi) movl $0,4(%esi) popl %ebx popl %esi leave ret /* Shift by [0..31] bits */ Ls_less_than_32: movl (%esi),%ebx /* lsl */ movl 4(%esi),%edx /* msl */ xorl %eax,%eax /* extension */ shrd %cl,%ebx,%eax shrd %cl,%edx,%ebx shr %cl,%edx test $0x7fffffff,%eax /* only need to look at eax here */ setne %al movl %ebx,(%esi) movl %edx,4(%esi) popl %ebx popl %esi leave ret /* Shift by [64..95] bits */ Ls_more_than_63: cmpl $96,%ecx jnc Ls_more_than_95 subb $64,%cl movl (%esi),%ebx /* lsl */ movl 4(%esi),%eax /* msl */ xorl %edx,%edx /* extension */ shrd %cl,%ebx,%edx shrd %cl,%eax,%ebx shr %cl,%eax orl %ebx,%edx setne %bl test $0x7fffffff,%eax /* only need to look at eax here */ setne %bh orw %bx,%bx setne %al xorl %edx,%edx movl %edx,(%esi) /* set to zero */ movl %edx,4(%esi) /* set to zero */ popl %ebx popl %esi leave ret Ls_more_than_95: /* Shift by [96..inf) bits */ xorl %eax,%eax movl (%esi),%ebx orl 4(%esi),%ebx setne %al xorl %ebx,%ebx movl %ebx,(%esi) movl %ebx,4(%esi) popl %ebx popl %esi leave ret ENDPROC(FPU_shrxs)
AirFortressIlikara/LS2K0300-linux-4.19
12,441
arch/x86/math-emu/reg_u_div.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "reg_u_div.S" /*---------------------------------------------------------------------------+ | reg_u_div.S | | | | Divide one FPU_REG by another and put the result in a destination FPU_REG.| | | | Copyright (C) 1992,1993,1995,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | Call from C as: | | int FPU_u_div(FPU_REG *a, FPU_REG *b, FPU_REG *dest, | | unsigned int control_word, char *sign) | | | | Does not compute the destination exponent, but does adjust it. | | | | Return value is the tag of the answer, or-ed with FPU_Exception if | | one was raised, or -1 on internal error. | +---------------------------------------------------------------------------*/ #include "exception.h" #include "fpu_emu.h" #include "control_w.h" /* #define dSIGL(x) (x) */ /* #define dSIGH(x) 4(x) */ #ifndef NON_REENTRANT_FPU /* Local storage on the stack: Result: FPU_accum_3:FPU_accum_2:FPU_accum_1:FPU_accum_0 Overflow flag: ovfl_flag */ #define FPU_accum_3 -4(%ebp) #define FPU_accum_2 -8(%ebp) #define FPU_accum_1 -12(%ebp) #define FPU_accum_0 -16(%ebp) #define FPU_result_1 -20(%ebp) #define FPU_result_2 -24(%ebp) #define FPU_ovfl_flag -28(%ebp) #else .data /* Local storage in a static area: Result: FPU_accum_3:FPU_accum_2:FPU_accum_1:FPU_accum_0 Overflow flag: ovfl_flag */ .align 4,0 FPU_accum_3: .long 0 FPU_accum_2: .long 0 FPU_accum_1: .long 0 FPU_accum_0: .long 0 FPU_result_1: .long 0 FPU_result_2: .long 0 FPU_ovfl_flag: .byte 0 #endif /* NON_REENTRANT_FPU */ #define REGA PARAM1 #define REGB PARAM2 #define DEST PARAM3 .text ENTRY(FPU_u_div) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU subl $28,%esp #endif /* NON_REENTRANT_FPU */ pushl %esi pushl %edi pushl %ebx movl REGA,%esi movl REGB,%ebx movl DEST,%edi movswl 
EXP(%esi),%edx movswl EXP(%ebx),%eax subl %eax,%edx addl EXP_BIAS,%edx /* A denormal and a large number can cause an exponent underflow */ cmpl EXP_WAY_UNDER,%edx jg xExp_not_underflow /* Set to a really low value allow correct handling */ movl EXP_WAY_UNDER,%edx xExp_not_underflow: movw %dx,EXP(%edi) #ifdef PARANOID /* testl $0x80000000, SIGH(%esi) // Dividend */ /* je L_bugged */ testl $0x80000000, SIGH(%ebx) /* Divisor */ je L_bugged #endif /* PARANOID */ /* Check if the divisor can be treated as having just 32 bits */ cmpl $0,SIGL(%ebx) jnz L_Full_Division /* Can't do a quick divide */ /* We should be able to zip through the division here */ movl SIGH(%ebx),%ecx /* The divisor */ movl SIGH(%esi),%edx /* Dividend */ movl SIGL(%esi),%eax /* Dividend */ cmpl %ecx,%edx setaeb FPU_ovfl_flag /* Keep a record */ jb L_no_adjust subl %ecx,%edx /* Prevent the overflow */ L_no_adjust: /* Divide the 64 bit number by the 32 bit denominator */ divl %ecx movl %eax,FPU_result_2 /* Work on the remainder of the first division */ xorl %eax,%eax divl %ecx movl %eax,FPU_result_1 /* Work on the remainder of the 64 bit division */ xorl %eax,%eax divl %ecx testb $255,FPU_ovfl_flag /* was the num > denom ? */ je L_no_overflow /* Do the shifting here */ /* increase the exponent */ incw EXP(%edi) /* shift the mantissa right one bit */ stc /* To set the ms bit */ rcrl FPU_result_2 rcrl FPU_result_1 rcrl %eax L_no_overflow: jmp LRound_precision /* Do the rounding as required */ /*---------------------------------------------------------------------------+ | Divide: Return arg1/arg2 to arg3. | | | | This routine does not use the exponents of arg1 and arg2, but does | | adjust the exponent of arg3. 
| | | | The maximum returned value is (ignoring exponents) | | .ffffffff ffffffff | | ------------------ = 1.ffffffff fffffffe | | .80000000 00000000 | | and the minimum is | | .80000000 00000000 | | ------------------ = .80000000 00000001 (rounded) | | .ffffffff ffffffff | | | +---------------------------------------------------------------------------*/ L_Full_Division: /* Save extended dividend in local register */ movl SIGL(%esi),%eax movl %eax,FPU_accum_2 movl SIGH(%esi),%eax movl %eax,FPU_accum_3 xorl %eax,%eax movl %eax,FPU_accum_1 /* zero the extension */ movl %eax,FPU_accum_0 /* zero the extension */ movl SIGL(%esi),%eax /* Get the current num */ movl SIGH(%esi),%edx /*----------------------------------------------------------------------*/ /* Initialization done. Do the first 32 bits. */ movb $0,FPU_ovfl_flag cmpl SIGH(%ebx),%edx /* Test for imminent overflow */ jb LLess_than_1 ja LGreater_than_1 cmpl SIGL(%ebx),%eax jb LLess_than_1 LGreater_than_1: /* The dividend is greater or equal, would cause overflow */ setaeb FPU_ovfl_flag /* Keep a record */ subl SIGL(%ebx),%eax sbbl SIGH(%ebx),%edx /* Prevent the overflow */ movl %eax,FPU_accum_2 movl %edx,FPU_accum_3 LLess_than_1: /* At this point, we have a dividend < divisor, with a record of adjustment in FPU_ovfl_flag */ /* We will divide by a number which is too large */ movl SIGH(%ebx),%ecx addl $1,%ecx jnc LFirst_div_not_1 /* here we need to divide by 100000000h, i.e., no division at all.. 
*/ mov %edx,%eax jmp LFirst_div_done LFirst_div_not_1: divl %ecx /* Divide the numerator by the augmented denom ms dw */ LFirst_div_done: movl %eax,FPU_result_2 /* Put the result in the answer */ mull SIGH(%ebx) /* mul by the ms dw of the denom */ subl %eax,FPU_accum_2 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_3 movl FPU_result_2,%eax /* Get the result back */ mull SIGL(%ebx) /* now mul the ls dw of the denom */ subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 sbbl $0,FPU_accum_3 je LDo_2nd_32_bits /* Must check for non-zero result here */ #ifdef PARANOID jb L_bugged_1 #endif /* PARANOID */ /* need to subtract another once of the denom */ incl FPU_result_2 /* Correct the answer */ movl SIGL(%ebx),%eax movl SIGH(%ebx),%edx subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 #ifdef PARANOID sbbl $0,FPU_accum_3 jne L_bugged_1 /* Must check for non-zero result here */ #endif /* PARANOID */ /*----------------------------------------------------------------------*/ /* Half of the main problem is done, there is just a reduced numerator to handle now. 
Work with the second 32 bits, FPU_accum_0 not used from now on */ LDo_2nd_32_bits: movl FPU_accum_2,%edx /* get the reduced num */ movl FPU_accum_1,%eax /* need to check for possible subsequent overflow */ cmpl SIGH(%ebx),%edx jb LDo_2nd_div ja LPrevent_2nd_overflow cmpl SIGL(%ebx),%eax jb LDo_2nd_div LPrevent_2nd_overflow: /* The numerator is greater or equal, would cause overflow */ /* prevent overflow */ subl SIGL(%ebx),%eax sbbl SIGH(%ebx),%edx movl %edx,FPU_accum_2 movl %eax,FPU_accum_1 incl FPU_result_2 /* Reflect the subtraction in the answer */ #ifdef PARANOID je L_bugged_2 /* Can't bump the result to 1.0 */ #endif /* PARANOID */ LDo_2nd_div: cmpl $0,%ecx /* augmented denom msw */ jnz LSecond_div_not_1 /* %ecx == 0, we are dividing by 1.0 */ mov %edx,%eax jmp LSecond_div_done LSecond_div_not_1: divl %ecx /* Divide the numerator by the denom ms dw */ LSecond_div_done: movl %eax,FPU_result_1 /* Put the result in the answer */ mull SIGH(%ebx) /* mul by the ms dw of the denom */ subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 #endif /* PARANOID */ movl FPU_result_1,%eax /* Get the result back */ mull SIGL(%ebx) /* now mul the ls dw of the denom */ subl %eax,FPU_accum_0 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_1 /* Subtract from the num local reg */ sbbl $0,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 #endif /* PARANOID */ jz LDo_3rd_32_bits #ifdef PARANOID cmpl $1,FPU_accum_2 jne L_bugged_2 #endif /* PARANOID */ /* need to subtract another once of the denom */ movl SIGL(%ebx),%eax movl SIGH(%ebx),%edx subl %eax,FPU_accum_0 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_1 sbbl $0,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 jne L_bugged_2 #endif /* PARANOID */ addl $1,FPU_result_1 /* Correct the answer */ adcl $0,FPU_result_2 #ifdef PARANOID jc L_bugged_2 /* Must check for non-zero result here */ #endif /* PARANOID */ 
/*----------------------------------------------------------------------*/ /* The division is essentially finished here, we just need to perform tidying operations. Deal with the 3rd 32 bits */ LDo_3rd_32_bits: movl FPU_accum_1,%edx /* get the reduced num */ movl FPU_accum_0,%eax /* need to check for possible subsequent overflow */ cmpl SIGH(%ebx),%edx /* denom */ jb LRound_prep ja LPrevent_3rd_overflow cmpl SIGL(%ebx),%eax /* denom */ jb LRound_prep LPrevent_3rd_overflow: /* prevent overflow */ subl SIGL(%ebx),%eax sbbl SIGH(%ebx),%edx movl %edx,FPU_accum_1 movl %eax,FPU_accum_0 addl $1,FPU_result_1 /* Reflect the subtraction in the answer */ adcl $0,FPU_result_2 jne LRound_prep jnc LRound_prep /* This is a tricky spot, there is an overflow of the answer */ movb $255,FPU_ovfl_flag /* Overflow -> 1.000 */ LRound_prep: /* * Prepare for rounding. * To test for rounding, we just need to compare 2*accum with the * denom. */ movl FPU_accum_0,%ecx movl FPU_accum_1,%edx movl %ecx,%eax orl %edx,%eax jz LRound_ovfl /* The accumulator contains zero. */ /* Multiply by 2 */ clc rcll $1,%ecx rcll $1,%edx jc LRound_large /* No need to compare, denom smaller */ subl SIGL(%ebx),%ecx sbbl SIGH(%ebx),%edx jnc LRound_not_small movl $0x70000000,%eax /* Denom was larger */ jmp LRound_ovfl LRound_not_small: jnz LRound_large movl $0x80000000,%eax /* Remainder was exactly 1/2 denom */ jmp LRound_ovfl LRound_large: movl $0xff000000,%eax /* Denom was smaller */ LRound_ovfl: /* We are now ready to deal with rounding, but first we must get the bits properly aligned */ testb $255,FPU_ovfl_flag /* was the num > denom ? 
*/ je LRound_precision incw EXP(%edi) /* shift the mantissa right one bit */ stc /* Will set the ms bit */ rcrl FPU_result_2 rcrl FPU_result_1 rcrl %eax /* Round the result as required */ LRound_precision: decw EXP(%edi) /* binary point between 1st & 2nd bits */ movl %eax,%edx movl FPU_result_1,%ebx movl FPU_result_2,%eax jmp fpu_reg_round #ifdef PARANOID /* The logic is wrong if we got here */ L_bugged: pushl EX_INTERNAL|0x202 call EXCEPTION pop %ebx jmp L_exit L_bugged_1: pushl EX_INTERNAL|0x203 call EXCEPTION pop %ebx jmp L_exit L_bugged_2: pushl EX_INTERNAL|0x204 call EXCEPTION pop %ebx jmp L_exit L_exit: movl $-1,%eax popl %ebx popl %edi popl %esi leave ret #endif /* PARANOID */ ENDPROC(FPU_u_div)
AirFortressIlikara/LS2K0300-linux-4.19
2,479
arch/x86/lib/hweight.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/export.h> #include <asm/asm.h> /* * unsigned int __sw_hweight32(unsigned int w) * %rdi: w */ ENTRY(__sw_hweight32) #ifdef CONFIG_X86_64 movl %edi, %eax # w #endif __ASM_SIZE(push,) %__ASM_REG(dx) movl %eax, %edx # w -> t shrl %edx # t >>= 1 andl $0x55555555, %edx # t &= 0x55555555 subl %edx, %eax # w -= t movl %eax, %edx # w -> t shrl $2, %eax # w_tmp >>= 2 andl $0x33333333, %edx # t &= 0x33333333 andl $0x33333333, %eax # w_tmp &= 0x33333333 addl %edx, %eax # w = w_tmp + t movl %eax, %edx # w -> t shrl $4, %edx # t >>= 4 addl %edx, %eax # w_tmp += t andl $0x0f0f0f0f, %eax # w_tmp &= 0x0f0f0f0f imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101 shrl $24, %eax # w = w_tmp >> 24 __ASM_SIZE(pop,) %__ASM_REG(dx) ret ENDPROC(__sw_hweight32) EXPORT_SYMBOL(__sw_hweight32) ENTRY(__sw_hweight64) #ifdef CONFIG_X86_64 pushq %rdi pushq %rdx movq %rdi, %rdx # w -> t movabsq $0x5555555555555555, %rax shrq %rdx # t >>= 1 andq %rdx, %rax # t &= 0x5555555555555555 movabsq $0x3333333333333333, %rdx subq %rax, %rdi # w -= t movq %rdi, %rax # w -> t shrq $2, %rdi # w_tmp >>= 2 andq %rdx, %rax # t &= 0x3333333333333333 andq %rdi, %rdx # w_tmp &= 0x3333333333333333 addq %rdx, %rax # w = w_tmp + t movq %rax, %rdx # w -> t shrq $4, %rdx # t >>= 4 addq %rdx, %rax # w_tmp += t movabsq $0x0f0f0f0f0f0f0f0f, %rdx andq %rdx, %rax # w_tmp &= 0x0f0f0f0f0f0f0f0f movabsq $0x0101010101010101, %rdx imulq %rdx, %rax # w_tmp *= 0x0101010101010101 shrq $56, %rax # w = w_tmp >> 56 popq %rdx popq %rdi ret #else /* CONFIG_X86_32 */ /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */ pushl %ecx call __sw_hweight32 movl %eax, %ecx # stash away result movl %edx, %eax # second part of input call __sw_hweight32 addl %ecx, %eax # result popl %ecx ret #endif ENDPROC(__sw_hweight64) EXPORT_SYMBOL(__sw_hweight64)
AirFortressIlikara/LS2K0300-linux-4.19
2,553
arch/x86/lib/atomic64_386_32.S
/* * atomic64_t for 386/486 * * Copyright © 2010 Luca Barbieri * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> #include <asm/alternative-asm.h> /* if you want SMP support, implement these with real spinlocks */ .macro LOCK reg pushfl cli .endm .macro UNLOCK reg popfl .endm #define BEGIN(op) \ .macro endp; \ ENDPROC(atomic64_##op##_386); \ .purgem endp; \ .endm; \ ENTRY(atomic64_##op##_386); \ LOCK v; #define ENDP endp #define RET \ UNLOCK v; \ ret #define RET_ENDP \ RET; \ ENDP #define v %ecx BEGIN(read) movl (v), %eax movl 4(v), %edx RET_ENDP #undef v #define v %esi BEGIN(set) movl %ebx, (v) movl %ecx, 4(v) RET_ENDP #undef v #define v %esi BEGIN(xchg) movl (v), %eax movl 4(v), %edx movl %ebx, (v) movl %ecx, 4(v) RET_ENDP #undef v #define v %ecx BEGIN(add) addl %eax, (v) adcl %edx, 4(v) RET_ENDP #undef v #define v %ecx BEGIN(add_return) addl (v), %eax adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP #undef v #define v %ecx BEGIN(sub) subl %eax, (v) sbbl %edx, 4(v) RET_ENDP #undef v #define v %ecx BEGIN(sub_return) negl %edx negl %eax sbbl $0, %edx addl (v), %eax adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP #undef v #define v %esi BEGIN(inc) addl $1, (v) adcl $0, 4(v) RET_ENDP #undef v #define v %esi BEGIN(inc_return) movl (v), %eax movl 4(v), %edx addl $1, %eax adcl $0, %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP #undef v #define v %esi BEGIN(dec) subl $1, (v) sbbl $0, 4(v) RET_ENDP #undef v #define v %esi BEGIN(dec_return) movl (v), %eax movl 4(v), %edx subl $1, %eax sbbl $0, %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP #undef v #define v %esi BEGIN(add_unless) addl %eax, %ecx adcl %edx, %edi addl (v), %eax adcl 4(v), %edx cmpl %eax, %ecx je 3f 1: movl %eax, (v) movl %edx, 4(v) movl $1, %eax 2: RET 3: cmpl %edx, %edi 
jne 1b xorl %eax, %eax jmp 2b ENDP #undef v #define v %esi BEGIN(inc_not_zero) movl (v), %eax movl 4(v), %edx testl %eax, %eax je 3f 1: addl $1, %eax adcl $0, %edx movl %eax, (v) movl %edx, 4(v) movl $1, %eax 2: RET 3: testl %edx, %edx jne 1b jmp 2b ENDP #undef v #define v %esi BEGIN(dec_if_positive) movl (v), %eax movl 4(v), %edx subl $1, %eax sbbl $0, %edx js 1f movl %eax, (v) movl %edx, 4(v) 1: RET_ENDP #undef v
AirFortressIlikara/LS2K0300-linux-4.19
6,034
arch/x86/lib/memcpy_64.S
/* Copyright 2002 Andi Kleen */ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/cpufeatures.h> #include <asm/mcsafe_test.h> #include <asm/alternative-asm.h> #include <asm/export.h> /* * We build a jump to memcpy_orig by default which gets NOPped out on * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs * to a jmp to memcpy_erms which does the REP; MOVSB mem copy. */ /* * memcpy - Copy a memory block. * * Input: * rdi destination * rsi source * rdx count * * Output: * rax original destination */ ENTRY(__memcpy) .weak memcpy .p2align 4, 0x90 memcpy: ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memcpy_erms", X86_FEATURE_ERMS movq %rdi, %rax movq %rdx, %rcx shrq $3, %rcx andl $7, %edx rep movsq movl %edx, %ecx rep movsb ret ENDPROC(memcpy) ENDPROC(__memcpy) EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL(__memcpy) /* * memcpy_erms() - enhanced fast string memcpy. This is faster and * simpler than memcpy. Use memcpy_erms when possible. */ ENTRY(memcpy_erms) movq %rdi, %rax movq %rdx, %rcx rep movsb ret ENDPROC(memcpy_erms) ENTRY(memcpy_orig) movq %rdi, %rax cmpq $0x20, %rdx jb .Lhandle_tail /* * We check whether memory false dependence could occur, * then jump to corresponding copy mode. */ cmp %dil, %sil jl .Lcopy_backward subq $0x20, %rdx .Lcopy_forward_loop: subq $0x20, %rdx /* * Move in blocks of 4x8 bytes: */ movq 0*8(%rsi), %r8 movq 1*8(%rsi), %r9 movq 2*8(%rsi), %r10 movq 3*8(%rsi), %r11 leaq 4*8(%rsi), %rsi movq %r8, 0*8(%rdi) movq %r9, 1*8(%rdi) movq %r10, 2*8(%rdi) movq %r11, 3*8(%rdi) leaq 4*8(%rdi), %rdi jae .Lcopy_forward_loop addl $0x20, %edx jmp .Lhandle_tail .Lcopy_backward: /* * Calculate copy position to tail. */ addq %rdx, %rsi addq %rdx, %rdi subq $0x20, %rdx /* * At most 3 ALU operations in one cycle, * so append NOPS in the same 16 bytes trunk. 
*/ .p2align 4 .Lcopy_backward_loop: subq $0x20, %rdx movq -1*8(%rsi), %r8 movq -2*8(%rsi), %r9 movq -3*8(%rsi), %r10 movq -4*8(%rsi), %r11 leaq -4*8(%rsi), %rsi movq %r8, -1*8(%rdi) movq %r9, -2*8(%rdi) movq %r10, -3*8(%rdi) movq %r11, -4*8(%rdi) leaq -4*8(%rdi), %rdi jae .Lcopy_backward_loop /* * Calculate copy position to head. */ addl $0x20, %edx subq %rdx, %rsi subq %rdx, %rdi .Lhandle_tail: cmpl $16, %edx jb .Lless_16bytes /* * Move data from 16 bytes to 31 bytes. */ movq 0*8(%rsi), %r8 movq 1*8(%rsi), %r9 movq -2*8(%rsi, %rdx), %r10 movq -1*8(%rsi, %rdx), %r11 movq %r8, 0*8(%rdi) movq %r9, 1*8(%rdi) movq %r10, -2*8(%rdi, %rdx) movq %r11, -1*8(%rdi, %rdx) retq .p2align 4 .Lless_16bytes: cmpl $8, %edx jb .Lless_8bytes /* * Move data from 8 bytes to 15 bytes. */ movq 0*8(%rsi), %r8 movq -1*8(%rsi, %rdx), %r9 movq %r8, 0*8(%rdi) movq %r9, -1*8(%rdi, %rdx) retq .p2align 4 .Lless_8bytes: cmpl $4, %edx jb .Lless_3bytes /* * Move data from 4 bytes to 7 bytes. */ movl (%rsi), %ecx movl -4(%rsi, %rdx), %r8d movl %ecx, (%rdi) movl %r8d, -4(%rdi, %rdx) retq .p2align 4 .Lless_3bytes: subl $1, %edx jb .Lend /* * Move data from 1 bytes to 3 bytes. */ movzbl (%rsi), %ecx jz .Lstore_1byte movzbq 1(%rsi), %r8 movzbq (%rsi, %rdx), %r9 movb %r8b, 1(%rdi) movb %r9b, (%rdi, %rdx) .Lstore_1byte: movb %cl, (%rdi) .Lend: retq ENDPROC(memcpy_orig) #ifndef CONFIG_UML MCSAFE_TEST_CTL /* * __memcpy_mcsafe - memory copy with machine check exception handling * Note that we only catch machine checks when reading the source addresses. * Writes to target are posted and don't generate machine checks. */ ENTRY(__memcpy_mcsafe) cmpl $8, %edx /* Less than 8 bytes? 
Go to byte copy loop */ jb .L_no_whole_words /* Check for bad alignment of source */ testl $7, %esi /* Already aligned */ jz .L_8byte_aligned /* Copy one byte at a time until source is 8-byte aligned */ movl %esi, %ecx andl $7, %ecx subl $8, %ecx negl %ecx subl %ecx, %edx .L_read_leading_bytes: movb (%rsi), %al MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes MCSAFE_TEST_DST %rdi 1 .E_leading_bytes .L_write_leading_bytes: movb %al, (%rdi) incq %rsi incq %rdi decl %ecx jnz .L_read_leading_bytes .L_8byte_aligned: movl %edx, %ecx andl $7, %edx shrl $3, %ecx jz .L_no_whole_words .L_read_words: movq (%rsi), %r8 MCSAFE_TEST_SRC %rsi 8 .E_read_words MCSAFE_TEST_DST %rdi 8 .E_write_words .L_write_words: movq %r8, (%rdi) addq $8, %rsi addq $8, %rdi decl %ecx jnz .L_read_words /* Any trailing bytes? */ .L_no_whole_words: andl %edx, %edx jz .L_done_memcpy_trap /* Copy trailing bytes */ movl %edx, %ecx .L_read_trailing_bytes: movb (%rsi), %al MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes .L_write_trailing_bytes: movb %al, (%rdi) incq %rsi incq %rdi decl %ecx jnz .L_read_trailing_bytes /* Copy successful. Return zero */ .L_done_memcpy_trap: xorl %eax, %eax .L_done: ret ENDPROC(__memcpy_mcsafe) EXPORT_SYMBOL_GPL(__memcpy_mcsafe) .section .fixup, "ax" /* * Return number of bytes not copied for any failure. Note that * there is no "tail" handling since the source buffer is 8-byte * aligned and poison is cacheline aligned. */ .E_read_words: shll $3, %ecx .E_leading_bytes: addl %edx, %ecx .E_trailing_bytes: mov %ecx, %eax jmp .L_done /* * For write fault handling, given the destination is unaligned, * we handle faults on multi-byte writes with a byte-by-byte * copy up to the write-protected page. 
*/ .E_write_words: shll $3, %ecx addl %edx, %ecx movl %ecx, %edx jmp mcsafe_handle_tail .previous _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes) _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words) _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes) _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes) _ASM_EXTABLE(.L_write_words, .E_write_words) _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes) #endif
AirFortressIlikara/LS2K0300-linux-4.19
7,850
arch/x86/lib/copy_user_64.S
/* * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com> * Copyright 2002 Andi Kleen, SuSE Labs. * Subject to the GNU Public License v2. * * Functions to copy from and to user space. */ #include <linux/linkage.h> #include <asm/current.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> /* * copy_user_generic_unrolled - memory copy with exception handling. * This version is for CPUs like P4 that don't have efficient micro * code for rep movsq * * Input: * rdi destination * rsi source * rdx count * * Output: * eax uncopied bytes or 0 if successful. */ ENTRY(copy_user_generic_unrolled) ASM_STAC cmpl $8,%edx jb 20f /* less then 8 bytes, go to byte copy loop */ ALIGN_DESTINATION movl %edx,%ecx andl $63,%edx shrl $6,%ecx jz .L_copy_short_string 1: movq (%rsi),%r8 2: movq 1*8(%rsi),%r9 3: movq 2*8(%rsi),%r10 4: movq 3*8(%rsi),%r11 5: movq %r8,(%rdi) 6: movq %r9,1*8(%rdi) 7: movq %r10,2*8(%rdi) 8: movq %r11,3*8(%rdi) 9: movq 4*8(%rsi),%r8 10: movq 5*8(%rsi),%r9 11: movq 6*8(%rsi),%r10 12: movq 7*8(%rsi),%r11 13: movq %r8,4*8(%rdi) 14: movq %r9,5*8(%rdi) 15: movq %r10,6*8(%rdi) 16: movq %r11,7*8(%rdi) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi decl %ecx jnz 1b .L_copy_short_string: movl %edx,%ecx andl $7,%edx shrl $3,%ecx jz 20f 18: movq (%rsi),%r8 19: movq %r8,(%rdi) leaq 8(%rsi),%rsi leaq 8(%rdi),%rdi decl %ecx jnz 18b 20: andl %edx,%edx jz 23f movl %edx,%ecx 21: movb (%rsi),%al 22: movb %al,(%rdi) incq %rsi incq %rdi decl %ecx jnz 21b 23: xor %eax,%eax ASM_CLAC ret .section .fixup,"ax" 30: shll $6,%ecx addl %ecx,%edx jmp 60f 40: leal (%rdx,%rcx,8),%edx jmp 60f 50: movl %ecx,%edx 60: jmp copy_user_handle_tail /* ecx is zerorest also */ .previous _ASM_EXTABLE(1b,30b) _ASM_EXTABLE(2b,30b) _ASM_EXTABLE(3b,30b) _ASM_EXTABLE(4b,30b) _ASM_EXTABLE(5b,30b) _ASM_EXTABLE(6b,30b) _ASM_EXTABLE(7b,30b) _ASM_EXTABLE(8b,30b) _ASM_EXTABLE(9b,30b) 
_ASM_EXTABLE(10b,30b) _ASM_EXTABLE(11b,30b) _ASM_EXTABLE(12b,30b) _ASM_EXTABLE(13b,30b) _ASM_EXTABLE(14b,30b) _ASM_EXTABLE(15b,30b) _ASM_EXTABLE(16b,30b) _ASM_EXTABLE(18b,40b) _ASM_EXTABLE(19b,40b) _ASM_EXTABLE(21b,50b) _ASM_EXTABLE(22b,50b) ENDPROC(copy_user_generic_unrolled) EXPORT_SYMBOL(copy_user_generic_unrolled) /* Some CPUs run faster using the string copy instructions. * This is also a lot simpler. Use them when possible. * * Only 4GB of copy is supported. This shouldn't be a problem * because the kernel normally only writes from/to page sized chunks * even if user space passed a longer buffer. * And more would be dangerous because both Intel and AMD have * errata with rep movsq > 4GB. If someone feels the need to fix * this please consider this. * * Input: * rdi destination * rsi source * rdx count * * Output: * eax uncopied bytes or 0 if successful. */ ENTRY(copy_user_generic_string) ASM_STAC cmpl $8,%edx jb 2f /* less than 8 bytes, go to byte copy loop */ ALIGN_DESTINATION movl %edx,%ecx shrl $3,%ecx andl $7,%edx 1: rep movsq 2: movl %edx,%ecx 3: rep movsb xorl %eax,%eax ASM_CLAC ret .section .fixup,"ax" 11: leal (%rdx,%rcx,8),%ecx 12: movl %ecx,%edx /* ecx is zerorest also */ jmp copy_user_handle_tail .previous _ASM_EXTABLE(1b,11b) _ASM_EXTABLE(3b,12b) ENDPROC(copy_user_generic_string) EXPORT_SYMBOL(copy_user_generic_string) /* * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled. * * Input: * rdi destination * rsi source * rdx count * * Output: * eax uncopied bytes or 0 if successful. 
*/ ENTRY(copy_user_enhanced_fast_string) ASM_STAC cmpl $64,%edx jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */ movl %edx,%ecx 1: rep movsb xorl %eax,%eax ASM_CLAC ret .section .fixup,"ax" 12: movl %ecx,%edx /* ecx is zerorest also */ jmp copy_user_handle_tail .previous _ASM_EXTABLE(1b,12b) ENDPROC(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) /* * copy_user_nocache - Uncached memory copy with exception handling * This will force destination out of cache for more performance. * * Note: Cached memory copy is used when destination or size is not * naturally aligned. That is: * - Require 8-byte alignment when size is 8 bytes or larger. * - Require 4-byte alignment when size is 4 bytes. */ ENTRY(__copy_user_nocache) ASM_STAC /* If size is less than 8 bytes, go to 4-byte copy */ cmpl $8,%edx jb .L_4b_nocache_copy_entry /* If destination is not 8-byte aligned, "cache" copy to align it */ ALIGN_DESTINATION /* Set 4x8-byte copy count and remainder */ movl %edx,%ecx andl $63,%edx shrl $6,%ecx jz .L_8b_nocache_copy_entry /* jump if count is 0 */ /* Perform 4x8-byte nocache loop-copy */ .L_4x8b_nocache_copy_loop: 1: movq (%rsi),%r8 2: movq 1*8(%rsi),%r9 3: movq 2*8(%rsi),%r10 4: movq 3*8(%rsi),%r11 5: movnti %r8,(%rdi) 6: movnti %r9,1*8(%rdi) 7: movnti %r10,2*8(%rdi) 8: movnti %r11,3*8(%rdi) 9: movq 4*8(%rsi),%r8 10: movq 5*8(%rsi),%r9 11: movq 6*8(%rsi),%r10 12: movq 7*8(%rsi),%r11 13: movnti %r8,4*8(%rdi) 14: movnti %r9,5*8(%rdi) 15: movnti %r10,6*8(%rdi) 16: movnti %r11,7*8(%rdi) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi decl %ecx jnz .L_4x8b_nocache_copy_loop /* Set 8-byte copy count and remainder */ .L_8b_nocache_copy_entry: movl %edx,%ecx andl $7,%edx shrl $3,%ecx jz .L_4b_nocache_copy_entry /* jump if count is 0 */ /* Perform 8-byte nocache loop-copy */ .L_8b_nocache_copy_loop: 20: movq (%rsi),%r8 21: movnti %r8,(%rdi) leaq 8(%rsi),%rsi leaq 8(%rdi),%rdi decl %ecx jnz .L_8b_nocache_copy_loop /* If no byte left, we're 
done */ .L_4b_nocache_copy_entry: andl %edx,%edx jz .L_finish_copy /* If destination is not 4-byte aligned, go to byte copy: */ movl %edi,%ecx andl $3,%ecx jnz .L_1b_cache_copy_entry /* Set 4-byte copy count (1 or 0) and remainder */ movl %edx,%ecx andl $3,%edx shrl $2,%ecx jz .L_1b_cache_copy_entry /* jump if count is 0 */ /* Perform 4-byte nocache copy: */ 30: movl (%rsi),%r8d 31: movnti %r8d,(%rdi) leaq 4(%rsi),%rsi leaq 4(%rdi),%rdi /* If no bytes left, we're done: */ andl %edx,%edx jz .L_finish_copy /* Perform byte "cache" loop-copy for the remainder */ .L_1b_cache_copy_entry: movl %edx,%ecx .L_1b_cache_copy_loop: 40: movb (%rsi),%al 41: movb %al,(%rdi) incq %rsi incq %rdi decl %ecx jnz .L_1b_cache_copy_loop /* Finished copying; fence the prior stores */ .L_finish_copy: xorl %eax,%eax ASM_CLAC sfence ret .section .fixup,"ax" .L_fixup_4x8b_copy: shll $6,%ecx addl %ecx,%edx jmp .L_fixup_handle_tail .L_fixup_8b_copy: lea (%rdx,%rcx,8),%rdx jmp .L_fixup_handle_tail .L_fixup_4b_copy: lea (%rdx,%rcx,4),%rdx jmp .L_fixup_handle_tail .L_fixup_1b_copy: movl %ecx,%edx .L_fixup_handle_tail: sfence jmp copy_user_handle_tail .previous _ASM_EXTABLE(1b,.L_fixup_4x8b_copy) _ASM_EXTABLE(2b,.L_fixup_4x8b_copy) _ASM_EXTABLE(3b,.L_fixup_4x8b_copy) _ASM_EXTABLE(4b,.L_fixup_4x8b_copy) _ASM_EXTABLE(5b,.L_fixup_4x8b_copy) _ASM_EXTABLE(6b,.L_fixup_4x8b_copy) _ASM_EXTABLE(7b,.L_fixup_4x8b_copy) _ASM_EXTABLE(8b,.L_fixup_4x8b_copy) _ASM_EXTABLE(9b,.L_fixup_4x8b_copy) _ASM_EXTABLE(10b,.L_fixup_4x8b_copy) _ASM_EXTABLE(11b,.L_fixup_4x8b_copy) _ASM_EXTABLE(12b,.L_fixup_4x8b_copy) _ASM_EXTABLE(13b,.L_fixup_4x8b_copy) _ASM_EXTABLE(14b,.L_fixup_4x8b_copy) _ASM_EXTABLE(15b,.L_fixup_4x8b_copy) _ASM_EXTABLE(16b,.L_fixup_4x8b_copy) _ASM_EXTABLE(20b,.L_fixup_8b_copy) _ASM_EXTABLE(21b,.L_fixup_8b_copy) _ASM_EXTABLE(30b,.L_fixup_4b_copy) _ASM_EXTABLE(31b,.L_fixup_4b_copy) _ASM_EXTABLE(40b,.L_fixup_1b_copy) _ASM_EXTABLE(41b,.L_fixup_1b_copy) ENDPROC(__copy_user_nocache) 
EXPORT_SYMBOL(__copy_user_nocache)
AirFortressIlikara/LS2K0300-linux-4.19
1,253
arch/x86/lib/retpoline.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/stringify.h> #include <linux/linkage.h> #include <asm/dwarf2.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/export.h> #include <asm/nospec-branch.h> .macro THUNK reg .section .text.__x86.indirect_thunk ENTRY(__x86_indirect_thunk_\reg) CFI_STARTPROC JMP_NOSPEC %\reg CFI_ENDPROC ENDPROC(__x86_indirect_thunk_\reg) .endm /* * Despite being an assembler file we can't just use .irp here * because __KSYM_DEPS__ only uses the C preprocessor and would * only see one instance of "__x86_indirect_thunk_\reg" rather * than one per register with the correct names. So we do it * the simple and nasty way... */ #define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) #define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg) GENERATE_THUNK(_ASM_AX) GENERATE_THUNK(_ASM_BX) GENERATE_THUNK(_ASM_CX) GENERATE_THUNK(_ASM_DX) GENERATE_THUNK(_ASM_SI) GENERATE_THUNK(_ASM_DI) GENERATE_THUNK(_ASM_BP) #ifdef CONFIG_64BIT GENERATE_THUNK(r8) GENERATE_THUNK(r9) GENERATE_THUNK(r10) GENERATE_THUNK(r11) GENERATE_THUNK(r12) GENERATE_THUNK(r13) GENERATE_THUNK(r14) GENERATE_THUNK(r15) #endif
AirFortressIlikara/LS2K0300-linux-4.19
3,748
arch/x86/lib/rwsem.S
/* * x86 semaphore implementation. * * (C) Copyright 1999 Linus Torvalds * * Portions Copyright 1999 Red Hat, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> */ #include <linux/linkage.h> #include <asm/alternative-asm.h> #include <asm/frame.h> #define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg) #define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l) #ifdef CONFIG_X86_32 /* * The semaphore operations have a special calling sequence that * allow us to do a simpler in-line version of them. These routines * need to convert that sequence back into the C sequence when * there is contention on the semaphore. * * %eax contains the semaphore pointer on entry. Save the C-clobbered * registers (%eax, %edx and %ecx) except %eax which is either a return * value or just gets clobbered. Same is true for %edx so make sure GCC * reloads it after the slow path, by making it hold a temporary, for * example see ____down_write(). */ #define save_common_regs \ pushl %ecx #define restore_common_regs \ popl %ecx /* Avoid uglifying the argument copying x86-64 needs to do. */ .macro movq src, dst .endm #else /* * x86-64 rwsem wrappers * * This interfaces the inline asm code to the slow-path * C routines. We need to save the call-clobbered regs * that the asm does not mark as clobbered, and move the * argument from %rax to %rdi. * * NOTE! We don't need to save %rax, because the functions * will always return the semaphore pointer in %rax (which * is also the input argument to these helpers) * * The following can clobber %rdx because the asm clobbers it: * call_rwsem_down_write_failed * call_rwsem_wake * but %rdi, %rsi, %rcx, %r8-r11 always need saving. 
*/ #define save_common_regs \ pushq %rdi; \ pushq %rsi; \ pushq %rcx; \ pushq %r8; \ pushq %r9; \ pushq %r10; \ pushq %r11 #define restore_common_regs \ popq %r11; \ popq %r10; \ popq %r9; \ popq %r8; \ popq %rcx; \ popq %rsi; \ popq %rdi #endif /* Fix up special calling conventions */ ENTRY(call_rwsem_down_read_failed) FRAME_BEGIN save_common_regs __ASM_SIZE(push,) %__ASM_REG(dx) movq %rax,%rdi call rwsem_down_read_failed __ASM_SIZE(pop,) %__ASM_REG(dx) restore_common_regs FRAME_END ret ENDPROC(call_rwsem_down_read_failed) ENTRY(call_rwsem_down_read_failed_killable) FRAME_BEGIN save_common_regs __ASM_SIZE(push,) %__ASM_REG(dx) movq %rax,%rdi call rwsem_down_read_failed_killable __ASM_SIZE(pop,) %__ASM_REG(dx) restore_common_regs FRAME_END ret ENDPROC(call_rwsem_down_read_failed_killable) ENTRY(call_rwsem_down_write_failed) FRAME_BEGIN save_common_regs movq %rax,%rdi call rwsem_down_write_failed restore_common_regs FRAME_END ret ENDPROC(call_rwsem_down_write_failed) ENTRY(call_rwsem_down_write_failed_killable) FRAME_BEGIN save_common_regs movq %rax,%rdi call rwsem_down_write_failed_killable restore_common_regs FRAME_END ret ENDPROC(call_rwsem_down_write_failed_killable) ENTRY(call_rwsem_wake) FRAME_BEGIN /* do nothing if still outstanding active readers */ __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx) jnz 1f save_common_regs movq %rax,%rdi call rwsem_wake restore_common_regs 1: FRAME_END ret ENDPROC(call_rwsem_wake) ENTRY(call_rwsem_downgrade_wake) FRAME_BEGIN save_common_regs __ASM_SIZE(push,) %__ASM_REG(dx) movq %rax,%rdi call rwsem_downgrade_wake __ASM_SIZE(pop,) %__ASM_REG(dx) restore_common_regs FRAME_END ret ENDPROC(call_rwsem_downgrade_wake)
AirFortressIlikara/LS2K0300-linux-4.19
2,782
arch/x86/lib/memset_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright 2002 Andi Kleen, SuSE Labs */ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/export.h> /* * ISO C memset - set a memory block to a byte value. This function uses fast * string to get better performance than the original function. The code is * simpler and shorter than the original function as well. * * rdi destination * rsi value (char) * rdx count (bytes) * * rax original destination */ .weak memset .p2align 4, 0x90 memset: ENTRY(__memset) /* * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended * to use it when possible. If not available, use fast string instructions. * * Otherwise, use original memset function. */ ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memset_erms", X86_FEATURE_ERMS movq %rdi,%r9 movq %rdx,%rcx andl $7,%edx shrq $3,%rcx /* expand byte value */ movzbl %sil,%esi movabs $0x0101010101010101,%rax imulq %rsi,%rax rep stosq movl %edx,%ecx rep stosb movq %r9,%rax ret ENDPROC(memset) ENDPROC(__memset) EXPORT_SYMBOL(memset) EXPORT_SYMBOL(__memset) /* * ISO C memset - set a memory block to a byte value. This function uses * enhanced rep stosb to override the fast string function. * The code is simpler and shorter than the fast string function as well. 
* * rdi destination * rsi value (char) * rdx count (bytes) * * rax original destination */ ENTRY(memset_erms) movq %rdi,%r9 movb %sil,%al movq %rdx,%rcx rep stosb movq %r9,%rax ret ENDPROC(memset_erms) ENTRY(memset_orig) movq %rdi,%r10 /* expand byte value */ movzbl %sil,%ecx movabs $0x0101010101010101,%rax imulq %rcx,%rax /* align dst */ movl %edi,%r9d andl $7,%r9d jnz .Lbad_alignment .Lafter_bad_alignment: movq %rdx,%rcx shrq $6,%rcx jz .Lhandle_tail .p2align 4 .Lloop_64: decq %rcx movq %rax,(%rdi) movq %rax,8(%rdi) movq %rax,16(%rdi) movq %rax,24(%rdi) movq %rax,32(%rdi) movq %rax,40(%rdi) movq %rax,48(%rdi) movq %rax,56(%rdi) leaq 64(%rdi),%rdi jnz .Lloop_64 /* Handle tail in loops. The loops should be faster than hard to predict jump tables. */ .p2align 4 .Lhandle_tail: movl %edx,%ecx andl $63&(~7),%ecx jz .Lhandle_7 shrl $3,%ecx .p2align 4 .Lloop_8: decl %ecx movq %rax,(%rdi) leaq 8(%rdi),%rdi jnz .Lloop_8 .Lhandle_7: andl $7,%edx jz .Lende .p2align 4 .Lloop_1: decl %edx movb %al,(%rdi) leaq 1(%rdi),%rdi jnz .Lloop_1 .Lende: movq %r10,%rax ret .Lbad_alignment: cmpq $7,%rdx jbe .Lhandle_7 movq %rax,(%rdi) /* unaligned store */ movq $8,%r8 subq %r9,%r8 addq %r8,%rdi subq %r8,%rdx jmp .Lafter_bad_alignment .Lfinal: ENDPROC(memset_orig)
AirFortressIlikara/LS2K0300-linux-4.19
1,670
arch/x86/lib/msr-reg.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <linux/errno.h> #include <asm/asm.h> #include <asm/msr.h> #ifdef CONFIG_X86_64 /* * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]); * * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi] * */ .macro op_safe_regs op ENTRY(\op\()_safe_regs) pushq %rbx pushq %r12 movq %rdi, %r10 /* Save pointer */ xorl %r11d, %r11d /* Return value */ movl (%rdi), %eax movl 4(%rdi), %ecx movl 8(%rdi), %edx movl 12(%rdi), %ebx movl 20(%rdi), %r12d movl 24(%rdi), %esi movl 28(%rdi), %edi 1: \op 2: movl %eax, (%r10) movl %r11d, %eax /* Return value */ movl %ecx, 4(%r10) movl %edx, 8(%r10) movl %ebx, 12(%r10) movl %r12d, 20(%r10) movl %esi, 24(%r10) movl %edi, 28(%r10) popq %r12 popq %rbx ret 3: movl $-EIO, %r11d jmp 2b _ASM_EXTABLE(1b, 3b) ENDPROC(\op\()_safe_regs) .endm #else /* X86_32 */ .macro op_safe_regs op ENTRY(\op\()_safe_regs) pushl %ebx pushl %ebp pushl %esi pushl %edi pushl $0 /* Return value */ pushl %eax movl 4(%eax), %ecx movl 8(%eax), %edx movl 12(%eax), %ebx movl 20(%eax), %ebp movl 24(%eax), %esi movl 28(%eax), %edi movl (%eax), %eax 1: \op 2: pushl %eax movl 4(%esp), %eax popl (%eax) addl $4, %esp movl %ecx, 4(%eax) movl %edx, 8(%eax) movl %ebx, 12(%eax) movl %ebp, 20(%eax) movl %esi, 24(%eax) movl %edi, 28(%eax) popl %eax popl %edi popl %esi popl %ebp popl %ebx ret 3: movl $-EIO, 4(%esp) jmp 2b _ASM_EXTABLE(1b, 3b) ENDPROC(\op\()_safe_regs) .endm #endif op_safe_regs rdmsr op_safe_regs wrmsr
AirFortressIlikara/LS2K0300-linux-4.19
10,392
arch/x86/lib/checksum_32.S
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Tom May, <ftom@netcom.com> * Pentium Pro/II routines: * Alexander Kjeldaas <astor@guardian.no> * Finn Arne Gangstad <finnag@guardian.no> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception * handling. * Andi Kleen, add zeroing on error * converted to pure assembler * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/asm.h> #include <asm/export.h> #include <asm/nospec-branch.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) */ .text #ifndef CONFIG_X86_USE_PPRO_CHECKSUM /* * Experiments with Ethernet and SLIP connections show that buff * is aligned on either a 2-byte or 4-byte boundary. We get at * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. * Fortunately, it is easy to convert 2-byte alignment to 4-byte * alignment for the unrolled loop. */ ENTRY(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: unsigned char *buff testl $3, %esi # Check alignment. jz 2f # Jump if alignment is ok. testl $1, %esi # Check alignment. jz 10f # Jump if alignment is boundary of 2 bytes. 
# buf is odd dec %ecx jl 8f movzbl (%esi), %ebx adcl %ebx, %eax roll $8, %eax inc %esi testl $2, %esi jz 2f 10: subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. jmp 4f 1: movw (%esi), %bx addl $2, %esi addw %bx, %ax adcl $0, %eax 2: movl %ecx, %edx shrl $5, %ecx jz 2f testl %esi, %esi 1: movl (%esi), %ebx adcl %ebx, %eax movl 4(%esi), %ebx adcl %ebx, %eax movl 8(%esi), %ebx adcl %ebx, %eax movl 12(%esi), %ebx adcl %ebx, %eax movl 16(%esi), %ebx adcl %ebx, %eax movl 20(%esi), %ebx adcl %ebx, %eax movl 24(%esi), %ebx adcl %ebx, %eax movl 28(%esi), %ebx adcl %ebx, %eax lea 32(%esi), %esi dec %ecx jne 1b adcl $0, %eax 2: movl %edx, %ecx andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF 3: adcl (%esi), %eax lea 4(%esi), %esi dec %edx jne 3b adcl $0, %eax 4: andl $3, %ecx jz 7f cmpl $2, %ecx jb 5f movw (%esi),%cx leal 2(%esi),%esi je 6f shll $16,%ecx 5: movb (%esi),%cl 6: addl %ecx,%eax adcl $0, %eax 7: testb $1, 12(%esp) jz 8f roll $8, %eax 8: popl %ebx popl %esi ret ENDPROC(csum_partial) #else /* Version for PentiumII/PPro */ ENTRY(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: const unsigned char *buf testl $3, %esi jnz 25f 10: movl %ecx, %edx movl %ecx, %ebx andl $0x7c, %ebx shrl $7, %ecx addl %ebx,%esi shrl $2, %ebx negl %ebx lea 45f(%ebx,%ebx,2), %ebx testl %esi, %esi JMP_NOSPEC %ebx # Handle 2-byte-aligned regions 20: addw (%esi), %ax lea 2(%esi), %esi adcl $0, %eax jmp 10b 25: testl $1, %esi jz 30f # buf is odd dec %ecx jl 90f movzbl (%esi), %ebx addl %ebx, %eax adcl $0, %eax roll $8, %eax inc %esi testl $2, %esi jz 10b 30: subl $2, %ecx ja 20b je 32f addl $2, %ecx jz 80f movzbl (%esi),%ebx # csumming 1 byte, 2-aligned addl %ebx, %eax adcl $0, %eax jmp 80f 32: addw (%esi), %ax # csumming 2 bytes, 2-aligned adcl $0, %eax jmp 80f 40: addl -128(%esi), %eax adcl 
-124(%esi), %eax adcl -120(%esi), %eax adcl -116(%esi), %eax adcl -112(%esi), %eax adcl -108(%esi), %eax adcl -104(%esi), %eax adcl -100(%esi), %eax adcl -96(%esi), %eax adcl -92(%esi), %eax adcl -88(%esi), %eax adcl -84(%esi), %eax adcl -80(%esi), %eax adcl -76(%esi), %eax adcl -72(%esi), %eax adcl -68(%esi), %eax adcl -64(%esi), %eax adcl -60(%esi), %eax adcl -56(%esi), %eax adcl -52(%esi), %eax adcl -48(%esi), %eax adcl -44(%esi), %eax adcl -40(%esi), %eax adcl -36(%esi), %eax adcl -32(%esi), %eax adcl -28(%esi), %eax adcl -24(%esi), %eax adcl -20(%esi), %eax adcl -16(%esi), %eax adcl -12(%esi), %eax adcl -8(%esi), %eax adcl -4(%esi), %eax 45: lea 128(%esi), %esi adcl $0, %eax dec %ecx jge 40b movl %edx, %ecx 50: andl $3, %ecx jz 80f # Handle the last 1-3 bytes without jumping notl %ecx # 1->2, 2->1, 3->0, higher bits are masked movl $0xffffff,%ebx # by the shll and shrl instructions shll $3,%ecx shrl %cl,%ebx andl -128(%esi),%ebx # esi is 4-aligned so should be ok addl %ebx,%eax adcl $0,%eax 80: testb $1, 12(%esp) jz 90f roll $8, %eax 90: popl %ebx popl %esi ret ENDPROC(csum_partial) #endif EXPORT_SYMBOL(csum_partial) /* unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, int sum, int *src_err_ptr, int *dst_err_ptr) */ /* * Copy from ds while checksumming, otherwise like csum_partial * * The macros SRC and DST specify the type of access for the instruction. * thus we can call a custom exception handler for all access types. * * FIXME: could someone double-check whether I haven't mixed up some SRC and * DST definitions? It's damn hard to trigger all cases. I hope I got * them all but there's no guarantee. */ #define SRC(y...) \ 9999: y; \ _ASM_EXTABLE(9999b, 6001f) #define DST(y...) 
\ 9999: y; \ _ASM_EXTABLE(9999b, 6002f) #ifndef CONFIG_X86_USE_PPRO_CHECKSUM #define ARGBASE 16 #define FP 12 ENTRY(csum_partial_copy_generic) subl $4,%esp pushl %edi pushl %esi pushl %ebx movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+4(%esp),%esi # src movl ARGBASE+8(%esp),%edi # dst testl $2, %edi # Check alignment. jz 2f # Jump if alignment is ok. subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. jmp 4f SRC(1: movw (%esi), %bx ) addl $2, %esi DST( movw %bx, (%edi) ) addl $2, %edi addw %bx, %ax adcl $0, %eax 2: movl %ecx, FP(%esp) shrl $5, %ecx jz 2f testl %esi, %esi SRC(1: movl (%esi), %ebx ) SRC( movl 4(%esi), %edx ) adcl %ebx, %eax DST( movl %ebx, (%edi) ) adcl %edx, %eax DST( movl %edx, 4(%edi) ) SRC( movl 8(%esi), %ebx ) SRC( movl 12(%esi), %edx ) adcl %ebx, %eax DST( movl %ebx, 8(%edi) ) adcl %edx, %eax DST( movl %edx, 12(%edi) ) SRC( movl 16(%esi), %ebx ) SRC( movl 20(%esi), %edx ) adcl %ebx, %eax DST( movl %ebx, 16(%edi) ) adcl %edx, %eax DST( movl %edx, 20(%edi) ) SRC( movl 24(%esi), %ebx ) SRC( movl 28(%esi), %edx ) adcl %ebx, %eax DST( movl %ebx, 24(%edi) ) adcl %edx, %eax DST( movl %edx, 28(%edi) ) lea 32(%esi), %esi lea 32(%edi), %edi dec %ecx jne 1b adcl $0, %eax 2: movl FP(%esp), %edx movl %edx, %ecx andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF SRC(3: movl (%esi), %ebx ) adcl %ebx, %eax DST( movl %ebx, (%edi) ) lea 4(%esi), %esi lea 4(%edi), %edi dec %edx jne 3b adcl $0, %eax 4: andl $3, %ecx jz 7f cmpl $2, %ecx jb 5f SRC( movw (%esi), %cx ) leal 2(%esi), %esi DST( movw %cx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%ecx SRC(5: movb (%esi), %cl ) DST( movb %cl, (%edi) ) 6: addl %ecx, %eax adcl $0, %eax 7: 5000: # Exception handler: .section .fixup, "ax" 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr movl $-EFAULT, (%ebx) # zero the complete destination - computing the rest # is too much work movl ARGBASE+8(%esp), %edi # dst 
movl ARGBASE+12(%esp), %ecx # len xorl %eax,%eax rep ; stosb jmp 5000b 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr movl $-EFAULT,(%ebx) jmp 5000b .previous popl %ebx popl %esi popl %edi popl %ecx # equivalent to addl $4,%esp ret ENDPROC(csum_partial_copy_generic) #else /* Version for PentiumII/PPro */ #define ROUND1(x) \ SRC(movl x(%esi), %ebx ) ; \ addl %ebx, %eax ; \ DST(movl %ebx, x(%edi) ) ; #define ROUND(x) \ SRC(movl x(%esi), %ebx ) ; \ adcl %ebx, %eax ; \ DST(movl %ebx, x(%edi) ) ; #define ARGBASE 12 ENTRY(csum_partial_copy_generic) pushl %ebx pushl %edi pushl %esi movl ARGBASE+4(%esp),%esi #src movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+12(%esp),%ecx #len movl ARGBASE+16(%esp),%eax #sum # movl %ecx, %edx movl %ecx, %ebx movl %esi, %edx shrl $6, %ecx andl $0x3c, %ebx negl %ebx subl %ebx, %esi subl %ebx, %edi lea -1(%esi),%edx andl $-32,%edx lea 3f(%ebx,%ebx), %ebx testl %esi, %esi JMP_NOSPEC %ebx 1: addl $64,%esi addl $64,%edi SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52) ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36) ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20) ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4) 3: adcl $0,%eax addl $64, %edx dec %ecx jge 1b 4: movl ARGBASE+12(%esp),%edx #len andl $3, %edx jz 7f cmpl $2, %edx jb 5f SRC( movw (%esi), %dx ) leal 2(%esi), %esi DST( movw %dx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%edx 5: SRC( movb (%esi), %dl ) DST( movb %dl, (%edi) ) 6: addl %edx, %eax adcl $0, %eax 7: .section .fixup, "ax" 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr movl $-EFAULT, (%ebx) # zero the complete destination (computing the rest is too much work) movl ARGBASE+8(%esp),%edi # dst movl ARGBASE+12(%esp),%ecx # len xorl %eax,%eax rep; stosb jmp 7b 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr movl $-EFAULT, (%ebx) jmp 7b .previous popl %esi popl %edi popl %ebx ret ENDPROC(csum_partial_copy_generic) #undef ROUND #undef ROUND1 #endif EXPORT_SYMBOL(csum_partial_copy_generic)
AirFortressIlikara/LS2K0300-linux-4.19
3,746
arch/x86/lib/csum-copy_64.S
/* * Copyright 2002, 2003 Andi Kleen, SuSE Labs. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. No warranty for anything given at all. */ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/asm.h> /* * Checksum copy with exception handling. * On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the * destination is zeroed. * * Input * rdi source * rsi destination * edx len (32bit) * ecx sum (32bit) * r8 src_err_ptr (int) * r9 dst_err_ptr (int) * * Output * eax 64bit sum. undefined in case of exception. * * Wrappers need to take care of valid exception sum and zeroing. * They also should align source or destination to 8 bytes. */ .macro source 10: _ASM_EXTABLE(10b, .Lbad_source) .endm .macro dest 20: _ASM_EXTABLE(20b, .Lbad_dest) .endm .macro ignore L=.Lignore 30: _ASM_EXTABLE(30b, \L) .endm ENTRY(csum_partial_copy_generic) cmpl $3*64, %edx jle .Lignore .Lignore: subq $7*8, %rsp movq %rbx, 2*8(%rsp) movq %r12, 3*8(%rsp) movq %r14, 4*8(%rsp) movq %r13, 5*8(%rsp) movq %r15, 6*8(%rsp) movq %r8, (%rsp) movq %r9, 1*8(%rsp) movl %ecx, %eax movl %edx, %ecx xorl %r9d, %r9d movq %rcx, %r12 shrq $6, %r12 jz .Lhandle_tail /* < 64 */ clc /* main loop. 
clear in 64 byte blocks */ /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */ /* r11: temp3, rdx: temp4, r12 loopcnt */ /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */ .p2align 4 .Lloop: source movq (%rdi), %rbx source movq 8(%rdi), %r8 source movq 16(%rdi), %r11 source movq 24(%rdi), %rdx source movq 32(%rdi), %r10 source movq 40(%rdi), %r15 source movq 48(%rdi), %r14 source movq 56(%rdi), %r13 ignore 2f prefetcht0 5*64(%rdi) 2: adcq %rbx, %rax adcq %r8, %rax adcq %r11, %rax adcq %rdx, %rax adcq %r10, %rax adcq %r15, %rax adcq %r14, %rax adcq %r13, %rax decl %r12d dest movq %rbx, (%rsi) dest movq %r8, 8(%rsi) dest movq %r11, 16(%rsi) dest movq %rdx, 24(%rsi) dest movq %r10, 32(%rsi) dest movq %r15, 40(%rsi) dest movq %r14, 48(%rsi) dest movq %r13, 56(%rsi) 3: leaq 64(%rdi), %rdi leaq 64(%rsi), %rsi jnz .Lloop adcq %r9, %rax /* do last up to 56 bytes */ .Lhandle_tail: /* ecx: count */ movl %ecx, %r10d andl $63, %ecx shrl $3, %ecx jz .Lfold clc .p2align 4 .Lloop_8: source movq (%rdi), %rbx adcq %rbx, %rax decl %ecx dest movq %rbx, (%rsi) leaq 8(%rsi), %rsi /* preserve carry */ leaq 8(%rdi), %rdi jnz .Lloop_8 adcq %r9, %rax /* add in carry */ .Lfold: /* reduce checksum to 32bits */ movl %eax, %ebx shrq $32, %rax addl %ebx, %eax adcl %r9d, %eax /* do last up to 6 bytes */ .Lhandle_7: movl %r10d, %ecx andl $7, %ecx shrl $1, %ecx jz .Lhandle_1 movl $2, %edx xorl %ebx, %ebx clc .p2align 4 .Lloop_1: source movw (%rdi), %bx adcl %ebx, %eax decl %ecx dest movw %bx, (%rsi) leaq 2(%rdi), %rdi leaq 2(%rsi), %rsi jnz .Lloop_1 adcl %r9d, %eax /* add in carry */ /* handle last odd byte */ .Lhandle_1: testb $1, %r10b jz .Lende xorl %ebx, %ebx source movb (%rdi), %bl dest movb %bl, (%rsi) addl %ebx, %eax adcl %r9d, %eax /* carry */ .Lende: movq 2*8(%rsp), %rbx movq 3*8(%rsp), %r12 movq 4*8(%rsp), %r14 movq 5*8(%rsp), %r13 movq 6*8(%rsp), %r15 addq $7*8, %rsp ret /* Exception handlers. 
Very simple, zeroing is done in the wrappers */ .Lbad_source: movq (%rsp), %rax testq %rax, %rax jz .Lende movl $-EFAULT, (%rax) jmp .Lende .Lbad_dest: movq 8(%rsp), %rax testq %rax, %rax jz .Lende movl $-EFAULT, (%rax) jmp .Lende ENDPROC(csum_partial_copy_generic)
AirFortressIlikara/LS2K0300-linux-4.19
1,837
arch/x86/lib/copy_page_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/export.h> /* * Some CPUs run faster using the string copy instructions (sane microcode). * It is also a lot simpler. Use this when possible. But, don't use streaming * copy unless the CPU indicates X86_FEATURE_REP_GOOD. Could vary the * prefetch distance based on SMP/UP. */ ALIGN ENTRY(copy_page) ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD movl $4096/8, %ecx rep movsq ret ENDPROC(copy_page) EXPORT_SYMBOL(copy_page) ENTRY(copy_page_regs) subq $2*8, %rsp movq %rbx, (%rsp) movq %r12, 1*8(%rsp) movl $(4096/64)-5, %ecx .p2align 4 .Loop64: dec %rcx movq 0x8*0(%rsi), %rax movq 0x8*1(%rsi), %rbx movq 0x8*2(%rsi), %rdx movq 0x8*3(%rsi), %r8 movq 0x8*4(%rsi), %r9 movq 0x8*5(%rsi), %r10 movq 0x8*6(%rsi), %r11 movq 0x8*7(%rsi), %r12 prefetcht0 5*64(%rsi) movq %rax, 0x8*0(%rdi) movq %rbx, 0x8*1(%rdi) movq %rdx, 0x8*2(%rdi) movq %r8, 0x8*3(%rdi) movq %r9, 0x8*4(%rdi) movq %r10, 0x8*5(%rdi) movq %r11, 0x8*6(%rdi) movq %r12, 0x8*7(%rdi) leaq 64 (%rsi), %rsi leaq 64 (%rdi), %rdi jnz .Loop64 movl $5, %ecx .p2align 4 .Loop2: decl %ecx movq 0x8*0(%rsi), %rax movq 0x8*1(%rsi), %rbx movq 0x8*2(%rsi), %rdx movq 0x8*3(%rsi), %r8 movq 0x8*4(%rsi), %r9 movq 0x8*5(%rsi), %r10 movq 0x8*6(%rsi), %r11 movq 0x8*7(%rsi), %r12 movq %rax, 0x8*0(%rdi) movq %rbx, 0x8*1(%rdi) movq %rdx, 0x8*2(%rdi) movq %r8, 0x8*3(%rdi) movq %r9, 0x8*4(%rdi) movq %r10, 0x8*5(%rdi) movq %r11, 0x8*6(%rdi) movq %r12, 0x8*7(%rdi) leaq 64(%rdi), %rdi leaq 64(%rsi), %rsi jnz .Loop2 movq (%rsp), %rbx movq 1*8(%rsp), %r12 addq $2*8, %rsp ret ENDPROC(copy_page_regs)
AirFortressIlikara/LS2K0300-linux-4.19
2,772
arch/x86/lib/atomic64_cx8_32.S
/* * atomic64_t for 586+ * * Copyright © 2010 Luca Barbieri * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> #include <asm/alternative-asm.h> .macro read64 reg movl %ebx, %eax movl %ecx, %edx /* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ LOCK_PREFIX cmpxchg8b (\reg) .endm ENTRY(atomic64_read_cx8) read64 %ecx ret ENDPROC(atomic64_read_cx8) ENTRY(atomic64_set_cx8) 1: /* we don't need LOCK_PREFIX since aligned 64-bit writes * are atomic on 586 and newer */ cmpxchg8b (%esi) jne 1b ret ENDPROC(atomic64_set_cx8) ENTRY(atomic64_xchg_cx8) 1: LOCK_PREFIX cmpxchg8b (%esi) jne 1b ret ENDPROC(atomic64_xchg_cx8) .macro addsub_return func ins insc ENTRY(atomic64_\func\()_return_cx8) pushl %ebp pushl %ebx pushl %esi pushl %edi movl %eax, %esi movl %edx, %edi movl %ecx, %ebp read64 %ecx 1: movl %eax, %ebx movl %edx, %ecx \ins\()l %esi, %ebx \insc\()l %edi, %ecx LOCK_PREFIX cmpxchg8b (%ebp) jne 1b 10: movl %ebx, %eax movl %ecx, %edx popl %edi popl %esi popl %ebx popl %ebp ret ENDPROC(atomic64_\func\()_return_cx8) .endm addsub_return add add adc addsub_return sub sub sbb .macro incdec_return func ins insc ENTRY(atomic64_\func\()_return_cx8) pushl %ebx read64 %esi 1: movl %eax, %ebx movl %edx, %ecx \ins\()l $1, %ebx \insc\()l $0, %ecx LOCK_PREFIX cmpxchg8b (%esi) jne 1b 10: movl %ebx, %eax movl %ecx, %edx popl %ebx ret ENDPROC(atomic64_\func\()_return_cx8) .endm incdec_return inc add adc incdec_return dec sub sbb ENTRY(atomic64_dec_if_positive_cx8) pushl %ebx read64 %esi 1: movl %eax, %ebx movl %edx, %ecx subl $1, %ebx sbb $0, %ecx js 2f LOCK_PREFIX cmpxchg8b (%esi) jne 1b 2: movl %ebx, %eax movl %ecx, %edx popl %ebx ret ENDPROC(atomic64_dec_if_positive_cx8) ENTRY(atomic64_add_unless_cx8) pushl %ebp pushl %ebx /* these just push 
these two parameters on the stack */ pushl %edi pushl %ecx movl %eax, %ebp movl %edx, %edi read64 %esi 1: cmpl %eax, 0(%esp) je 4f 2: movl %eax, %ebx movl %edx, %ecx addl %ebp, %ebx adcl %edi, %ecx LOCK_PREFIX cmpxchg8b (%esi) jne 1b movl $1, %eax 3: addl $8, %esp popl %ebx popl %ebp ret 4: cmpl %edx, 4(%esp) jne 2b xorl %eax, %eax jmp 3b ENDPROC(atomic64_add_unless_cx8) ENTRY(atomic64_inc_not_zero_cx8) pushl %ebx read64 %esi 1: movl %eax, %ecx orl %edx, %ecx jz 3f movl %eax, %ebx xorl %ecx, %ecx addl $1, %ebx adcl %edx, %ecx LOCK_PREFIX cmpxchg8b (%esi) jne 1b movl $1, %eax 3: popl %ebx ret ENDPROC(atomic64_inc_not_zero_cx8)
AirFortressIlikara/LS2K0300-linux-4.19
3,596
arch/x86/lib/memmove_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Normally compiler builtins are used, but sometimes the compiler calls out * of line code. Based on asm-i386/string.h. * * This assembly file is re-written from memmove_64.c file. * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> */ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/export.h> #undef memmove /* * Implement memmove(). This can handle overlap between src and dst. * * Input: * rdi: dest * rsi: src * rdx: count * * Output: * rax: dest */ .weak memmove .p2align 4, 0x90 memmove: ENTRY(__memmove) /* Handle more 32 bytes in loop */ mov %rdi, %rax cmp $0x20, %rdx jb 1f /* Decide forward/backward copy mode */ cmp %rdi, %rsi jge .Lmemmove_begin_forward mov %rsi, %r8 add %rdx, %r8 cmp %rdi, %r8 jg 2f .Lmemmove_begin_forward: ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS /* * movsq instruction have many startup latency * so we handle small size by general register. */ cmp $680, %rdx jb 3f /* * movsq instruction is only good for aligned case. */ cmpb %dil, %sil je 4f 3: sub $0x20, %rdx /* * We gobble 32 bytes forward in each loop. */ 5: sub $0x20, %rdx movq 0*8(%rsi), %r11 movq 1*8(%rsi), %r10 movq 2*8(%rsi), %r9 movq 3*8(%rsi), %r8 leaq 4*8(%rsi), %rsi movq %r11, 0*8(%rdi) movq %r10, 1*8(%rdi) movq %r9, 2*8(%rdi) movq %r8, 3*8(%rdi) leaq 4*8(%rdi), %rdi jae 5b addq $0x20, %rdx jmp 1f /* * Handle data forward by movsq. */ .p2align 4 4: movq %rdx, %rcx movq -8(%rsi, %rdx), %r11 lea -8(%rdi, %rdx), %r10 shrq $3, %rcx rep movsq movq %r11, (%r10) jmp 13f .Lmemmove_end_forward: /* * Handle data backward by movsq. */ .p2align 4 7: movq %rdx, %rcx movq (%rsi), %r11 movq %rdi, %r10 leaq -8(%rsi, %rdx), %rsi leaq -8(%rdi, %rdx), %rdi shrq $3, %rcx std rep movsq cld movq %r11, (%r10) jmp 13f /* * Start to prepare for backward copy. */ .p2align 4 2: cmp $680, %rdx jb 6f cmp %dil, %sil je 7b 6: /* * Calculate copy position to tail. 
*/ addq %rdx, %rsi addq %rdx, %rdi subq $0x20, %rdx /* * We gobble 32 bytes backward in each loop. */ 8: subq $0x20, %rdx movq -1*8(%rsi), %r11 movq -2*8(%rsi), %r10 movq -3*8(%rsi), %r9 movq -4*8(%rsi), %r8 leaq -4*8(%rsi), %rsi movq %r11, -1*8(%rdi) movq %r10, -2*8(%rdi) movq %r9, -3*8(%rdi) movq %r8, -4*8(%rdi) leaq -4*8(%rdi), %rdi jae 8b /* * Calculate copy position to head. */ addq $0x20, %rdx subq %rdx, %rsi subq %rdx, %rdi 1: cmpq $16, %rdx jb 9f /* * Move data from 16 bytes to 31 bytes. */ movq 0*8(%rsi), %r11 movq 1*8(%rsi), %r10 movq -2*8(%rsi, %rdx), %r9 movq -1*8(%rsi, %rdx), %r8 movq %r11, 0*8(%rdi) movq %r10, 1*8(%rdi) movq %r9, -2*8(%rdi, %rdx) movq %r8, -1*8(%rdi, %rdx) jmp 13f .p2align 4 9: cmpq $8, %rdx jb 10f /* * Move data from 8 bytes to 15 bytes. */ movq 0*8(%rsi), %r11 movq -1*8(%rsi, %rdx), %r10 movq %r11, 0*8(%rdi) movq %r10, -1*8(%rdi, %rdx) jmp 13f 10: cmpq $4, %rdx jb 11f /* * Move data from 4 bytes to 7 bytes. */ movl (%rsi), %r11d movl -4(%rsi, %rdx), %r10d movl %r11d, (%rdi) movl %r10d, -4(%rdi, %rdx) jmp 13f 11: cmp $2, %rdx jb 12f /* * Move data from 2 bytes to 3 bytes. */ movw (%rsi), %r11w movw -2(%rsi, %rdx), %r10w movw %r11w, (%rdi) movw %r10w, -2(%rdi, %rdx) jmp 13f 12: cmp $1, %rdx jb 13f /* * Move data for 1 byte. */ movb (%rsi), %r11b movb %r11b, (%rdi) 13: retq ENDPROC(__memmove) ENDPROC(memmove) EXPORT_SYMBOL(__memmove) EXPORT_SYMBOL(memmove)
AirFortressIlikara/LS2K0300-linux-4.19
1,165
arch/x86/lib/cmpxchg16b_emu.S
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * */ #include <linux/linkage.h> #include <asm/percpu.h> .text /* * Inputs: * %rsi : memory location to compare * %rax : low 64 bits of old value * %rdx : high 64 bits of old value * %rbx : low 64 bits of new value * %rcx : high 64 bits of new value * %al : Operation successful */ ENTRY(this_cpu_cmpxchg16b_emu) # # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not # via the ZF. Caller will access %al to get result. # # Note that this is only useful for a cpuops operation. Meaning that we # do *not* have a fully atomic operation but just an operation that is # *atomic* on a single cpu (as provided by the this_cpu_xx class of # macros). # pushfq cli cmpq PER_CPU_VAR((%rsi)), %rax jne .Lnot_same cmpq PER_CPU_VAR(8(%rsi)), %rdx jne .Lnot_same movq %rbx, PER_CPU_VAR((%rsi)) movq %rcx, PER_CPU_VAR(8(%rsi)) popfq mov $1, %al ret .Lnot_same: popfq xor %al,%al ret ENDPROC(this_cpu_cmpxchg16b_emu)
AirFortressIlikara/LS2K0300-linux-4.19
3,067
arch/x86/lib/getuser.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * __get_user functions. * * (C) Copyright 1998 Linus Torvalds * (C) Copyright 2005 Andi Kleen * (C) Copyright 2008 Glauber Costa * * These functions have a non-standard call interface * to make them more efficient, especially as they * return an error value in addition to the "real" * return value. */ /* * __get_user_X * * Inputs: %[r|e]ax contains the address. * * Outputs: %[r|e]ax is error code (0 or -EFAULT) * %[r|e]dx contains zero-extended value * %ecx contains the high half for 32-bit __get_user_8 * * * These functions should not modify any other registers, * as they get called from within inline assembly. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> .text ENTRY(__get_user_1) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 1: movzbl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret ENDPROC(__get_user_1) EXPORT_SYMBOL(__get_user_1) ENTRY(__get_user_2) add $1,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 2: movzwl -1(%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret ENDPROC(__get_user_2) EXPORT_SYMBOL(__get_user_2) ENTRY(__get_user_4) add $3,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 3: movl -3(%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret ENDPROC(__get_user_4) EXPORT_SYMBOL(__get_user_4) ENTRY(__get_user_8) #ifdef CONFIG_X86_64 add $7,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp 
TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 4: movq -7(%_ASM_AX),%rdx xor %eax,%eax ASM_CLAC ret #else add $7,%_ASM_AX jc bad_get_user_8 mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user_8 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 4: movl -7(%_ASM_AX),%edx 5: movl -3(%_ASM_AX),%ecx xor %eax,%eax ASM_CLAC ret #endif ENDPROC(__get_user_8) EXPORT_SYMBOL(__get_user_8) bad_get_user: xor %edx,%edx mov $(-EFAULT),%_ASM_AX ASM_CLAC ret END(bad_get_user) #ifdef CONFIG_X86_32 bad_get_user_8: xor %edx,%edx xor %ecx,%ecx mov $(-EFAULT),%_ASM_AX ASM_CLAC ret END(bad_get_user_8) #endif _ASM_EXTABLE(1b,bad_get_user) _ASM_EXTABLE(2b,bad_get_user) _ASM_EXTABLE(3b,bad_get_user) #ifdef CONFIG_X86_64 _ASM_EXTABLE(4b,bad_get_user) #else _ASM_EXTABLE(4b,bad_get_user_8) _ASM_EXTABLE(5b,bad_get_user_8) #endif
AirFortressIlikara/LS2K0300-linux-4.19
2,036
arch/x86/lib/putuser.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * __put_user functions. * * (C) Copyright 2005 Linus Torvalds * (C) Copyright 2005 Andi Kleen * (C) Copyright 2008 Glauber Costa * * These functions have a non-standard call interface * to make them more efficient, especially as they * return an error value in addition to the "real" * return value. */ #include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/errno.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> /* * __put_user_X * * Inputs: %eax[:%edx] contains the data * %ecx contains the address * * Outputs: %eax is error code (0 or -EFAULT) * * These functions should not modify any other registers, * as they get called from within inline assembly. */ #define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX #define EXIT ASM_CLAC ; \ ret .text ENTRY(__put_user_1) ENTER cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX jae bad_put_user ASM_STAC 1: movb %al,(%_ASM_CX) xor %eax,%eax EXIT ENDPROC(__put_user_1) EXPORT_SYMBOL(__put_user_1) ENTRY(__put_user_2) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user ASM_STAC 2: movw %ax,(%_ASM_CX) xor %eax,%eax EXIT ENDPROC(__put_user_2) EXPORT_SYMBOL(__put_user_2) ENTRY(__put_user_4) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user ASM_STAC 3: movl %eax,(%_ASM_CX) xor %eax,%eax EXIT ENDPROC(__put_user_4) EXPORT_SYMBOL(__put_user_4) ENTRY(__put_user_8) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user ASM_STAC 4: mov %_ASM_AX,(%_ASM_CX) #ifdef CONFIG_X86_32 5: movl %edx,4(%_ASM_CX) #endif xor %eax,%eax EXIT ENDPROC(__put_user_8) EXPORT_SYMBOL(__put_user_8) bad_put_user: movl $-EFAULT,%eax EXIT END(bad_put_user) _ASM_EXTABLE(1b,bad_put_user) _ASM_EXTABLE(2b,bad_put_user) _ASM_EXTABLE(3b,bad_put_user) _ASM_EXTABLE(4b,bad_put_user) #ifdef CONFIG_X86_32 _ASM_EXTABLE(5b,bad_put_user) #endif
AirFortressIlikara/LS2K0300-linux-4.19
1,905
arch/x86/purgatory/entry64.S
/* * Copyright (C) 2003,2004 Eric Biederman (ebiederm@xmission.com) * Copyright (C) 2014 Red Hat Inc. * Author(s): Vivek Goyal <vgoyal@redhat.com> * * This code has been taken from kexec-tools. * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ .text .balign 16 .code64 .globl entry64, entry64_regs entry64: /* Setup a gdt that should be preserved */ lgdt gdt(%rip) /* load the data segments */ movl $0x18, %eax /* data segment */ movl %eax, %ds movl %eax, %es movl %eax, %ss movl %eax, %fs movl %eax, %gs /* Setup new stack */ leaq stack_init(%rip), %rsp pushq $0x10 /* CS */ leaq new_cs_exit(%rip), %rax pushq %rax lretq new_cs_exit: /* Load the registers */ movq rax(%rip), %rax movq rbx(%rip), %rbx movq rcx(%rip), %rcx movq rdx(%rip), %rdx movq rsi(%rip), %rsi movq rdi(%rip), %rdi movq rsp(%rip), %rsp movq rbp(%rip), %rbp movq r8(%rip), %r8 movq r9(%rip), %r9 movq r10(%rip), %r10 movq r11(%rip), %r11 movq r12(%rip), %r12 movq r13(%rip), %r13 movq r14(%rip), %r14 movq r15(%rip), %r15 /* Jump to the new code... */ jmpq *rip(%rip) .section ".rodata" .balign 4 entry64_regs: rax: .quad 0x0 rcx: .quad 0x0 rdx: .quad 0x0 rbx: .quad 0x0 rsp: .quad 0x0 rbp: .quad 0x0 rsi: .quad 0x0 rdi: .quad 0x0 r8: .quad 0x0 r9: .quad 0x0 r10: .quad 0x0 r11: .quad 0x0 r12: .quad 0x0 r13: .quad 0x0 r14: .quad 0x0 r15: .quad 0x0 rip: .quad 0x0 .size entry64_regs, . - entry64_regs /* GDT */ .section ".rodata" .balign 16 gdt: /* 0x00 unusable segment * 0x08 unused * so use them as gdt ptr */ .word gdt_end - gdt - 1 .quad gdt .word 0, 0, 0 /* 0x10 4GB flat code segment */ .word 0xFFFF, 0x0000, 0x9A00, 0x00AF /* 0x18 4GB flat data segment */ .word 0xFFFF, 0x0000, 0x9200, 0x00CF gdt_end: stack: .quad 0, 0 stack_init:
AirFortressIlikara/LS2K0300-linux-4.19
1,113
arch/x86/purgatory/setup-x86_64.S
/* * purgatory: setup code * * Copyright (C) 2003,2004 Eric Biederman (ebiederm@xmission.com) * Copyright (C) 2014 Red Hat Inc. * * This code has been taken from kexec-tools. * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <asm/purgatory.h> .text .globl purgatory_start .balign 16 purgatory_start: .code64 /* Load a gdt so I know what the segment registers are */ lgdt gdt(%rip) /* load the data segments */ movl $0x18, %eax /* data segment */ movl %eax, %ds movl %eax, %es movl %eax, %ss movl %eax, %fs movl %eax, %gs /* Setup a stack */ leaq lstack_end(%rip), %rsp /* Call the C code */ call purgatory jmp entry64 .section ".rodata" .balign 16 gdt: /* 0x00 unusable segment * 0x08 unused * so use them as the gdt ptr */ .word gdt_end - gdt - 1 .quad gdt .word 0, 0, 0 /* 0x10 4GB flat code segment */ .word 0xFFFF, 0x0000, 0x9A00, 0x00AF /* 0x18 4GB flat data segment */ .word 0xFFFF, 0x0000, 0x9200, 0x00CF gdt_end: .bss .balign 4096 lstack: .skip 4096 lstack_end:
AirFortressIlikara/LS2K0300-linux-4.19
1,588
arch/x86/boot/bioscall.S
/* ----------------------------------------------------------------------- * * Copyright 2009-2014 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2 or (at your * option) any later version; incorporated herein by reference. * * ----------------------------------------------------------------------- */ /* * "Glove box" for BIOS calls. Avoids the constant problems with BIOSes * touching registers they shouldn't be. */ .code16 .section ".inittext","ax" .globl intcall .type intcall, @function intcall: /* Self-modify the INT instruction. Ugly, but works. */ cmpb %al, 3f je 1f movb %al, 3f jmp 1f /* Synchronize pipeline */ 1: /* Save state */ pushfl pushw %fs pushw %gs pushal /* Copy input state to stack frame */ subw $44, %sp movw %dx, %si movw %sp, %di movw $11, %cx rep; movsd /* Pop full state from the stack */ popal popw %gs popw %fs popw %es popw %ds popfl /* Actual INT */ .byte 0xcd /* INT opcode */ 3: .byte 0 /* Push full state to the stack */ pushfl pushw %ds pushw %es pushw %fs pushw %gs pushal /* Re-establish C environment invariants */ cld movzwl %sp, %esp movw %cs, %ax movw %ax, %ds movw %ax, %es /* Copy output state from stack frame */ movw 68(%esp), %di /* Original %cx == 3rd argument */ andw %di, %di jz 4f movw %sp, %si movw $11, %cx rep; movsd 4: addw $44, %sp /* Restore state and return */ popal popw %gs popw %fs popfl retl .size intcall, .-intcall
AirFortressIlikara/LS2K0300-linux-4.19
17,018
arch/x86/boot/header.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * header.S * * Copyright (C) 1991, 1992 Linus Torvalds * * Based on bootsect.S and setup.S * modified by more people than can be counted * * Rewritten as a common file by H. Peter Anvin (Apr 2007) * * BIG FAT NOTE: We're in real mode using 64k segments. Therefore segment * addresses must be multiplied by 16 to obtain their respective linear * addresses. To avoid confusion, linear addresses are written using leading * hex while segment addresses are written as segment:offset. * */ #include <asm/segment.h> #include <asm/boot.h> #include <asm/page_types.h> #include <asm/setup.h> #include <asm/bootparam.h> #include "boot.h" #include "voffset.h" #include "zoffset.h" BOOTSEG = 0x07C0 /* original address of boot-sector */ SYSSEG = 0x1000 /* historical load address >> 4 */ #ifndef SVGA_MODE #define SVGA_MODE ASK_VGA #endif #ifndef ROOT_RDONLY #define ROOT_RDONLY 1 #endif .code16 .section ".bstext", "ax" .global bootsect_start bootsect_start: #ifdef CONFIG_EFI_STUB # "MZ", MS-DOS header .byte 0x4d .byte 0x5a #endif # Normalize the start address ljmp $BOOTSEG, $start2 start2: movw %cs, %ax movw %ax, %ds movw %ax, %es movw %ax, %ss xorw %sp, %sp sti cld movw $bugger_off_msg, %si msg_loop: lodsb andb %al, %al jz bs_die movb $0xe, %ah movw $7, %bx int $0x10 jmp msg_loop bs_die: # Allow the user to press a key, then reboot xorw %ax, %ax int $0x16 int $0x19 # int 0x19 should never return. In case it does anyway, # invoke the BIOS reset code... ljmp $0xf000,$0xfff0 #ifdef CONFIG_EFI_STUB .org 0x3c # # Offset to the PE header. 
# .long pe_header #endif /* CONFIG_EFI_STUB */ .section ".bsdata", "a" bugger_off_msg: .ascii "Use a boot loader.\r\n" .ascii "\n" .ascii "Remove disk and press any key to reboot...\r\n" .byte 0 #ifdef CONFIG_EFI_STUB pe_header: .ascii "PE" .word 0 coff_header: #ifdef CONFIG_X86_32 .word 0x14c # i386 #else .word 0x8664 # x86-64 #endif .word 4 # nr_sections .long 0 # TimeDateStamp .long 0 # PointerToSymbolTable .long 1 # NumberOfSymbols .word section_table - optional_header # SizeOfOptionalHeader #ifdef CONFIG_X86_32 .word 0x306 # Characteristics. # IMAGE_FILE_32BIT_MACHINE | # IMAGE_FILE_DEBUG_STRIPPED | # IMAGE_FILE_EXECUTABLE_IMAGE | # IMAGE_FILE_LINE_NUMS_STRIPPED #else .word 0x206 # Characteristics # IMAGE_FILE_DEBUG_STRIPPED | # IMAGE_FILE_EXECUTABLE_IMAGE | # IMAGE_FILE_LINE_NUMS_STRIPPED #endif optional_header: #ifdef CONFIG_X86_32 .word 0x10b # PE32 format #else .word 0x20b # PE32+ format #endif .byte 0x02 # MajorLinkerVersion .byte 0x14 # MinorLinkerVersion # Filled in by build.c .long 0 # SizeOfCode .long 0 # SizeOfInitializedData .long 0 # SizeOfUninitializedData # Filled in by build.c .long 0x0000 # AddressOfEntryPoint .long 0x0200 # BaseOfCode #ifdef CONFIG_X86_32 .long 0 # data #endif extra_header_fields: #ifdef CONFIG_X86_32 .long 0 # ImageBase #else .quad 0 # ImageBase #endif .long 0x20 # SectionAlignment .long 0x20 # FileAlignment .word 0 # MajorOperatingSystemVersion .word 0 # MinorOperatingSystemVersion .word 0 # MajorImageVersion .word 0 # MinorImageVersion .word 0 # MajorSubsystemVersion .word 0 # MinorSubsystemVersion .long 0 # Win32VersionValue # # The size of the bzImage is written in tools/build.c # .long 0 # SizeOfImage .long 0x200 # SizeOfHeaders .long 0 # CheckSum .word 0xa # Subsystem (EFI application) .word 0 # DllCharacteristics #ifdef CONFIG_X86_32 .long 0 # SizeOfStackReserve .long 0 # SizeOfStackCommit .long 0 # SizeOfHeapReserve .long 0 # SizeOfHeapCommit #else .quad 0 # SizeOfStackReserve .quad 0 # SizeOfStackCommit .quad 0 # 
SizeOfHeapReserve .quad 0 # SizeOfHeapCommit #endif .long 0 # LoaderFlags .long 0x6 # NumberOfRvaAndSizes .quad 0 # ExportTable .quad 0 # ImportTable .quad 0 # ResourceTable .quad 0 # ExceptionTable .quad 0 # CertificationTable .quad 0 # BaseRelocationTable # Section table section_table: # # The offset & size fields are filled in by build.c. # .ascii ".setup" .byte 0 .byte 0 .long 0 .long 0x0 # startup_{32,64} .long 0 # Size of initialized data # on disk .long 0x0 # startup_{32,64} .long 0 # PointerToRelocations .long 0 # PointerToLineNumbers .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long 0x60500020 # Characteristics (section flags) # # The EFI application loader requires a relocation section # because EFI applications must be relocatable. The .reloc # offset & size fields are filled in by build.c. # .ascii ".reloc" .byte 0 .byte 0 .long 0 .long 0 .long 0 # SizeOfRawData .long 0 # PointerToRawData .long 0 # PointerToRelocations .long 0 # PointerToLineNumbers .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long 0x42100040 # Characteristics (section flags) # # The offset & size fields are filled in by build.c. # .ascii ".text" .byte 0 .byte 0 .byte 0 .long 0 .long 0x0 # startup_{32,64} .long 0 # Size of initialized data # on disk .long 0x0 # startup_{32,64} .long 0 # PointerToRelocations .long 0 # PointerToLineNumbers .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long 0x60500020 # Characteristics (section flags) # # The offset & size fields are filled in by build.c. # .ascii ".bss" .byte 0 .byte 0 .byte 0 .byte 0 .long 0 .long 0x0 .long 0 # Size of initialized data # on disk .long 0x0 .long 0 # PointerToRelocations .long 0 # PointerToLineNumbers .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long 0xc8000080 # Characteristics (section flags) #endif /* CONFIG_EFI_STUB */ # Kernel attributes; used by setup. This is part 1 of the # header, from the old boot sector. 
.section ".header", "a" .globl sentinel sentinel: .byte 0xff, 0xff /* Used to detect broken loaders */ .globl hdr hdr: setup_sects: .byte 0 /* Filled in by build.c */ root_flags: .word ROOT_RDONLY syssize: .long 0 /* Filled in by build.c */ ram_size: .word 0 /* Obsolete */ vid_mode: .word SVGA_MODE root_dev: .word 0 /* Filled in by build.c */ boot_flag: .word 0xAA55 # offset 512, entry point .globl _start _start: # Explicitly enter this as bytes, or the assembler # tries to generate a 3-byte jump here, which causes # everything else to push off to the wrong offset. .byte 0xeb # short (2-byte) jump .byte start_of_setup-1f 1: # Part 2 of the header, from the old setup.S .ascii "HdrS" # header signature .word 0x020d # header version number (>= 0x0105) # or else old loadlin-1.5 will fail) .globl realmode_swtch realmode_swtch: .word 0, 0 # default_switch, SETUPSEG start_sys_seg: .word SYSSEG # obsolete and meaningless, but just # in case something decided to "use" it .word kernel_version-512 # pointing to kernel version string # above section of header is compatible # with loadlin-1.5 (header v1.5). Don't # change it. type_of_loader: .byte 0 # 0 means ancient bootloader, newer # bootloaders know to change this. # See Documentation/x86/boot.txt for # assigned ids # flags, unused bits must be zero (RFU) bit within loadflags loadflags: .byte LOADED_HIGH # The kernel is to be loaded high setup_move_size: .word 0x8000 # size to move, when setup is not # loaded at 0x90000. We will move setup # to 0x90000 then just before jumping # into the kernel. However, only the # loader knows how much data behind # us also needs to be loaded. code32_start: # here loaders can put a different # start address for 32-bit code. .long 0x100000 # 0x100000 = default for big kernel ramdisk_image: .long 0 # address of loaded ramdisk image # Here the loader puts the 32-bit # address where it loaded the image. # This only will be read by the kernel. 
ramdisk_size: .long 0 # its size in bytes bootsect_kludge: .long 0 # obsolete heap_end_ptr: .word _end+STACK_SIZE-512 # (Header version 0x0201 or later) # space from here (exclusive) down to # end of setup code can be used by setup # for local heap purposes. ext_loader_ver: .byte 0 # Extended boot loader version ext_loader_type: .byte 0 # Extended boot loader type cmd_line_ptr: .long 0 # (Header version 0x0202 or later) # If nonzero, a 32-bit pointer # to the kernel command line. # The command line should be # located between the start of # setup and the end of low # memory (0xa0000), or it may # get overwritten before it # gets read. If this field is # used, there is no longer # anything magical about the # 0x90000 segment; the setup # can be located anywhere in # low memory 0x10000 or higher. initrd_addr_max: .long 0x7fffffff # (Header version 0x0203 or later) # The highest safe address for # the contents of an initrd # The current kernel allows up to 4 GB, # but leave it at 2 GB to avoid # possible bootloader bugs. 
kernel_alignment: .long CONFIG_PHYSICAL_ALIGN #physical addr alignment #required for protected mode #kernel #ifdef CONFIG_RELOCATABLE relocatable_kernel: .byte 1 #else relocatable_kernel: .byte 0 #endif min_alignment: .byte MIN_KERNEL_ALIGN_LG2 # minimum alignment xloadflags: #ifdef CONFIG_X86_64 # define XLF0 XLF_KERNEL_64 /* 64-bit kernel */ #else # define XLF0 0 #endif #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64) /* kernel/boot_param/ramdisk could be loaded above 4g */ # define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G #else # define XLF1 0 #endif #ifdef CONFIG_EFI_STUB # ifdef CONFIG_EFI_MIXED # define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64) # else # ifdef CONFIG_X86_64 # define XLF23 XLF_EFI_HANDOVER_64 /* 64-bit EFI handover ok */ # else # define XLF23 XLF_EFI_HANDOVER_32 /* 32-bit EFI handover ok */ # endif # endif #else # define XLF23 0 #endif #if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC_CORE) # define XLF4 XLF_EFI_KEXEC #else # define XLF4 0 #endif .word XLF0 | XLF1 | XLF23 | XLF4 cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, #added with boot protocol #version 2.06 hardware_subarch: .long 0 # subarchitecture, added with 2.07 # default to 0 for normal x86 PC hardware_subarch_data: .quad 0 payload_offset: .long ZO_input_data payload_length: .long ZO_z_input_len setup_data: .quad 0 # 64-bit physical pointer to # single linked list of # struct setup_data pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr # # Getting to provably safe in-place decompression is hard. Worst case # behaviours need to be analyzed. Here let's take the decompression of # a gzip-compressed kernel as example, to illustrate it: # # The file layout of gzip compressed kernel is: # # magic[2] # method[1] # flags[1] # timestamp[4] # extraflags[1] # os[1] # compressed data blocks[N] # crc[4] orig_len[4] # # ... resulting in +18 bytes overhead of uncompressed data. 
# # (For more information, please refer to RFC 1951 and RFC 1952.) # # Files divided into blocks # 1 bit (last block flag) # 2 bits (block type) # # 1 block occurs every 32K -1 bytes or when there 50% compression # has been achieved. The smallest block type encoding is always used. # # stored: # 32 bits length in bytes. # # fixed: # magic fixed tree. # symbols. # # dynamic: # dynamic tree encoding. # symbols. # # # The buffer for decompression in place is the length of the uncompressed # data, plus a small amount extra to keep the algorithm safe. The # compressed data is placed at the end of the buffer. The output pointer # is placed at the start of the buffer and the input pointer is placed # where the compressed data starts. Problems will occur when the output # pointer overruns the input pointer. # # The output pointer can only overrun the input pointer if the input # pointer is moving faster than the output pointer. A condition only # triggered by data whose compressed form is larger than the uncompressed # form. # # The worst case at the block level is a growth of the compressed data # of 5 bytes per 32767 bytes. # # The worst case internal to a compressed block is very hard to figure. # The worst case can at least be bounded by having one bit that represents # 32764 bytes and then all of the rest of the bytes representing the very # very last byte. # # All of which is enough to compute an amount of extra data that is required # to be safe. To avoid problems at the block level allocating 5 extra bytes # per 32767 bytes of data is sufficient. To avoid problems internal to a # block adding an extra 32767 bytes (the worst case uncompressed block size) # is sufficient, to ensure that in the worst case the decompressed data for # block will stop the byte before the compressed data for a block begins. # To avoid problems with the compressed data's meta information an extra 18 # bytes are needed. 
Leading to the formula: # # extra_bytes = (uncompressed_size >> 12) + 32768 + 18 # # Adding 8 bytes per 32K is a bit excessive but much easier to calculate. # Adding 32768 instead of 32767 just makes for round numbers. # # Above analysis is for decompressing gzip compressed kernel only. Up to # now 6 different decompressor are supported all together. And among them # xz stores data in chunks and has maximum chunk of 64K. Hence safety # margin should be updated to cover all decompressors so that we don't # need to deal with each of them separately. Please check # the description in lib/decompressor_xxx.c for specific information. # # extra_bytes = (uncompressed_size >> 12) + 65536 + 128 # # LZ4 is even worse: data that cannot be further compressed grows by 0.4%, # or one byte per 256 bytes. OTOH, we can safely get rid of the +128 as # the size-dependent part now grows so fast. # # extra_bytes = (uncompressed_size >> 8) + 65536 #define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 65536) #if ZO_z_output_len > ZO_z_input_len # define ZO_z_extract_offset (ZO_z_output_len + ZO_z_extra_bytes - \ ZO_z_input_len) #else # define ZO_z_extract_offset ZO_z_extra_bytes #endif /* * The extract_offset has to be bigger than ZO head section. Otherwise when * the head code is running to move ZO to the end of the buffer, it will * overwrite the head code itself. 
*/ #if (ZO__ehead - ZO_startup_32) > ZO_z_extract_offset # define ZO_z_min_extract_offset ((ZO__ehead - ZO_startup_32 + 4095) & ~4095) #else # define ZO_z_min_extract_offset ((ZO_z_extract_offset + 4095) & ~4095) #endif #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_min_extract_offset) #define VO_INIT_SIZE (VO__end - VO__text) #if ZO_INIT_SIZE > VO_INIT_SIZE # define INIT_SIZE ZO_INIT_SIZE #else # define INIT_SIZE VO_INIT_SIZE #endif init_size: .long INIT_SIZE # kernel initialization size handover_offset: .long 0 # Filled in by build.c # End of setup header ##################################################### .section ".entrytext", "ax" start_of_setup: # Force %es = %ds movw %ds, %ax movw %ax, %es cld # Apparently some ancient versions of LILO invoked the kernel with %ss != %ds, # which happened to work by accident for the old code. Recalculate the stack # pointer if %ss is invalid. Otherwise leave it alone, LOADLIN sets up the # stack behind its own code, so we can't blindly put it directly past the heap. movw %ss, %dx cmpw %ax, %dx # %ds == %ss? movw %sp, %dx je 2f # -> assume %sp is reasonably set # Invalid %ss, make up a new stack movw $_end, %dx testb $CAN_USE_HEAP, loadflags jz 1f movw heap_end_ptr, %dx 1: addw $STACK_SIZE, %dx jnc 2f xorw %dx, %dx # Prevent wraparound 2: # Now %dx should point to the end of our stack space andw $~3, %dx # dword align (might as well...) jnz 3f movw $0xfffc, %dx # Make sure we're not zero 3: movw %ax, %ss movzwl %dx, %esp # Clear upper half of %esp sti # Now we should have a working stack # We will have entered with %cs = %ds+0x20, normalize %cs so # it is on par with the other segments. pushw %ds pushw $6f lretw 6: # Check signature at end of setup cmpl $0x5a5aaa55, setup_sig jne setup_bad # Zero the bss movw $__bss_start, %di movw $_end+3, %cx xorl %eax, %eax subw %di, %cx shrw $2, %cx rep; stosl # Jump to C code (should not return) calll main # Setup corrupt somehow... 
setup_bad: movl $setup_corrupt, %eax calll puts # Fall through... .globl die .type die, @function die: hlt jmp die .size die, .-die .section ".initdata", "a" setup_corrupt: .byte 7 .string "No setup signature found...\n"
AirFortressIlikara/LS2K0300-linux-4.19
1,052
arch/x86/boot/copy.S
/* ----------------------------------------------------------------------- * * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. * * ----------------------------------------------------------------------- */ #include <linux/linkage.h> /* * Memory copy routines */ .code16 .text GLOBAL(memcpy) pushw %si pushw %di movw %ax, %di movw %dx, %si pushw %cx shrw $2, %cx rep; movsl popw %cx andw $3, %cx rep; movsb popw %di popw %si retl ENDPROC(memcpy) GLOBAL(memset) pushw %di movw %ax, %di movzbl %dl, %eax imull $0x01010101,%eax pushw %cx shrw $2, %cx rep; stosl popw %cx andw $3, %cx rep; stosb popw %di retl ENDPROC(memset) GLOBAL(copy_from_fs) pushw %ds pushw %fs popw %ds calll memcpy popw %ds retl ENDPROC(copy_from_fs) GLOBAL(copy_to_fs) pushw %es pushw %fs popw %es calll memcpy popw %es retl ENDPROC(copy_to_fs)
AirFortressIlikara/LS2K0300-linux-4.19
1,743
arch/x86/boot/pmjump.S
/* ----------------------------------------------------------------------- * * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. * * ----------------------------------------------------------------------- */ /* * The actual transition into protected mode */ #include <asm/boot.h> #include <asm/processor-flags.h> #include <asm/segment.h> #include <linux/linkage.h> .text .code16 /* * void protected_mode_jump(u32 entrypoint, u32 bootparams); */ GLOBAL(protected_mode_jump) movl %edx, %esi # Pointer to boot_params table xorl %ebx, %ebx movw %cs, %bx shll $4, %ebx addl %ebx, 2f jmp 1f # Short jump to serialize on 386/486 1: movw $__BOOT_DS, %cx movw $__BOOT_TSS, %di movl %cr0, %edx orb $X86_CR0_PE, %dl # Protected mode movl %edx, %cr0 # Transition to 32-bit mode .byte 0x66, 0xea # ljmpl opcode 2: .long in_pm32 # offset .word __BOOT_CS # segment ENDPROC(protected_mode_jump) .code32 .section ".text32","ax" GLOBAL(in_pm32) # Set up data segments for flat 32-bit mode movl %ecx, %ds movl %ecx, %es movl %ecx, %fs movl %ecx, %gs movl %ecx, %ss # The 32-bit code sets up its own stack, but this way we do have # a valid stack if some debugging hack wants to use it. addl %ebx, %esp # Set up TR to make Intel VT happy ltr %di # Clear registers to allow for future extensions to the # 32-bit boot protocol xorl %ecx, %ecx xorl %edx, %edx xorl %ebx, %ebx xorl %ebp, %ebp xorl %edi, %edi # Set up LDTR to make Intel VT happy lldt %cx jmpl *%eax # Jump to the 32-bit entrypoint ENDPROC(in_pm32)
AirFortressIlikara/LS2K0300-linux-4.19
49,248
arch/x86/entry/entry_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/x86_64/entry.S * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * * entry.S contains the system-call and fault low-level handling routines. * * Some of this is documented in Documentation/x86/entry_64.txt * * A note on terminology: * - iret frame: Architecture defined interrupt frame from SS to RIP * at the top of the kernel process stack. * * Some macro usage: * - ENTRY/END: Define functions in the symbol table. * - TRACE_IRQ_*: Trace hardirq state for lock debugging. * - idtentry: Define exception entry points. */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/msr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/hw_irq.h> #include <asm/page_types.h> #include <asm/irqflags.h> #include <asm/paravirt.h> #include <asm/percpu.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/pgtable_types.h> #include <asm/export.h> #include <asm/frame.h> #include <asm/nospec-branch.h> #include <linux/err.h> #include "calling.h" .code64 .section .entry.text, "ax" #ifdef CONFIG_PARAVIRT ENTRY(native_usergs_sysret64) UNWIND_HINT_EMPTY swapgs sysretq END(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ .macro TRACE_IRQS_FLAGS flags:req #ifdef CONFIG_TRACE_IRQFLAGS btl $9, \flags /* interrupts off? */ jnc 1f TRACE_IRQS_ON 1: #endif .endm .macro TRACE_IRQS_IRETQ TRACE_IRQS_FLAGS EFLAGS(%rsp) .endm /* * When dynamic function tracer is enabled it will add a breakpoint * to all locations that it is about to modify, sync CPUs, update * all the code, sync CPUs, then remove the breakpoints. In this time * if lockdep is enabled, it might jump back into the debug handler * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF). 
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

/* Wrap the irqflags-tracing hooks so they run on the current (debug) stack. */
.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	btl	$9, EFLAGS(%rsp)		/* interrupts off? (bit 9 = IF) */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

	.pushsection .entry_trampoline, "ax"

/*
 * The code in here gets remapped into cpu_entry_area's trampoline.  This means
 * that the assembler and linker have the wrong idea as to where this code
 * lives (and, in fact, it's mapped more than once, so it's not even at a
 * fixed address).  So we can't reference any symbols outside the entry
 * trampoline and expect it to work.
 *
 * Instead, we carefully abuse %rip-relative addressing.
 * _entry_trampoline(%rip) refers to the start of the remapped entry
 * trampoline.  We can thus find cpu_entry_area with this macro:
 */

#define CPU_ENTRY_AREA \
	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)

/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
#define RSP_SCRATCH	CPU_ENTRY_AREA_entry_stack + \
			SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA

ENTRY(entry_SYSCALL_64_trampoline)
	UNWIND_HINT_EMPTY
	swapgs

	/* Stash the user RSP. */
	movq	%rsp, RSP_SCRATCH

	/* Note: using %rsp as a scratch reg. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Load the top of the task stack into RSP */
	movq	CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp

	/* Start building the simulated IRET frame. */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	RSP_SCRATCH			/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */

	/*
	 * x86 lacks a near absolute jump, and we can't jump to the real
	 * entry text with a relative jump.  We could push the target
	 * address and then use retq, but this destroys the pipeline on
	 * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
	 * spill RDI and restore it in a second-stage trampoline.
	 */
	pushq	%rdi
	movq	$entry_SYSCALL_64_stage2, %rdi
	JMP_NOSPEC %rdi
END(entry_SYSCALL_64_trampoline)

	.popsection

ENTRY(entry_SYSCALL_64_stage2)
	UNWIND_HINT_EMPTY
	popq	%rdi
	jmp	entry_SYSCALL_64_after_hwframe
END(entry_SYSCALL_64_stage2)

ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/*
	 * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
	 * is not required to switch CR3.
	 */
	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)		/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	TRACE_IRQS_OFF

	/* IRQs are off. */
	movq	%rax, %rdi
	movq	%rsp, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	TRACE_IRQS_IRETQ		/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	/* Propagate the next task's stack canary into the per-cpu copy. */
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC %rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)

	.align 8
ENTRY(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_spurious
	.align	8
	vector=vector+1
    .endr
END(spurious_entries_start)

/* Debug-only sanity check: ud2 if interrupts are enabled when they must not be. */
.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
	DEBUG_ENTRY_ASSERT_IRQS_OFF

	.if \save_ret
	/*
	 * If save_ret is set, the original stack contains one additional
	 * entry -- the return address. Therefore, move the address one
	 * entry below %rsp to \old_rsp.
	 */
	leaq	8(%rsp), \old_rsp
	.else
	movq	%rsp, \old_rsp
	.endif

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage.  This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack.  For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */
	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(irq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here.  Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif

	.if \save_ret
	/*
	 * Push the return address to the stack. This return address can
	 * be found at the "real" original RSP, which was offset by 8 at
	 * the beginning of this macro.
	 */
	pushq	-8(\old_rsp)
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm

/*
 * Interrupt entry helper function.
 *
 * Entry runs with interrupts off. Stack layout at entry:
 * +----------------------------------------------------+
 * | regs->ss						|
 * | regs->rsp						|
 * | regs->eflags					|
 * | regs->cs						|
 * | regs->ip						|
 * +----------------------------------------------------+
 * | regs->orig_ax = ~(interrupt number)		|
 * +----------------------------------------------------+
 * | return address					|
 * +----------------------------------------------------+
 */
ENTRY(interrupt_entry)
	UNWIND_HINT_IRET_REGS offset=16
	ASM_CLAC
	cld

	testb	$3, CS-ORIG_RAX+8(%rsp)		/* interrupted user mode? */
	jz	1f
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/*
	 * Switch to the thread stack. The IRET frame and orig_ax are
	 * on the stack, as well as the return address. RDI..R12 are
	 * not (yet) on the stack and space has not (yet) been
	 * allocated for them.
	 */
	pushq	%rdi

	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	 /*
	  * We have RDI, return address, and orig_ax on the stack on
	  * top of the IRET frame. That means offset=24
	  */
	UNWIND_HINT_IRET_REGS base=%rdi offset=24

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	UNWIND_HINT_IRET_REGS
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */

	movq	(%rdi), %rdi		/* restore caller's RDI */
	jmp	2f
1:
	FENCE_SWAPGS_KERNEL_ENTRY
2:
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	testb	$3, CS+8(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	ret
END(interrupt_entry)
_ASM_NOKPROBE(interrupt_entry)

/* Interrupt entry/exit. */

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_spurious/interrupt.
 */
common_spurious:
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	smp_spurious_interrupt		/* rdi points to pt_regs */
	jmp	ret_from_intr
END(common_spurious)
_ASM_NOKPROBE(common_spurious)

/* common_interrupt is a hotpath.
 Align it */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	do_IRQ	/* rdi points to pt_regs */
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)		/* SS has TI bit set => LDT */
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)
_ASM_NOKPROBE(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	pushq	$~(\num)
.Lcommon_\sym:
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	\do_sym	/* rdi points to pt_regs */
	jmp	ret_from_intr
END(\sym)
_ASM_NOKPROBE(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	.if \paranoid == 1
	testb	$3, CS-ORIG_RAX(%rsp)		/* If coming from userspace, switch stacks */
	jnz	.Lfrom_usermode_switch_stack_\@
	.endif

	.if \create_gap == 1
	/*
	 * If coming from kernel space, create a 6-word gap to allow the
	 * int3 handler to emulate a call instruction.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_no_gap_\@
	.rept	6
	pushq	5*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	.if \paranoid
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
.Lfrom_usermode_switch_stack_\@:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit
	.endif
_ASM_NOKPROBE(\sym)
END(\sym)
.endm

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0

/*
 * Reload gs selector with exception handling
 * edi:  new selector
 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	TRACE_IRQS_OFF
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	TRACE_IRQS_FLAGS (%rsp)
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off.
 */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
ENDPROC(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */

/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment).  Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	PUSH_AND_CLEAR_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler

apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
	hyperv_reenlightenment_vector hyperv_reenlightenment_intr

apicinterrupt3 HYPERV_STIMER0_VECTOR \
	hv_stimer0_callback_vector hv_stimer0_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	create_gap=1
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xennmi			do_nmi			has_error_code=0
idtentry xendebug		do_debug		has_error_code=0
#endif

idtentry general_protection	do_general_protection	has_error_code=1
idtentry page_fault		do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check		do_mce			has_error_code=0 paranoid=1
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx

1:
	/*
	 * Always stash CR3 in %r14.  This value will be restored,
	 * verbatim, at exit.  Needed if paranoid_entry interrupted
	 * another entry that already switched to the user CR3 value
	 * but has not yet returned to userspace.
	 *
	 * This is also why CS (stashed in the "iret frame" by the
	 * hardware at entry) can not be used: this may be a return
	 * to kernel code, but with a user CR3 value.
	 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	/*
	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
	 * unconditional CR3 write, even in the PTI case.  So do an lfence
	 * to prevent GS speculation, regardless of whether PTI is enabled.
	 */
	FENCE_SWAPGS_KERNEL_ENTRY

	ret
END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason
 * to try to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	.Lparanoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
	SWAPGS_UNSAFE_STACK
	jmp	.Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
.Lparanoid_exit_restore:
	jmp restore_regs_and_return_to_kernel
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch GS if needed.
 */
ENTRY(error_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12

	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).
	 */
	TRACE_IRQS_OFF
	CALL_enter_from_user_mode
	ret

.Lerror_entry_done_lfence:
	FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done_lfence

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.
 We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
	jmp .Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)

/* Dispatch to the kernel or user return path based on saved CS RPL. */
ENTRY(error_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	testb	$3, CS(%rsp)
	jz	retint_kernel
	jmp	retint_user
END(error_exit)

/*
 * Runs on exception stack.  Xen PV does not go through this path at all,
 * so we can use real assembly here.
 *
 * Registers:
 *	%r14: Used to save/restore the CR3 of the interrupted context
 *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
 */
ENTRY(nmi)
	UNWIND_HINT_IRET_REGS

	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *  Check the a special location on the stack that contains
	 *  a variable that is set when NMIs are executing.
	 *  The interrupted task's stack is also checked to see if it
	 *  is an NMI stack.
* If the variable is not set and the stack is not the NMI * stack then: * o Set the special variable on the stack * o Copy the interrupt frame into an "outermost" location on the * stack * o Copy the interrupt frame into an "iret" location on the stack * o Continue processing the NMI * If the variable is set or the previous stack is the NMI stack: * o Modify the "iret" location to jump to the repeat_nmi * o return back to the first NMI * * Now on exit of the first NMI, we first clear the stack variable * The NMI stack will tell any nested NMIs at that point that it is * nested. Then we pop the stack normally with iret, and if there was * a nested NMI that updated the copy interrupt stack frame, a * jump will be made to the repeat_nmi code that will handle the second * NMI. * * However, espfix prevents us from directly returning to userspace * with a single IRET instruction. Similarly, IRET to user mode * can fault. We therefore handle NMIs from user space like * other IST entries. */ ASM_CLAC /* Use %rdx as our temp variable throughout */ pushq %rdx testb $3, CS-RIP+8(%rsp) jz .Lnmi_from_kernel /* * NMI from user mode. We need to run on the thread stack, but we * can't go through the normal entry paths: NMIs are masked, and * we don't want to enable interrupts, because then we'll end * up in an awkward situation in which IRQs are on but NMIs * are off. * * We also must not push anything to the stack before switching * stacks lest we corrupt the "NMI executing" variable. 
*/ swapgs cld FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT_IRET_REGS base=%rdx offset=8 pushq 5*8(%rdx) /* pt_regs->ss */ pushq 4*8(%rdx) /* pt_regs->rsp */ pushq 3*8(%rdx) /* pt_regs->flags */ pushq 2*8(%rdx) /* pt_regs->cs */ pushq 1*8(%rdx) /* pt_regs->rip */ UNWIND_HINT_IRET_REGS pushq $-1 /* pt_regs->orig_ax */ PUSH_AND_CLEAR_REGS rdx=(%rdx) ENCODE_FRAME_POINTER /* * At this point we no longer need to worry about stack damage * due to nesting -- we're on the normal thread stack and we're * done with the NMI stack. */ movq %rsp, %rdi movq $-1, %rsi call do_nmi /* * Return back to user mode. We must *not* do the normal exit * work, because we don't want to enable interrupts. */ jmp swapgs_restore_regs_and_return_to_usermode .Lnmi_from_kernel: /* * Here's what our stack frame will look like: * +---------------------------------------------------------+ * | original SS | * | original Return RSP | * | original RFLAGS | * | original CS | * | original RIP | * +---------------------------------------------------------+ * | temp storage for rdx | * +---------------------------------------------------------+ * | "NMI executing" variable | * +---------------------------------------------------------+ * | iret SS } Copied from "outermost" frame | * | iret Return RSP } on each loop iteration; overwritten | * | iret RFLAGS } by a nested NMI to force another | * | iret CS } iteration if needed. | * | iret RIP } | * +---------------------------------------------------------+ * | outermost SS } initialized in first_nmi; | * | outermost Return RSP } will not be changed before | * | outermost RFLAGS } NMI processing is done. | * | outermost CS } Copied to "iret" frame on each | * | outermost RIP } iteration. 
| * +---------------------------------------------------------+ * | pt_regs | * +---------------------------------------------------------+ * * The "original" frame is used by hardware. Before re-enabling * NMIs, we need to be done with it, and we need to leave enough * space for the asm code here. * * We return by executing IRET while RSP points to the "iret" frame. * That will either return for real or it will loop back into NMI * processing. * * The "outermost" frame is copied to the "iret" frame on each * iteration of the loop, so each iteration starts with the "iret" * frame pointing to the final return target. */ /* * Determine whether we're a nested NMI. * * If we interrupted kernel code between repeat_nmi and * end_repeat_nmi, then we are a nested NMI. We must not * modify the "iret" frame because it's being written by * the outer NMI. That's okay; the outer NMI handler is * about to about to call do_nmi anyway, so we can just * resume the outer NMI. */ movq $repeat_nmi, %rdx cmpq 8(%rsp), %rdx ja 1f movq $end_repeat_nmi, %rdx cmpq 8(%rsp), %rdx ja nested_nmi_out 1: /* * Now check "NMI executing". If it's set, then we're nested. * This will not detect if we interrupted an outer NMI just * before IRET. */ cmpl $1, -8(%rsp) je nested_nmi /* * Now test if the previous stack was an NMI stack. This covers * the case where we interrupt an outer NMI after it clears * "NMI executing" but before IRET. We need to be careful, though: * there is one case in which RSP could point to the NMI stack * despite there being no NMI active: naughty userspace controls * RSP at the very beginning of the SYSCALL targets. We can * pull a fast one on naughty userspace, though: we program * SYSCALL to mask DF, so userspace cannot cause DF to be set * if it controls the kernel's RSP. We set DF before we clear * "NMI executing". 
*/ lea 6*8(%rsp), %rdx /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ cmpq %rdx, 4*8(%rsp) /* If the stack pointer is above the NMI stack, this is a normal NMI */ ja first_nmi subq $EXCEPTION_STKSZ, %rdx cmpq %rdx, 4*8(%rsp) /* If it is below the NMI stack, it is a normal NMI */ jb first_nmi /* Ah, it is within the NMI stack. */ testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) jz first_nmi /* RSP was user controlled. */ /* This is a nested NMI. */ nested_nmi: /* * Modify the "iret" frame to point to repeat_nmi, forcing another * iteration of NMI handling. */ subq $8, %rsp leaq -10*8(%rsp), %rdx pushq $__KERNEL_DS pushq %rdx pushfq pushq $__KERNEL_CS pushq $repeat_nmi /* Put stack back */ addq $(6*8), %rsp nested_nmi_out: popq %rdx /* We are returning to kernel mode, so this cannot result in a fault. */ iretq first_nmi: /* Restore rdx. */ movq (%rsp), %rdx /* Make room for "NMI executing". */ pushq $0 /* Leave room for the "iret" frame */ subq $(5*8), %rsp /* Copy the "original" frame to the "outermost" frame */ .rept 5 pushq 11*8(%rsp) .endr UNWIND_HINT_IRET_REGS /* Everything up to here is safe from nested NMIs */ #ifdef CONFIG_DEBUG_ENTRY /* * For ease of testing, unmask NMIs right away. Disabled by * default because IRET is very expensive. */ pushq $0 /* SS */ pushq %rsp /* RSP (minus 8 because of the previous push) */ addq $8, (%rsp) /* Fix up RSP */ pushfq /* RFLAGS */ pushq $__KERNEL_CS /* CS */ pushq $1f /* RIP */ iretq /* continues at repeat_nmi below */ UNWIND_HINT_IRET_REGS 1: #endif repeat_nmi: /* * If there was a nested NMI, the first NMI's iret will return * here. But NMIs are still enabled and we can take another * nested NMI. The nested NMI checks the interrupted RIP to see * if it is between repeat_nmi and end_repeat_nmi, and if so * it will just return, as we are about to repeat an NMI anyway. * This makes it safe to copy to the stack frame that a nested * NMI will update. * * RSP is pointing to "outermost RIP". 
gsbase is unknown, but, if * we're repeating an NMI, gsbase has the same value that it had on * the first iteration. paranoid_entry will load the kernel * gsbase if needed before we call do_nmi. "NMI executing" * is zero. */ movq $1, 10*8(%rsp) /* Set "NMI executing". */ /* * Copy the "outermost" frame to the "iret" frame. NMIs that nest * here must not modify the "iret" frame while we're writing to * it or it will end up containing garbage. */ addq $(10*8), %rsp .rept 5 pushq -6*8(%rsp) .endr subq $(5*8), %rsp end_repeat_nmi: /* * Everything below this point can be preempted by a nested NMI. * If this happens, then the inner NMI will change the "iret" * frame to point back to repeat_nmi. */ pushq $-1 /* ORIG_RAX: no syscall to restart */ /* * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit * as we should not be calling schedule in NMI context. * Even with normal interrupts enabled. An NMI should not be * setting NEED_RESCHED or anything that normal interrupts and * exceptions might do. */ call paranoid_entry UNWIND_HINT_REGS /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ movq %rsp, %rdi movq $-1, %rsi call do_nmi /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 testl %ebx, %ebx /* swapgs needed? */ jnz nmi_restore nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: POP_REGS /* * Skip orig_ax and the "outermost" frame to point RSP at the "iret" * at the "iret" frame. */ addq $6*8, %rsp /* * Clear "NMI executing". Set DF first so that we can easily * distinguish the remaining code between here and IRET from * the SYSCALL entry and exit paths. * * We arguably should just inspect RIP instead, but I (Andy) wrote * this code when I had the misapprehension that Xen PV supported * NMIs, and Xen PV would break that approach. */ std movq $0, 5*8(%rsp) /* clear "NMI executing" */ /* * iretq reads the "iret" frame and exits the NMI stack in a * single instruction. 
We are returning to kernel mode, so this * cannot result in a fault. Similarly, we don't need to worry * about espfix64 on the way back to kernel mode. */ iretq END(nmi) ENTRY(ignore_sysret) UNWIND_HINT_EMPTY mov $-ENOSYS, %eax sysret END(ignore_sysret) ENTRY(rewind_stack_do_exit) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp movq PER_CPU_VAR(cpu_current_top_of_stack), %rax leaq -PTREGS_SIZE(%rax), %rsp UNWIND_HINT_REGS call do_exit END(rewind_stack_do_exit)
AirFortressIlikara/LS2K0300-linux-4.19
37,464
arch/x86/entry/entry_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1991,1992 Linus Torvalds * * entry_32.S contains the system-call and low-level fault and trap handling routines. * * Stack layout while running C code: * ptrace needs to have all registers on the stack. * If the order here is changed, it needs to be * updated in fork.c:copy_process(), signal.c:do_signal(), * ptrace.c and ptrace.h * * 0(%esp) - %ebx * 4(%esp) - %ecx * 8(%esp) - %edx * C(%esp) - %esi * 10(%esp) - %edi * 14(%esp) - %ebp * 18(%esp) - %eax * 1C(%esp) - %ds * 20(%esp) - %es * 24(%esp) - %fs * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS * 2C(%esp) - orig_eax * 30(%esp) - %eip * 34(%esp) - %cs * 38(%esp) - %eflags * 3C(%esp) - %oldesp * 40(%esp) - %oldss */ #include <linux/linkage.h> #include <linux/err.h> #include <asm/thread_info.h> #include <asm/irqflags.h> #include <asm/errno.h> #include <asm/segment.h> #include <asm/smp.h> #include <asm/percpu.h> #include <asm/processor-flags.h> #include <asm/irq_vectors.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/frame.h> #include <asm/nospec-branch.h> .section .entry.text, "ax" /* * We use macros for low-level operations which need to be overridden * for paravirtualization. The following will never clobber any registers: * INTERRUPT_RETURN (aka. "iret") * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax") * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit"). * * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY). * Allowing a register to be clobbered can shrink the paravirt replacement * enough to patch inline, increasing performance. 
*/ #ifdef CONFIG_PREEMPT # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF #else # define preempt_stop(clobbers) # define resume_kernel restore_all_kernel #endif .macro TRACE_IRQS_IRET #ifdef CONFIG_TRACE_IRQFLAGS testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off? jz 1f TRACE_IRQS_ON 1: #endif .endm #define PTI_SWITCH_MASK (1 << PAGE_SHIFT) /* * User gs save/restore * * %gs is used for userland TLS and kernel only uses it for stack * canary which is required to be at %gs:20 by gcc. Read the comment * at the top of stackprotector.h for more info. * * Local labels 98 and 99 are used. */ #ifdef CONFIG_X86_32_LAZY_GS /* unfortunately push/pop can't be no-op */ .macro PUSH_GS pushl $0 .endm .macro POP_GS pop=0 addl $(4 + \pop), %esp .endm .macro POP_GS_EX .endm /* all the rest are no-op */ .macro PTGS_TO_GS .endm .macro PTGS_TO_GS_EX .endm .macro GS_TO_REG reg .endm .macro REG_TO_PTGS reg .endm .macro SET_KERNEL_GS reg .endm #else /* CONFIG_X86_32_LAZY_GS */ .macro PUSH_GS pushl %gs .endm .macro POP_GS pop=0 98: popl %gs .if \pop <> 0 add $\pop, %esp .endif .endm .macro POP_GS_EX .pushsection .fixup, "ax" 99: movl $0, (%esp) jmp 98b .popsection _ASM_EXTABLE(98b, 99b) .endm .macro PTGS_TO_GS 98: mov PT_GS(%esp), %gs .endm .macro PTGS_TO_GS_EX .pushsection .fixup, "ax" 99: movl $0, PT_GS(%esp) jmp 98b .popsection _ASM_EXTABLE(98b, 99b) .endm .macro GS_TO_REG reg movl %gs, \reg .endm .macro REG_TO_PTGS reg movl \reg, PT_GS(%esp) .endm .macro SET_KERNEL_GS reg movl $(__KERNEL_STACK_CANARY), \reg movl \reg, %gs .endm #endif /* CONFIG_X86_32_LAZY_GS */ /* Unconditionally switch to user cr3 */ .macro SWITCH_TO_USER_CR3 scratch_reg:req ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI movl %cr3, \scratch_reg orl $PTI_SWITCH_MASK, \scratch_reg movl \scratch_reg, %cr3 .Lend_\@: .endm .macro BUG_IF_WRONG_CR3 no_user_check=0 #ifdef CONFIG_DEBUG_ENTRY ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI .if \no_user_check == 0 /* coming from usermode? 
*/ testl $SEGMENT_RPL_MASK, PT_CS(%esp) jz .Lend_\@ .endif /* On user-cr3? */ movl %cr3, %eax testl $PTI_SWITCH_MASK, %eax jnz .Lend_\@ /* From userspace with kernel cr3 - BUG */ ud2 .Lend_\@: #endif .endm /* * Switch to kernel cr3 if not already loaded and return current cr3 in * \scratch_reg */ .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI movl %cr3, \scratch_reg /* Test if we are already on kernel CR3 */ testl $PTI_SWITCH_MASK, \scratch_reg jz .Lend_\@ andl $(~PTI_SWITCH_MASK), \scratch_reg movl \scratch_reg, %cr3 /* Return original CR3 in \scratch_reg */ orl $PTI_SWITCH_MASK, \scratch_reg .Lend_\@: .endm .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 cld PUSH_GS pushl %fs pushl %es pushl %ds pushl \pt_regs_ax pushl %ebp pushl %edi pushl %esi pushl %edx pushl %ecx pushl %ebx movl $(__USER_DS), %edx movl %edx, %ds movl %edx, %es movl $(__KERNEL_PERCPU), %edx movl %edx, %fs SET_KERNEL_GS %edx /* Switch to kernel stack if necessary */ .if \switch_stacks > 0 SWITCH_TO_KERNEL_STACK .endif .endm .macro SAVE_ALL_NMI cr3_reg:req SAVE_ALL BUG_IF_WRONG_CR3 /* * Now switch the CR3 when PTI is enabled. * * We can enter with either user or kernel cr3, the code will * store the old cr3 in \cr3_reg and switches to the kernel cr3 * if necessary. */ SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg .Lend_\@: .endm .macro RESTORE_INT_REGS popl %ebx popl %ecx popl %edx popl %esi popl %edi popl %ebp popl %eax .endm .macro RESTORE_REGS pop=0 RESTORE_INT_REGS 1: popl %ds 2: popl %es 3: popl %fs POP_GS \pop .pushsection .fixup, "ax" 4: movl $0, (%esp) jmp 1b 5: movl $0, (%esp) jmp 2b 6: movl $0, (%esp) jmp 3b .popsection _ASM_EXTABLE(1b, 4b) _ASM_EXTABLE(2b, 5b) _ASM_EXTABLE(3b, 6b) POP_GS_EX .endm .macro RESTORE_ALL_NMI cr3_reg:req pop=0 /* * Now switch the CR3 when PTI is enabled. * * We enter with kernel cr3 and switch the cr3 to the value * stored on \cr3_reg, which is either a user or a kernel cr3. 
*/ ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI testl $PTI_SWITCH_MASK, \cr3_reg jz .Lswitched_\@ /* User cr3 in \cr3_reg - write it to hardware cr3 */ movl \cr3_reg, %cr3 .Lswitched_\@: BUG_IF_WRONG_CR3 RESTORE_REGS pop=\pop .endm .macro CHECK_AND_APPLY_ESPFIX #ifdef CONFIG_X86_ESPFIX32 #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS /* * Warning: PT_OLDSS(%esp) contains the wrong/random values if we * are returning to the kernel. * See comments in process.c:copy_thread() for details. */ movb PT_OLDSS(%esp), %ah movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax jne .Lend_\@ # returning to user-space with LDT SS /* * Setup and switch to ESPFIX stack * * We're returning to userspace with a 16 bit stack. The CPU will not * restore the high word of ESP for us on executing iret... This is an * "official" bug of all the x86-compatible CPUs, which we can work * around to make dosemu and wine happy. We do this by preloading the * high word of ESP with the high word of the userspace ESP while * compensating for the offset by changing to the ESPFIX segment with * a base address that matches for the difference. 
*/ mov %esp, %edx /* load kernel esp */ mov PT_OLDESP(%esp), %eax /* load userspace esp */ mov %dx, %ax /* eax: new kernel esp */ sub %eax, %edx /* offset (low word is 0) */ shr $16, %edx mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ pushl $__ESPFIX_SS pushl %eax /* new kernel esp */ /* * Disable interrupts, but do not irqtrace this section: we * will soon execute iret and the tracer was already set to * the irqstate after the IRET: */ DISABLE_INTERRUPTS(CLBR_ANY) lss (%esp), %esp /* switch to espfix segment */ .Lend_\@: #endif /* CONFIG_X86_ESPFIX32 */ .endm /* * Called with pt_regs fully populated and kernel segments loaded, * so we can access PER_CPU and use the integer registers. * * We need to be very careful here with the %esp switch, because an NMI * can happen everywhere. If the NMI handler finds itself on the * entry-stack, it will overwrite the task-stack and everything we * copied there. So allocate the stack-frame on the task-stack and * switch to it before we do any copying. */ #define CS_FROM_ENTRY_STACK (1 << 31) #define CS_FROM_USER_CR3 (1 << 30) .macro SWITCH_TO_KERNEL_STACK ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV BUG_IF_WRONG_CR3 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax /* * %eax now contains the entry cr3 and we carry it forward in * that register for the time this macro runs */ /* * The high bits of the CS dword (__csh) are used for * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case * hardware didn't do this for us. */ andl $(0x0000ffff), PT_CS(%esp) /* Are we on the entry stack? Bail out if not! 
*/ movl PER_CPU_VAR(cpu_entry_area), %ecx addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx subl %esp, %ecx /* ecx = (end of entry_stack) - esp */ cmpl $SIZEOF_entry_stack, %ecx jae .Lend_\@ /* Load stack pointer into %esi and %edi */ movl %esp, %esi movl %esi, %edi /* Move %edi to the top of the entry stack */ andl $(MASK_entry_stack), %edi addl $(SIZEOF_entry_stack), %edi /* Load top of task-stack into %edi */ movl TSS_entry2task_stack(%edi), %edi /* Special case - entry from kernel mode via entry stack */ #ifdef CONFIG_VM86 movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS movb PT_CS(%esp), %cl andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx #else movl PT_CS(%esp), %ecx andl $SEGMENT_RPL_MASK, %ecx #endif cmpl $USER_RPL, %ecx jb .Lentry_from_kernel_\@ /* Bytes to copy */ movl $PTREGS_SIZE, %ecx #ifdef CONFIG_VM86 testl $X86_EFLAGS_VM, PT_EFLAGS(%esi) jz .Lcopy_pt_regs_\@ /* * Stack-frame contains 4 additional segment registers when * coming from VM86 mode */ addl $(4 * 4), %ecx #endif .Lcopy_pt_regs_\@: /* Allocate frame on task-stack */ subl %ecx, %edi /* Switch to task-stack */ movl %edi, %esp /* * We are now on the task-stack and can safely copy over the * stack-frame */ shrl $2, %ecx cld rep movsl jmp .Lend_\@ .Lentry_from_kernel_\@: /* * This handles the case when we enter the kernel from * kernel-mode and %esp points to the entry-stack. When this * happens we need to switch to the task-stack to run C code, * but switch back to the entry-stack again when we approach * iret and return to the interrupted code-path. This usually * happens when we hit an exception while restoring user-space * segment registers on the way back to user-space or when the * sysenter handler runs with eflags.tf set. * * When we switch to the task-stack here, we can't trust the * contents of the entry-stack anymore, as the exception handler * might be scheduled out or moved to another CPU. 
Therefore we * copy the complete entry-stack to the task-stack and set a * marker in the iret-frame (bit 31 of the CS dword) to detect * what we've done on the iret path. * * On the iret path we copy everything back and switch to the * entry-stack, so that the interrupted kernel code-path * continues on the same stack it was interrupted with. * * Be aware that an NMI can happen anytime in this code. * * %esi: Entry-Stack pointer (same as %esp) * %edi: Top of the task stack * %eax: CR3 on kernel entry */ /* Calculate number of bytes on the entry stack in %ecx */ movl %esi, %ecx /* %ecx to the top of entry-stack */ andl $(MASK_entry_stack), %ecx addl $(SIZEOF_entry_stack), %ecx /* Number of bytes on the entry stack to %ecx */ sub %esi, %ecx /* Mark stackframe as coming from entry stack */ orl $CS_FROM_ENTRY_STACK, PT_CS(%esp) /* * Test the cr3 used to enter the kernel and add a marker * so that we can switch back to it before iret. */ testl $PTI_SWITCH_MASK, %eax jz .Lcopy_pt_regs_\@ orl $CS_FROM_USER_CR3, PT_CS(%esp) /* * %esi and %edi are unchanged, %ecx contains the number of * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate * the stack-frame on task-stack and copy everything over */ jmp .Lcopy_pt_regs_\@ .Lend_\@: .endm /* * Switch back from the kernel stack to the entry stack. * * The %esp register must point to pt_regs on the task stack. It will * first calculate the size of the stack-frame to copy, depending on * whether we return to VM86 mode or not. With that it uses 'rep movsl' * to copy the contents of the stack over to the entry stack. * * We must be very careful here, as we can't trust the contents of the * task-stack once we switched to the entry-stack. When an NMI happens * while on the entry-stack, the NMI handler will switch back to the top * of the task stack, overwriting our stack-frame we are about to copy. * Therefore we switch the stack only after everything is copied over. 
*/ .macro SWITCH_TO_ENTRY_STACK ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV /* Bytes to copy */ movl $PTREGS_SIZE, %ecx #ifdef CONFIG_VM86 testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp) jz .Lcopy_pt_regs_\@ /* Additional 4 registers to copy when returning to VM86 mode */ addl $(4 * 4), %ecx .Lcopy_pt_regs_\@: #endif /* Initialize source and destination for movsl */ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi subl %ecx, %edi movl %esp, %esi /* Save future stack pointer in %ebx */ movl %edi, %ebx /* Copy over the stack-frame */ shrl $2, %ecx cld rep movsl /* * Switch to entry-stack - needs to happen after everything is * copied because the NMI handler will overwrite the task-stack * when on entry-stack */ movl %ebx, %esp .Lend_\@: .endm /* * This macro handles the case when we return to kernel-mode on the iret * path and have to switch back to the entry stack and/or user-cr3 * * See the comments below the .Lentry_from_kernel_\@ label in the * SWITCH_TO_KERNEL_STACK macro for more details. */ .macro PARANOID_EXIT_TO_KERNEL_MODE /* * Test if we entered the kernel with the entry-stack. Most * likely we did not, because this code only runs on the * return-to-kernel path. */ testl $CS_FROM_ENTRY_STACK, PT_CS(%esp) jz .Lend_\@ /* Unlikely slow-path */ /* Clear marker from stack-frame */ andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp) /* Copy the remaining task-stack contents to entry-stack */ movl %esp, %esi movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi /* Bytes on the task-stack to ecx */ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx subl %esi, %ecx /* Allocate stack-frame on entry-stack */ subl %ecx, %edi /* * Save future stack-pointer, we must not switch until the * copy is done, otherwise the NMI handler could destroy the * contents of the task-stack we are about to copy. 
*/ movl %edi, %ebx /* Do the copy */ shrl $2, %ecx cld rep movsl /* Safe to switch to entry-stack now */ movl %ebx, %esp /* * We came from entry-stack and need to check if we also need to * switch back to user cr3. */ testl $CS_FROM_USER_CR3, PT_CS(%esp) jz .Lend_\@ /* Clear marker from stack-frame */ andl $(~CS_FROM_USER_CR3), PT_CS(%esp) SWITCH_TO_USER_CR3 scratch_reg=%eax .Lend_\@: .endm /* * %eax: prev task * %edx: next task */ ENTRY(__switch_to_asm) /* * Save callee-saved registers * This must match the order in struct inactive_task_frame */ pushl %ebp pushl %ebx pushl %edi pushl %esi pushfl /* switch stack */ movl %esp, TASK_threadsp(%eax) movl TASK_threadsp(%edx), %esp #ifdef CONFIG_STACKPROTECTOR movl TASK_stack_canary(%edx), %ebx movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset #endif #ifdef CONFIG_RETPOLINE /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated * with userspace addresses. On CPUs where those concerns * exist, overwrite the RSB with entries which capture * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW #endif /* restore callee-saved registers */ popfl popl %esi popl %edi popl %ebx popl %ebp jmp __switch_to END(__switch_to_asm) /* * The unwinder expects the last frame on the stack to always be at the same * offset from the end of the page, which allows it to validate the stack. * Calling schedule_tail() directly would break that convention because its an * asmlinkage function so its argument has to be pushed on the stack. This * wrapper creates a proper "end of stack" frame header before the call. */ ENTRY(schedule_tail_wrapper) FRAME_BEGIN pushl %eax call schedule_tail popl %eax FRAME_END ret ENDPROC(schedule_tail_wrapper) /* * A newly forked process directly context switches into this address. 
* * eax: prev task we switched from * ebx: kernel thread func (NULL for user thread) * edi: kernel thread arg */ ENTRY(ret_from_fork) call schedule_tail_wrapper testl %ebx, %ebx jnz 1f /* kernel threads are uncommon */ 2: /* When we fork, we trace the syscall return in the child, too. */ movl %esp, %eax call syscall_return_slowpath jmp restore_all /* kernel thread */ 1: movl %edi, %eax CALL_NOSPEC %ebx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() * syscall. */ movl $0, PT_EAX(%esp) jmp 2b END(ret_from_fork) /* * Return to user mode is not as complex as all this looks, * but we want the default path for a system call return to * go as quickly as possible which is why some of this is * less clear than it otherwise should be. */ # userspace resumption stub bypassing syscall exit tracing ALIGN ret_from_exception: preempt_stop(CLBR_ANY) ret_from_intr: #ifdef CONFIG_VM86 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax #else /* * We can be coming here from child spawned by kernel_thread(). */ movl PT_CS(%esp), %eax andl $SEGMENT_RPL_MASK, %eax #endif cmpl $USER_RPL, %eax jb resume_kernel # not returning to v8086 or userspace ENTRY(resume_userspace) DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF movl %esp, %eax call prepare_exit_to_usermode jmp restore_all END(ret_from_exception) #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) .Lneed_resched: cmpl $0, PER_CPU_VAR(__preempt_count) jnz restore_all_kernel testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all_kernel call preempt_schedule_irq jmp .Lneed_resched END(resume_kernel) #endif GLOBAL(__begin_SYSENTER_singlestep_region) /* * All code from here through __end_SYSENTER_singlestep_region is subject * to being single-stepped if a user program sets TF and executes SYSENTER. 
* There is absolutely nothing that we can do to prevent this from happening * (thanks Intel!). To keep our handling of this situation as simple as * possible, we handle TF just like AC and NT, except that our #DB handler * will ignore all of the single-step traps generated in this range. */ #ifdef CONFIG_XEN /* * Xen doesn't set %esp to be precisely what the normal SYSENTER * entry point expects, so fix it up before using the normal path. */ ENTRY(xen_sysenter_target) addl $5*4, %esp /* remove xen-provided frame */ jmp .Lsysenter_past_esp #endif /* * 32-bit SYSENTER entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * if X86_FEATURE_SEP is available. This is the preferred system call * entry on 32-bit systems. * * The SYSENTER instruction, in principle, should *only* occur in the * vDSO. In practice, a small number of Android devices were shipped * with a copy of Bionic that inlined a SYSENTER instruction. This * never happened in any of Google's Bionic versions -- it only happened * in a narrow range of Intel-provided versions. * * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs. * IF and VM in RFLAGS are cleared (IOW: interrupts are off). * SYSENTER does not save anything on the stack, * and does not save old EIP (!!!), ESP, or EFLAGS. * * To avoid losing track of EFLAGS.VM (and thus potentially corrupting * user and/or vm86 state), we explicitly disable the SYSENTER * instruction in vm86 mode by reprogramming the MSRs. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp user stack * 0(%ebp) arg6 */ ENTRY(entry_SYSENTER_32) /* * On entry-stack with all userspace-regs live - save and * restore eflags and %eax to use it as scratch-reg for the cr3 * switch. 
*/ pushfl pushl %eax BUG_IF_WRONG_CR3 no_user_check=1 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax popl %eax popfl /* Stack empty again, switch to task stack */ movl TSS_entry2task_stack(%esp), %esp .Lsysenter_past_esp: pushl $__USER_DS /* pt_regs->ss */ pushl %ebp /* pt_regs->sp (stashed in bp) */ pushfl /* pt_regs->flags (except IF = 0) */ orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ pushl $__USER_CS /* pt_regs->cs */ pushl $0 /* pt_regs->ip = 0 (placeholder) */ pushl %eax /* pt_regs->orig_ax */ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */ /* * SYSENTER doesn't filter flags, so we need to clear NT, AC * and TF ourselves. To save a few cycles, we can check whether * either was set instead of doing an unconditional popfq. * This needs to happen before enabling interrupts so that * we don't get preempted with NT set. * * If TF is set, we will single-step all the way to here -- do_debug * will ignore all the traps. (Yes, this is slow, but so is * single-stepping in general. This allows us to avoid having * a more complicated code to handle the case where a user program * forces us to single-step through the SYSENTER entry code.) * * NB.: .Lsysenter_fix_flags is a label with the code under it moved * out-of-line as an optimization: NT is unlikely to be set in the * majority of the cases and instead of polluting the I$ unnecessarily, * we're keeping that code behind a branch which will predict as * not-taken and therefore its instructions won't be fetched. */ testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp) jnz .Lsysenter_fix_flags .Lsysenter_flags_fixed: /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. */ TRACE_IRQS_OFF movl %esp, %eax call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV /* Opportunistic SYSEXIT */ TRACE_IRQS_ON /* User mode traces as IRQs on. 
*/ /* * Setup entry stack - we keep the pointer in %eax and do the * switch after almost all user-state is restored. */ /* Load entry stack pointer and allocate frame for eflags/eax */ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax subl $(2*4), %eax /* Copy eflags and eax to entry stack */ movl PT_EFLAGS(%esp), %edi movl PT_EAX(%esp), %esi movl %edi, (%eax) movl %esi, 4(%eax) /* Restore user registers and segments */ movl PT_EIP(%esp), %edx /* pt_regs->ip */ movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */ 1: mov PT_FS(%esp), %fs PTGS_TO_GS popl %ebx /* pt_regs->bx */ addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */ popl %esi /* pt_regs->si */ popl %edi /* pt_regs->di */ popl %ebp /* pt_regs->bp */ /* Switch to entry stack */ movl %eax, %esp /* Now ready to switch the cr3 */ SWITCH_TO_USER_CR3 scratch_reg=%eax /* * Restore all flags except IF. (We restore IF separately because * STI gives a one-instruction window in which we won't be interrupted, * whereas POPF does not.) */ btrl $X86_EFLAGS_IF_BIT, (%esp) BUG_IF_WRONG_CR3 no_user_check=1 popfl popl %eax /* * Return back to the vDSO, which will pop ecx and edx. * Don't bother with DS and ES (they already contain __USER_DS). */ sti sysexit .pushsection .fixup, "ax" 2: movl $0, PT_FS(%esp) jmp 1b .popsection _ASM_EXTABLE(1b, 2b) PTGS_TO_GS_EX .Lsysenter_fix_flags: pushl $X86_EFLAGS_FIXED popfl jmp .Lsysenter_flags_fixed GLOBAL(__end_SYSENTER_singlestep_region) ENDPROC(entry_SYSENTER_32) /* * 32-bit legacy system call entry. * * 32-bit x86 Linux system calls traditionally used the INT $0x80 * instruction. INT $0x80 lands here. * * This entry point can be used by any 32-bit perform system calls. * Instances of INT $0x80 can be found inline in various programs and * libraries. It is also used by the vDSO's __kernel_vsyscall * fallback for hardware that doesn't support a faster entry method. 
* Restarted 32-bit system calls also fall back to INT $0x80 * regardless of what instruction was originally used to do the system * call. (64-bit programs can use INT $0x80 as well, but they can * only run on 64-bit kernels and therefore land in * entry_INT80_compat.) * * This is considered a slow path. It is not used by most libc * implementations on modern hardware except during process startup. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp arg6 */ ENTRY(entry_INT80_32) ASM_CLAC pushl %eax /* pt_regs->orig_ax */ SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */ /* * User mode is traced as though IRQs are on, and the interrupt gate * turned them off. */ TRACE_IRQS_OFF movl %esp, %eax call do_int80_syscall_32 .Lsyscall_32_done: restore_all: TRACE_IRQS_IRET SWITCH_TO_ENTRY_STACK .Lrestore_all_notrace: CHECK_AND_APPLY_ESPFIX .Lrestore_nocheck: /* Switch back to user CR3 */ SWITCH_TO_USER_CR3 scratch_reg=%eax BUG_IF_WRONG_CR3 /* Restore user state */ RESTORE_REGS pop=4 # skip orig_eax/error_code .Lirq_return: /* * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization * when returning from IPI handler and when returning from * scheduler to user-space. */ INTERRUPT_RETURN restore_all_kernel: TRACE_IRQS_IRET PARANOID_EXIT_TO_KERNEL_MODE BUG_IF_WRONG_CR3 RESTORE_REGS 4 jmp .Lirq_return .section .fixup, "ax" ENTRY(iret_exc ) pushl $0 # no error code pushl $do_iret_error #ifdef CONFIG_DEBUG_ENTRY /* * The stack-frame here is the one that iret faulted on, so its a * return-to-user frame. We are on kernel-cr3 because we come here from * the fixup code. This confuses the CR3 checker, so switch to user-cr3 * as the checker expects it. 
*/ pushl %eax SWITCH_TO_USER_CR3 scratch_reg=%eax popl %eax #endif jmp common_exception .previous _ASM_EXTABLE(.Lirq_return, iret_exc) ENDPROC(entry_INT80_32) .macro FIXUP_ESPFIX_STACK /* * Switch back for ESPFIX stack to the normal zerobased stack * * We can't call C functions using the ESPFIX stack. This code reads * the high word of the segment base from the GDT and swiches to the * normal stack and adjusts ESP with the matching offset. */ #ifdef CONFIG_X86_ESPFIX32 /* fixup the stack */ mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ shl $16, %eax addl %esp, %eax /* the adjusted stack pointer */ pushl $__KERNEL_DS pushl %eax lss (%esp), %esp /* switch to the normal stack segment */ #endif .endm .macro UNWIND_ESPFIX_STACK #ifdef CONFIG_X86_ESPFIX32 movl %ss, %eax /* see if on espfix stack */ cmpw $__ESPFIX_SS, %ax jne 27f movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %es /* switch to normal stack */ FIXUP_ESPFIX_STACK 27: #endif .endm /* * Build the entry stubs with some assembler magic. * We pack 1 stub into every 8-byte block. 
*/ .align 8 ENTRY(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ vector=vector+1 jmp common_interrupt .align 8 .endr END(irq_entries_start) #ifdef CONFIG_X86_LOCAL_APIC .align 8 ENTRY(spurious_entries_start) vector=FIRST_SYSTEM_VECTOR .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ vector=vector+1 jmp common_spurious .align 8 .endr END(spurious_entries_start) common_spurious: ASM_CLAC addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ SAVE_ALL switch_stacks=1 ENCODE_FRAME_POINTER TRACE_IRQS_OFF movl %esp, %eax call smp_spurious_interrupt jmp ret_from_intr ENDPROC(common_spurious) #endif /* * the CPU automatically disables interrupts when executing an IRQ vector, * so IRQ-flags tracing has to follow that: */ .p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: ASM_CLAC addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ SAVE_ALL switch_stacks=1 ENCODE_FRAME_POINTER TRACE_IRQS_OFF movl %esp, %eax call do_IRQ jmp ret_from_intr ENDPROC(common_interrupt) #define BUILD_INTERRUPT3(name, nr, fn) \ ENTRY(name) \ ASM_CLAC; \ pushl $~(nr); \ SAVE_ALL switch_stacks=1; \ ENCODE_FRAME_POINTER; \ TRACE_IRQS_OFF \ movl %esp, %eax; \ call fn; \ jmp ret_from_intr; \ ENDPROC(name) #define BUILD_INTERRUPT(name, nr) \ BUILD_INTERRUPT3(name, nr, smp_##name); \ /* The include is where all of the SMP etc. 
interrupts come from */ #include <asm/entry_arch.h> ENTRY(coprocessor_error) ASM_CLAC pushl $0 pushl $do_coprocessor_error jmp common_exception END(coprocessor_error) ENTRY(simd_coprocessor_error) ASM_CLAC pushl $0 #ifdef CONFIG_X86_INVD_BUG /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ ALTERNATIVE "pushl $do_general_protection", \ "pushl $do_simd_coprocessor_error", \ X86_FEATURE_XMM #else pushl $do_simd_coprocessor_error #endif jmp common_exception END(simd_coprocessor_error) ENTRY(device_not_available) ASM_CLAC pushl $-1 # mark this as an int pushl $do_device_not_available jmp common_exception END(device_not_available) #ifdef CONFIG_PARAVIRT ENTRY(native_iret) iret _ASM_EXTABLE(native_iret, iret_exc) END(native_iret) #endif ENTRY(overflow) ASM_CLAC pushl $0 pushl $do_overflow jmp common_exception END(overflow) ENTRY(bounds) ASM_CLAC pushl $0 pushl $do_bounds jmp common_exception END(bounds) ENTRY(invalid_op) ASM_CLAC pushl $0 pushl $do_invalid_op jmp common_exception END(invalid_op) ENTRY(coprocessor_segment_overrun) ASM_CLAC pushl $0 pushl $do_coprocessor_segment_overrun jmp common_exception END(coprocessor_segment_overrun) ENTRY(invalid_TSS) ASM_CLAC pushl $do_invalid_TSS jmp common_exception END(invalid_TSS) ENTRY(segment_not_present) ASM_CLAC pushl $do_segment_not_present jmp common_exception END(segment_not_present) ENTRY(stack_segment) ASM_CLAC pushl $do_stack_segment jmp common_exception END(stack_segment) ENTRY(alignment_check) ASM_CLAC pushl $do_alignment_check jmp common_exception END(alignment_check) ENTRY(divide_error) ASM_CLAC pushl $0 # no error code pushl $do_divide_error jmp common_exception END(divide_error) #ifdef CONFIG_X86_MCE ENTRY(machine_check) ASM_CLAC pushl $0 pushl machine_check_vector jmp common_exception END(machine_check) #endif ENTRY(spurious_interrupt_bug) ASM_CLAC pushl $0 pushl $do_spurious_interrupt_bug jmp common_exception END(spurious_interrupt_bug) #ifdef CONFIG_XEN ENTRY(xen_hypervisor_callback) 
pushl $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL ENCODE_FRAME_POINTER TRACE_IRQS_OFF /* * Check to see if we got the event in the critical * region in xen_iret_direct, after we've reenabled * events and checked for pending events. This simulates * iret instruction's behaviour where it delivers a * pending interrupt when enabling interrupts: */ movl PT_EIP(%esp), %eax cmpl $xen_iret_start_crit, %eax jb 1f cmpl $xen_iret_end_crit, %eax jae 1f jmp xen_iret_crit_fixup ENTRY(xen_do_upcall) 1: mov %esp, %eax call xen_evtchn_do_upcall #ifndef CONFIG_PREEMPT call xen_maybe_preempt_hcall #endif jmp ret_from_intr ENDPROC(xen_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. * We get here for two reasons: * 1. Fault while reloading DS, ES, FS or GS * 2. Fault while executing IRET * Category 1 we fix up by reattempting the load, and zeroing the segment * register if the load fails. * Category 2 we fix up by jumping to do_iret_error. We cannot use the * normal Linux return path in this case because if we use the IRET hypercall * to pop the stack frame we end up in an infinite loop of failsafe callbacks. * We distinguish between categories by maintaining a status value in EAX. 
*/ ENTRY(xen_failsafe_callback) pushl %eax movl $1, %eax 1: mov 4(%esp), %ds 2: mov 8(%esp), %es 3: mov 12(%esp), %fs 4: mov 16(%esp), %gs /* EAX == 0 => Category 1 (Bad segment) EAX != 0 => Category 2 (Bad IRET) */ testl %eax, %eax popl %eax lea 16(%esp), %esp jz 5f jmp iret_exc 5: pushl $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL ENCODE_FRAME_POINTER jmp ret_from_exception .section .fixup, "ax" 6: xorl %eax, %eax movl %eax, 4(%esp) jmp 1b 7: xorl %eax, %eax movl %eax, 8(%esp) jmp 2b 8: xorl %eax, %eax movl %eax, 12(%esp) jmp 3b 9: xorl %eax, %eax movl %eax, 16(%esp) jmp 4b .previous _ASM_EXTABLE(1b, 6b) _ASM_EXTABLE(2b, 7b) _ASM_EXTABLE(3b, 8b) _ASM_EXTABLE(4b, 9b) ENDPROC(xen_failsafe_callback) BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, xen_evtchn_do_upcall) #endif /* CONFIG_XEN */ #if IS_ENABLED(CONFIG_HYPERV) BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, hyperv_vector_handler) BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR, hyperv_reenlightenment_intr) BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, hv_stimer0_vector_handler) #endif /* CONFIG_HYPERV */ ENTRY(page_fault) ASM_CLAC pushl $do_page_fault ALIGN jmp common_exception END(page_fault) common_exception: /* the function address is in %gs's slot on the stack */ pushl %fs pushl %es pushl %ds pushl %eax movl $(__USER_DS), %eax movl %eax, %ds movl %eax, %es movl $(__KERNEL_PERCPU), %eax movl %eax, %fs pushl %ebp pushl %edi pushl %esi pushl %edx pushl %ecx pushl %ebx SWITCH_TO_KERNEL_STACK ENCODE_FRAME_POINTER cld UNWIND_ESPFIX_STACK GS_TO_REG %ecx movl PT_GS(%esp), %edi # get the function address movl PT_ORIG_EAX(%esp), %edx # get the error code movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart REG_TO_PTGS %ecx SET_KERNEL_GS %ecx TRACE_IRQS_OFF movl %esp, %eax # pt_regs pointer CALL_NOSPEC %edi jmp ret_from_exception END(common_exception) ENTRY(debug) /* * Entry from sysenter is now handled in 
common_exception */ ASM_CLAC pushl $-1 # mark this as an int pushl $do_debug jmp common_exception END(debug) /* * NMI is doubly nasty. It can happen on the first instruction of * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32 * switched stacks. We handle both conditions by simply checking whether we * interrupted kernel code running on the SYSENTER stack. */ ENTRY(nmi) ASM_CLAC #ifdef CONFIG_X86_ESPFIX32 pushl %eax movl %ss, %eax cmpw $__ESPFIX_SS, %ax popl %eax je .Lnmi_espfix_stack #endif pushl %eax # pt_regs->orig_ax SAVE_ALL_NMI cr3_reg=%edi ENCODE_FRAME_POINTER xorl %edx, %edx # zero error code movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ cmpl $SIZEOF_entry_stack, %ecx jb .Lnmi_from_sysenter_stack /* Not on SYSENTER stack. */ call do_nmi jmp .Lnmi_return .Lnmi_from_sysenter_stack: /* * We're on the SYSENTER stack. Switch off. No one (not even debug) * is using the thread stack right now, so it's safe for us to use it. 
*/ movl %esp, %ebx movl PER_CPU_VAR(cpu_current_top_of_stack), %esp call do_nmi movl %ebx, %esp .Lnmi_return: CHECK_AND_APPLY_ESPFIX RESTORE_ALL_NMI cr3_reg=%edi pop=4 jmp .Lirq_return #ifdef CONFIG_X86_ESPFIX32 .Lnmi_espfix_stack: /* * create the pointer to lss back */ pushl %ss pushl %esp addl $4, (%esp) /* copy the iret frame of 12 bytes */ .rept 3 pushl 16(%esp) .endr pushl %eax SAVE_ALL_NMI cr3_reg=%edi ENCODE_FRAME_POINTER FIXUP_ESPFIX_STACK # %eax == %esp xorl %edx, %edx # zero error code call do_nmi RESTORE_ALL_NMI cr3_reg=%edi lss 12+4(%esp), %esp # back to espfix stack jmp .Lirq_return #endif END(nmi) ENTRY(int3) ASM_CLAC pushl $-1 # mark this as an int SAVE_ALL switch_stacks=1 ENCODE_FRAME_POINTER TRACE_IRQS_OFF xorl %edx, %edx # zero error code movl %esp, %eax # pt_regs pointer call do_int3 jmp ret_from_exception END(int3) ENTRY(general_protection) ASM_CLAC pushl $do_general_protection jmp common_exception END(general_protection) #ifdef CONFIG_KVM_GUEST ENTRY(async_page_fault) ASM_CLAC pushl $do_async_page_fault jmp common_exception END(async_page_fault) #endif ENTRY(rewind_stack_do_exit) /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp movl PER_CPU_VAR(cpu_current_top_of_stack), %esi leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp call do_exit 1: jmp 1b END(rewind_stack_do_exit)
AirFortressIlikara/LS2K0300-linux-4.19
13,808
arch/x86/entry/entry_64_compat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Compatibility mode system call entry point for x86-64. * * Copyright 2000-2002 Andi Kleen, SuSE Labs. */ #include "calling.h" #include <asm/asm-offsets.h> #include <asm/current.h> #include <asm/errno.h> #include <asm/ia32_unistd.h> #include <asm/thread_info.h> #include <asm/segment.h> #include <asm/irqflags.h> #include <asm/asm.h> #include <asm/smap.h> #include <linux/linkage.h> #include <linux/err.h> .section .entry.text, "ax" /* * 32-bit SYSENTER entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * on 64-bit kernels running on Intel CPUs. * * The SYSENTER instruction, in principle, should *only* occur in the * vDSO. In practice, a small number of Android devices were shipped * with a copy of Bionic that inlined a SYSENTER instruction. This * never happened in any of Google's Bionic versions -- it only happened * in a narrow range of Intel-provided versions. * * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs. * IF and VM in RFLAGS are cleared (IOW: interrupts are off). * SYSENTER does not save anything on the stack, * and does not save old RIP (!!!), RSP, or RFLAGS. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp user stack * 0(%ebp) arg6 */ ENTRY(entry_SYSENTER_compat) /* Interrupts are off on entry. */ SWAPGS /* We are about to clobber %rsp anyway, clobbering here is OK */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* * User tracing code (ptrace or signal handlers) might assume that * the saved RAX contains a 32-bit number when we're invoking a 32-bit * syscall. Just in case the high bits are nonzero, zero-extend * the syscall number. (This could almost certainly be deleted * with no ill effects.) */ movl %eax, %eax /* Construct struct pt_regs on stack */ pushq $__USER32_DS /* pt_regs->ss */ pushq %rbp /* pt_regs->sp (stashed in bp) */ /* * Push flags. This is nasty. 
First, interrupts are currently * off, but we need pt_regs->flags to have IF set. Second, even * if TF was set when SYSENTER started, it's clear by now. We fix * that later using TIF_SINGLESTEP. */ pushfq /* pt_regs->flags (except IF = 0) */ orl $X86_EFLAGS_IF, (%rsp) /* Fix saved flags */ pushq $__USER32_CS /* pt_regs->cs */ pushq $0 /* pt_regs->ip = 0 (placeholder) */ pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ xorl %r8d, %r8d /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ xorl %r9d, %r9d /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ xorl %r10d, %r10d /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ xorl %r12d, %r12d /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ xorl %r13d, %r13d /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ xorl %r14d, %r14d /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ xorl %r15d, %r15d /* nospec r15 */ cld /* * SYSENTER doesn't filter flags, so we need to clear NT and AC * ourselves. To save a few cycles, we can check whether * either was set instead of doing an unconditional popfq. * This needs to happen before enabling interrupts so that * we don't get preempted with NT set. * * If TF is set, we will single-step all the way to here -- do_debug * will ignore all the traps. (Yes, this is slow, but so is * single-stepping in general. This allows us to avoid having * a more complicated code to handle the case where a user program * forces us to single-step through the SYSENTER entry code.) 
* * NB.: .Lsysenter_fix_flags is a label with the code under it moved * out-of-line as an optimization: NT is unlikely to be set in the * majority of the cases and instead of polluting the I$ unnecessarily, * we're keeping that code behind a branch which will predict as * not-taken and therefore its instructions won't be fetched. */ testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp) jnz .Lsysenter_fix_flags .Lsysenter_flags_fixed: /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. */ TRACE_IRQS_OFF movq %rsp, %rdi call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV jmp sysret32_from_system_call .Lsysenter_fix_flags: pushq $X86_EFLAGS_FIXED popfq jmp .Lsysenter_flags_fixed GLOBAL(__end_entry_SYSENTER_compat) ENDPROC(entry_SYSENTER_compat) /* * 32-bit SYSCALL entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * on 64-bit kernels running on AMD CPUs. * * The SYSCALL instruction, in principle, should *only* occur in the * vDSO. In practice, it appears that this really is the case. * As evidence: * * - The calling convention for SYSCALL has changed several times without * anyone noticing. * * - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, anything * user task that did SYSCALL without immediately reloading SS * would randomly crash. * * - Most programmers do not directly target AMD CPUs, and the 32-bit * SYSCALL instruction does not exist on Intel CPUs. Even on AMD * CPUs, Linux disables the SYSCALL instruction on 32-bit kernels * because the SYSCALL instruction in legacy/native 32-bit mode (as * opposed to compat mode) is sufficiently poorly designed as to be * essentially unusable. * * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves * RFLAGS to R11, then loads new SS, CS, and RIP from previously * programmed MSRs. 
RFLAGS gets masked by a value from another MSR * (so CLD and CLAC are not needed). SYSCALL does not save anything on * the stack and does not change RSP. * * Note: RFLAGS saving+masking-with-MSR happens only in Long mode * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it). * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes * or target CS descriptor's L bit (SYSCALL does not read segment descriptors). * * Arguments: * eax system call number * ecx return address * ebx arg1 * ebp arg2 (note: not saved in the stack frame, should not be touched) * edx arg3 * esi arg4 * edi arg5 * esp user stack * 0(%esp) arg6 */ ENTRY(entry_SYSCALL_compat) /* Interrupts are off on entry. */ swapgs /* Stash user ESP */ movl %esp, %r8d /* Use %rsp as scratch reg. User ESP is stashed in r8 */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp /* Switch to the kernel stack */ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* Construct struct pt_regs on stack */ pushq $__USER32_DS /* pt_regs->ss */ pushq %r8 /* pt_regs->sp */ pushq %r11 /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ GLOBAL(entry_SYSCALL_compat_after_hwframe) movl %eax, %eax /* discard orig_ax high bits */ pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ xorl %esi, %esi /* nospec si */ pushq %rdx /* pt_regs->dx */ xorl %edx, %edx /* nospec dx */ pushq %rbp /* pt_regs->cx (stashed in bp) */ xorl %ecx, %ecx /* nospec cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ xorl %r8d, %r8d /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ xorl %r9d, %r9d /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ xorl %r10d, %r10d /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ xorl %ebp, %ebp /* 
nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ xorl %r12d, %r12d /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ xorl %r13d, %r13d /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ xorl %r14d, %r14d /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ xorl %r15d, %r15d /* nospec r15 */ /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. */ TRACE_IRQS_OFF movq %rsp, %rdi call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV /* Opportunistic SYSRET */ sysret32_from_system_call: TRACE_IRQS_ON /* User mode traces as IRQs on. */ movq RBX(%rsp), %rbx /* pt_regs->rbx */ movq RBP(%rsp), %rbp /* pt_regs->rbp */ movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ movq RIP(%rsp), %rcx /* pt_regs->ip (in rcx) */ addq $RAX, %rsp /* Skip r8-r15 */ popq %rax /* pt_regs->rax */ popq %rdx /* Skip pt_regs->cx */ popq %rdx /* pt_regs->dx */ popq %rsi /* pt_regs->si */ popq %rdi /* pt_regs->di */ /* * USERGS_SYSRET32 does: * GSBASE = user's GS base * EIP = ECX * RFLAGS = R11 * CS = __USER32_CS * SS = __USER_DS * * ECX will not match pt_regs->cx, but we're returning to a vDSO * trampoline that will fix up RCX, so this is okay. * * R12-R15 are callee-saved, so they contain whatever was in them * when the system call started, which is already known to user * code. We zero R8-R10 to avoid info leaks. */ movq RSP-ORIG_RAX(%rsp), %rsp /* * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored * on the process stack which is not mapped to userspace and * not readable after we SWITCH_TO_USER_CR3. Delay the CR3 * switch until after after the last reference to the process * stack. * * %r8/%r9 are zeroed before the sysret, thus safe to clobber. */ SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d swapgs sysretl END(entry_SYSCALL_compat) /* * 32-bit legacy system call entry. 
* * 32-bit x86 Linux system calls traditionally used the INT $0x80 * instruction. INT $0x80 lands here. * * This entry point can be used by 32-bit and 64-bit programs to perform * 32-bit system calls. Instances of INT $0x80 can be found inline in * various programs and libraries. It is also used by the vDSO's * __kernel_vsyscall fallback for hardware that doesn't support a faster * entry method. Restarted 32-bit system calls also fall back to INT * $0x80 regardless of what instruction was originally used to do the * system call. * * This is considered a slow path. It is not used by most libc * implementations on modern hardware except during process startup. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp arg6 */ ENTRY(entry_INT80_compat) /* * Interrupts are off on entry. */ ASM_CLAC /* Do this early to minimize exposure */ SWAPGS /* * User tracing code (ptrace or signal handlers) might assume that * the saved RAX contains a 32-bit number when we're invoking a 32-bit * syscall. Just in case the high bits are nonzero, zero-extend * the syscall number. (This could almost certainly be deleted * with no ill effects.) */ movl %eax, %eax /* switch to thread stack expects orig_ax and rdi to be pushed */ pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ /* Need to switch before accessing the thread stack. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* In the Xen PV case we already run on the thread stack. 
*/ ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp pushq 6*8(%rdi) /* regs->ss */ pushq 5*8(%rdi) /* regs->rsp */ pushq 4*8(%rdi) /* regs->eflags */ pushq 3*8(%rdi) /* regs->cs */ pushq 2*8(%rdi) /* regs->ip */ pushq 1*8(%rdi) /* regs->orig_ax */ pushq (%rdi) /* pt_regs->di */ .Lint80_keep_stack: pushq %rsi /* pt_regs->si */ xorl %esi, %esi /* nospec si */ pushq %rdx /* pt_regs->dx */ xorl %edx, %edx /* nospec dx */ pushq %rcx /* pt_regs->cx */ xorl %ecx, %ecx /* nospec cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ xorl %r8d, %r8d /* nospec r8 */ pushq %r9 /* pt_regs->r9 */ xorl %r9d, %r9d /* nospec r9 */ pushq %r10 /* pt_regs->r10*/ xorl %r10d, %r10d /* nospec r10 */ pushq %r11 /* pt_regs->r11 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp */ xorl %ebp, %ebp /* nospec rbp */ pushq %r12 /* pt_regs->r12 */ xorl %r12d, %r12d /* nospec r12 */ pushq %r13 /* pt_regs->r13 */ xorl %r13d, %r13d /* nospec r13 */ pushq %r14 /* pt_regs->r14 */ xorl %r14d, %r14d /* nospec r14 */ pushq %r15 /* pt_regs->r15 */ xorl %r15d, %r15d /* nospec r15 */ cld /* * User mode is traced as though IRQs are on, and the interrupt * gate turned them off. */ TRACE_IRQS_OFF movq %rsp, %rdi call do_int80_syscall_32 .Lsyscall_32_done: /* Go back to user mode. */ TRACE_IRQS_ON jmp swapgs_restore_regs_and_return_to_usermode END(entry_INT80_compat)
AirFortressIlikara/LS2K0300-linux-4.19
1,605
arch/x86/entry/thunk_64.S
/* * Save registers before calling assembly functions. This avoids * disturbance of register allocation in some inline assembly constructs. * Copyright 2001,2002 by Andi Kleen, SuSE Labs. * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc. * Subject to the GNU public license, v.2. No warranty of any kind. */ #include <linux/linkage.h> #include "calling.h" #include <asm/asm.h> #include <asm/export.h> /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ .macro THUNK name, func, put_ret_addr_in_rdi=0 .globl \name .type \name, @function \name: pushq %rbp movq %rsp, %rbp pushq %rdi pushq %rsi pushq %rdx pushq %rcx pushq %rax pushq %r8 pushq %r9 pushq %r10 pushq %r11 .if \put_ret_addr_in_rdi /* 8(%rbp) is return addr on stack */ movq 8(%rbp), %rdi .endif call \func jmp .L_restore _ASM_NOKPROBE(\name) .endm #ifdef CONFIG_TRACE_IRQFLAGS THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1 THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1 #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC THUNK lockdep_sys_exit_thunk,lockdep_sys_exit #endif #ifdef CONFIG_PREEMPT THUNK ___preempt_schedule, preempt_schedule THUNK ___preempt_schedule_notrace, preempt_schedule_notrace EXPORT_SYMBOL(___preempt_schedule) EXPORT_SYMBOL(___preempt_schedule_notrace) #endif #if defined(CONFIG_TRACE_IRQFLAGS) \ || defined(CONFIG_DEBUG_LOCK_ALLOC) \ || defined(CONFIG_PREEMPT) .L_restore: popq %r11 popq %r10 popq %r9 popq %r8 popq %rax popq %rcx popq %rdx popq %rsi popq %rdi popq %rbp ret _ASM_NOKPROBE(.L_restore) #endif
AirFortressIlikara/LS2K0300-linux-4.19
4,432
arch/x86/mm/mem_encrypt_boot.S
/* * AMD Memory Encryption Support * * Copyright (C) 2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> #include <asm/nospec-branch.h> .text .code64 ENTRY(sme_encrypt_execute) /* * Entry parameters: * RDI - virtual address for the encrypted mapping * RSI - virtual address for the decrypted mapping * RDX - length to encrypt * RCX - virtual address of the encryption workarea, including: * - stack page (PAGE_SIZE) * - encryption routine page (PAGE_SIZE) * - intermediate copy buffer (PMD_PAGE_SIZE) * R8 - physcial address of the pagetables to use for encryption */ push %rbp movq %rsp, %rbp /* RBP now has original stack pointer */ /* Set up a one page stack in the non-encrypted memory area */ movq %rcx, %rax /* Workarea stack page */ leaq PAGE_SIZE(%rax), %rsp /* Set new stack pointer */ addq $PAGE_SIZE, %rax /* Workarea encryption routine */ push %r12 movq %rdi, %r10 /* Encrypted area */ movq %rsi, %r11 /* Decrypted area */ movq %rdx, %r12 /* Area length */ /* Copy encryption routine into the workarea */ movq %rax, %rdi /* Workarea encryption routine */ leaq __enc_copy(%rip), %rsi /* Encryption routine */ movq $(.L__enc_copy_end - __enc_copy), %rcx /* Encryption routine length */ rep movsb /* Setup registers for call */ movq %r10, %rdi /* Encrypted area */ movq %r11, %rsi /* Decrypted area */ movq %r8, %rdx /* Pagetables used for encryption */ movq %r12, %rcx /* Area length */ movq %rax, %r8 /* Workarea encryption routine */ addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ ANNOTATE_RETPOLINE_SAFE call *%rax /* Call the encryption routine */ pop %r12 movq %rbp, %rsp /* Restore original stack pointer */ pop 
%rbp ret ENDPROC(sme_encrypt_execute) ENTRY(__enc_copy) /* * Routine used to encrypt memory in place. * This routine must be run outside of the kernel proper since * the kernel will be encrypted during the process. So this * routine is defined here and then copied to an area outside * of the kernel where it will remain and run decrypted * during execution. * * On entry the registers must be: * RDI - virtual address for the encrypted mapping * RSI - virtual address for the decrypted mapping * RDX - address of the pagetables to use for encryption * RCX - length of area * R8 - intermediate copy buffer * * RAX - points to this routine * * The area will be encrypted by copying from the non-encrypted * memory space to an intermediate buffer and then copying from the * intermediate buffer back to the encrypted memory space. The physical * addresses of the two mappings are the same which results in the area * being encrypted "in place". */ /* Enable the new page tables */ mov %rdx, %cr3 /* Flush any global TLBs */ mov %cr4, %rdx andq $~X86_CR4_PGE, %rdx mov %rdx, %cr4 orq $X86_CR4_PGE, %rdx mov %rdx, %cr4 push %r15 push %r12 movq %rcx, %r9 /* Save area length */ movq %rdi, %r10 /* Save encrypted area address */ movq %rsi, %r11 /* Save decrypted area address */ /* Set the PAT register PA5 entry to write-protect */ movl $MSR_IA32_CR_PAT, %ecx rdmsr mov %rdx, %r15 /* Save original PAT value */ andl $0xffff00ff, %edx /* Clear PA5 */ orl $0x00000500, %edx /* Set PA5 to WP */ wrmsr wbinvd /* Invalidate any cache entries */ /* Copy/encrypt up to 2MB at a time */ movq $PMD_PAGE_SIZE, %r12 1: cmpq %r12, %r9 jnb 2f movq %r9, %r12 2: movq %r11, %rsi /* Source - decrypted area */ movq %r8, %rdi /* Dest - intermediate copy buffer */ movq %r12, %rcx rep movsb movq %r8, %rsi /* Source - intermediate copy buffer */ movq %r10, %rdi /* Dest - encrypted area */ movq %r12, %rcx rep movsb addq %r12, %r11 addq %r12, %r10 subq %r12, %r9 /* Kernel length decrement */ jnz 1b /* Kernel length not 
zero? */ /* Restore PAT register */ movl $MSR_IA32_CR_PAT, %ecx rdmsr mov %r15, %rdx /* Restore original PAT value */ wrmsr pop %r12 pop %r15 ret .L__enc_copy_end: ENDPROC(__enc_copy)
AirFortressIlikara/LS2K0300-linux-4.19
1,624
arch/x86/um/vdso/vdso-layout.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Linker script for vDSO. This is an ELF shared object prelinked to * its virtual address, and with only one read-only segment. * This script controls its layout. */ SECTIONS { . = VDSO_PRELINK + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) } :text .data : { *(.data*) *(.sdata*) *(.got.plt) *(.got) *(.gnu.linkonce.d.*) *(.bss*) *(.dynbss*) *(.gnu.linkonce.b.*) } .altinstructions : { *(.altinstructions) } .altinstr_replacement : { *(.altinstr_replacement) } /* * Align the actual code well away from the non-instruction data. * This is the best thing for the I-cache. */ . = ALIGN(0x100); .text : { *(.text*) } :text =0x90909090 } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; }
AirFortressIlikara/LS2K0300-linux-4.19
4,455
arch/x86/realmode/rm/trampoline_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Trampoline.S Derived from Setup.S by Linus Torvalds * * 4 Jan 1997 Michael Chastain: changed to gnu as. * 15 Sept 2005 Eric Biederman: 64bit PIC support * * Entry: CS:IP point to the start of our code, we are * in real mode with no stack, but the rest of the * trampoline page to make our stack and everything else * is a mystery. * * On entry to trampoline_start, the processor is in real mode * with 16-bit addressing and 16-bit data. CS has some value * and IP is zero. Thus, data addresses need to be absolute * (no relocation) and are taken with regard to r_base. * * With the addition of trampoline_level4_pgt this code can * now enter a 64bit kernel that lives at arbitrary 64bit * physical addresses. * * If you work on this file, check the object module with objdump * --full-contents --reloc to make sure there are no relocation * entries. */ #include <linux/linkage.h> #include <asm/pgtable_types.h> #include <asm/page_types.h> #include <asm/msr.h> #include <asm/segment.h> #include <asm/processor-flags.h> #include <asm/realmode.h> #include "realmode.h" .text .code16 .balign PAGE_SIZE ENTRY(trampoline_start) cli # We should be safe anyway wbinvd LJMPW_RM(1f) 1: mov %cs, %ax # Code and data in the same place mov %ax, %ds mov %ax, %es mov %ax, %ss movl $0xA5A5A5A5, trampoline_status # write marker for master knows we're running # Setup stack movl $rm_stack_end, %esp call verify_cpu # Verify the cpu supports long mode testl %eax, %eax # Check for return code jnz no_longmode /* * GDT tables in non default location kernel can be beyond 16MB and * lgdt will not be able to load the address as in real mode default * operand size is 16bit. Use lgdtl instead to force operand size * to 32 bit. 
*/ lidtl tr_idt # load idt with 0, 0 lgdtl tr_gdt # load gdt with whatever is appropriate movw $__KERNEL_DS, %dx # Data segment descriptor # Enable protected mode movl $X86_CR0_PE, %eax # protected mode (PE) bit movl %eax, %cr0 # into protected mode # flush prefetch and jump to startup_32 ljmpl $__KERNEL32_CS, $pa_startup_32 no_longmode: hlt jmp no_longmode #include "../kernel/verify_cpu.S" .section ".text32","ax" .code32 .balign 4 ENTRY(startup_32) movl %edx, %ss addl $pa_real_mode_base, %esp movl %edx, %ds movl %edx, %es movl %edx, %fs movl %edx, %gs /* * Check for memory encryption support. This is a safety net in * case BIOS hasn't done the necessary step of setting the bit in * the MSR for this AP. If SME is active and we've gotten this far * then it is safe for us to set the MSR bit and continue. If we * don't we'll eventually crash trying to execute encrypted * instructions. */ btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags jnc .Ldone movl $MSR_K8_SYSCFG, %ecx rdmsr bts $MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax jc .Ldone /* * Memory encryption is enabled but the SME enable bit for this * CPU has has not been set. It is safe to set it, so do so. */ wrmsr .Ldone: movl pa_tr_cr4, %eax movl %eax, %cr4 # Enable PAE mode # Setup trampoline 4 level pagetables movl $pa_trampoline_pgd, %eax movl %eax, %cr3 # Set up EFER movl pa_tr_efer, %eax movl pa_tr_efer + 4, %edx movl $MSR_EFER, %ecx wrmsr # Enable paging and in turn activate Long Mode movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax movl %eax, %cr0 /* * At this point we're in long mode but in 32bit compatibility mode * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use * the new gdt/idt that has __KERNEL_CS with CS.L = 1. 
*/ ljmpl $__KERNEL_CS, $pa_startup_64 .section ".text64","ax" .code64 .balign 4 ENTRY(startup_64) # Now jump into the kernel using virtual addresses jmpq *tr_start(%rip) .section ".rodata","a" # Duplicate the global descriptor table # so the kernel can live anywhere .balign 16 .globl tr_gdt tr_gdt: .short tr_gdt_end - tr_gdt - 1 # gdt limit .long pa_tr_gdt .short 0 .quad 0x00cf9b000000ffff # __KERNEL32_CS .quad 0x00af9b000000ffff # __KERNEL_CS .quad 0x00cf93000000ffff # __KERNEL_DS tr_gdt_end: .bss .balign PAGE_SIZE GLOBAL(trampoline_pgd) .space PAGE_SIZE .balign 8 GLOBAL(trampoline_header) tr_start: .space 8 GLOBAL(tr_efer) .space 8 GLOBAL(tr_cr4) .space 4 GLOBAL(tr_flags) .space 4 END(trampoline_header) #include "trampoline_common.S"
AirFortressIlikara/LS2K0300-linux-4.19
1,871
arch/x86/realmode/rm/trampoline_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Trampoline.S Derived from Setup.S by Linus Torvalds * * 4 Jan 1997 Michael Chastain: changed to gnu as. * * This is only used for booting secondary CPUs in SMP machine * * Entry: CS:IP point to the start of our code, we are * in real mode with no stack, but the rest of the * trampoline page to make our stack and everything else * is a mystery. * * We jump into arch/x86/kernel/head_32.S. * * On entry to trampoline_start, the processor is in real mode * with 16-bit addressing and 16-bit data. CS has some value * and IP is zero. Thus, we load CS to the physical segment * of the real mode code before doing anything further. */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include "realmode.h" .text .code16 .balign PAGE_SIZE ENTRY(trampoline_start) wbinvd # Needed for NUMA-Q should be harmless for others LJMPW_RM(1f) 1: mov %cs, %ax # Code and data in the same place mov %ax, %ds cli # We should be safe anyway movl tr_start, %eax # where we need to go movl $0xA5A5A5A5, trampoline_status # write marker for master knows we're running /* * GDT tables in non default location kernel can be beyond 16MB and * lgdt will not be able to load the address as in real mode default * operand size is 16bit. Use lgdtl instead to force operand size * to 32 bit. */ lidtl tr_idt # load idt with 0, 0 lgdtl tr_gdt # load gdt with whatever is appropriate movw $1, %dx # protected mode (PE) bit lmsw %dx # into protected mode ljmpl $__BOOT_CS, $pa_startup_32 .section ".text32","ax" .code32 ENTRY(startup_32) # note: also used from wakeup_asm.S jmp *%eax .bss .balign 8 GLOBAL(trampoline_header) tr_start: .space 4 tr_gdt_pad: .space 2 tr_gdt: .space 6 END(trampoline_header) #include "trampoline_common.S"
AirFortressIlikara/LS2K0300-linux-4.19
3,765
arch/x86/realmode/rm/wakeup_asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ACPI wakeup real mode startup stub */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/msr-index.h> #include <asm/page_types.h> #include <asm/pgtable_types.h> #include <asm/processor-flags.h> #include "realmode.h" #include "wakeup.h" .code16 /* This should match the structure in wakeup.h */ .section ".data", "aw" .balign 16 GLOBAL(wakeup_header) video_mode: .short 0 /* Video mode number */ pmode_entry: .long 0 pmode_cs: .short __KERNEL_CS pmode_cr0: .long 0 /* Saved %cr0 */ pmode_cr3: .long 0 /* Saved %cr3 */ pmode_cr4: .long 0 /* Saved %cr4 */ pmode_efer: .quad 0 /* Saved EFER */ pmode_gdt: .quad 0 pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */ pmode_behavior: .long 0 /* Wakeup behavior flags */ realmode_flags: .long 0 real_magic: .long 0 signature: .long WAKEUP_HEADER_SIGNATURE END(wakeup_header) .text .code16 .balign 16 ENTRY(wakeup_start) cli cld LJMPW_RM(3f) 3: /* Apparently some dimwit BIOS programmers don't know how to program a PM to RM transition, and we might end up here with junk in the data segment descriptor registers. The only way to repair that is to go into PM and fix it ourselves... */ movw $16, %cx lgdtl %cs:wakeup_gdt movl %cr0, %eax orb $X86_CR0_PE, %al movl %eax, %cr0 ljmpw $8, $2f 2: movw %cx, %ds movw %cx, %es movw %cx, %ss movw %cx, %fs movw %cx, %gs andb $~X86_CR0_PE, %al movl %eax, %cr0 LJMPW_RM(3f) 3: /* Set up segments */ movw %cs, %ax movw %ax, %ss movl $rm_stack_end, %esp movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs lidtl wakeup_idt /* Clear the EFLAGS */ pushl $0 popfl /* Check header signature... */ movl signature, %eax cmpl $WAKEUP_HEADER_SIGNATURE, %eax jne bogus_real_magic /* Check we really have everything... */ movl end_signature, %eax cmpl $REALMODE_END_SIGNATURE, %eax jne bogus_real_magic /* Call the C code */ calll main /* Restore MISC_ENABLE before entering protected mode, in case BIOS decided to clear XD_DISABLE during S3. 
*/ movl pmode_behavior, %edi btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %edi jnc 1f movl pmode_misc_en, %eax movl pmode_misc_en + 4, %edx movl $MSR_IA32_MISC_ENABLE, %ecx wrmsr 1: /* Do any other stuff... */ #ifndef CONFIG_64BIT /* This could also be done in C code... */ movl pmode_cr3, %eax movl %eax, %cr3 btl $WAKEUP_BEHAVIOR_RESTORE_CR4, %edi jnc 1f movl pmode_cr4, %eax movl %eax, %cr4 1: btl $WAKEUP_BEHAVIOR_RESTORE_EFER, %edi jnc 1f movl pmode_efer, %eax movl pmode_efer + 4, %edx movl $MSR_EFER, %ecx wrmsr 1: lgdtl pmode_gdt /* This really couldn't... */ movl pmode_entry, %eax movl pmode_cr0, %ecx movl %ecx, %cr0 ljmpl $__KERNEL_CS, $pa_startup_32 /* -> jmp *%eax in trampoline_32.S */ #else jmp trampoline_start #endif bogus_real_magic: 1: hlt jmp 1b .section ".rodata","a" /* * Set up the wakeup GDT. We set these up as Big Real Mode, * that is, with limits set to 4 GB. At least the Lenovo * Thinkpad X61 is known to need this for the video BIOS * initialization quirk to work; this is likely to also * be the case for other laptops or integrated video devices. */ .balign 16 GLOBAL(wakeup_gdt) .word 3*8-1 /* Self-descriptor */ .long pa_wakeup_gdt .word 0 .word 0xffff /* 16-bit code segment @ real_mode_base */ .long 0x9b000000 + pa_real_mode_base .word 0x008f /* big real mode */ .word 0xffff /* 16-bit data segment @ real_mode_base */ .long 0x93000000 + pa_real_mode_base .word 0x008f /* big real mode */ END(wakeup_gdt) .section ".rodata","a" .balign 8 /* This is the standard real-mode IDT */ .balign 16 GLOBAL(wakeup_idt) .word 0xffff /* limit */ .long 0 /* address */ .word 0 END(wakeup_idt)
AirFortressIlikara/LS2K0300-linux-4.19
4,185
arch/x86/realmode/rm/reboot.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> #include "realmode.h" /* * The following code and data reboots the machine by switching to real * mode and jumping to the BIOS reset entry point, as if the CPU has * really been reset. The previous version asked the keyboard * controller to pulse the CPU reset line, which is more thorough, but * doesn't work with at least one type of 486 motherboard. It is easy * to stop this code working; hence the copious comments. * * This code is called with the restart type (0 = BIOS, 1 = APM) in * the primary argument register (%eax for 32 bit, %edi for 64 bit). */ .section ".text32", "ax" .code32 ENTRY(machine_real_restart_asm) #ifdef CONFIG_X86_64 /* Switch to trampoline GDT as it is guaranteed < 4 GiB */ movl $__KERNEL_DS, %eax movl %eax, %ds lgdtl pa_tr_gdt /* Disable paging to drop us out of long mode */ movl %cr0, %eax andl $~X86_CR0_PG, %eax movl %eax, %cr0 ljmpl $__KERNEL32_CS, $pa_machine_real_restart_paging_off GLOBAL(machine_real_restart_paging_off) xorl %eax, %eax xorl %edx, %edx movl $MSR_EFER, %ecx wrmsr movl %edi, %eax #endif /* CONFIG_X86_64 */ /* Set up the IDT for real mode. */ lidtl pa_machine_real_restart_idt /* * Set up a GDT from which we can load segment descriptors for real * mode. The GDT is not used in real mode; it is just needed here to * prepare the descriptors. */ lgdtl pa_machine_real_restart_gdt /* * Load the data segment registers with 16-bit compatible values */ movl $16, %ecx movl %ecx, %ds movl %ecx, %es movl %ecx, %fs movl %ecx, %gs movl %ecx, %ss ljmpw $8, $1f /* * This is 16-bit protected mode code to disable paging and the cache, * switch to real mode and jump to the BIOS reset code. 
* * The instruction that switches to real mode by writing to CR0 must be * followed immediately by a far jump instruction, which set CS to a * valid value for real mode, and flushes the prefetch queue to avoid * running instructions that have already been decoded in protected * mode. * * Clears all the flags except ET, especially PG (paging), PE * (protected-mode enable) and TS (task switch for coprocessor state * save). Flushes the TLB after paging has been disabled. Sets CD and * NW, to disable the cache on a 486, and invalidates the cache. This * is more like the state of a 486 after reset. I don't know if * something else should be done for other chips. * * More could be done here to set up the registers as if a CPU reset had * occurred; hopefully real BIOSs don't assume much. This is not the * actual BIOS entry point, anyway (that is at 0xfffffff0). * * Most of this work is probably excessive, but it is what is tested. */ .text .code16 .balign 16 machine_real_restart_asm16: 1: xorl %ecx, %ecx movl %cr0, %edx andl $0x00000011, %edx orl $0x60000000, %edx movl %edx, %cr0 movl %ecx, %cr3 movl %cr0, %edx testl $0x60000000, %edx /* If no cache bits -> no wbinvd */ jz 2f wbinvd 2: andb $0x10, %dl movl %edx, %cr0 LJMPW_RM(3f) 3: andw %ax, %ax jz bios apm: movw $0x1000, %ax movw %ax, %ss movw $0xf000, %sp movw $0x5307, %ax movw $0x0001, %bx movw $0x0003, %cx int $0x15 /* This should never return... 
*/ bios: ljmpw $0xf000, $0xfff0 .section ".rodata", "a" .balign 16 GLOBAL(machine_real_restart_idt) .word 0xffff /* Length - real mode default value */ .long 0 /* Base - real mode default value */ END(machine_real_restart_idt) .balign 16 GLOBAL(machine_real_restart_gdt) /* Self-pointer */ .word 0xffff /* Length - real mode default value */ .long pa_machine_real_restart_gdt .word 0 /* * 16-bit code segment pointing to real_mode_seg * Selector value 8 */ .word 0xffff /* Limit */ .long 0x9b000000 + pa_real_mode_base .word 0 /* * 16-bit data segment with the selector value 16 = 0x10 and * base value 0x100; since this is consistent with real mode * semantics we don't have to reload the segments once CR0.PE = 0. */ .quad GDT_ENTRY(0x0093, 0x100, 0xffff) END(machine_real_restart_gdt)
AirFortressIlikara/LS2K0300-linux-4.19
2,202
arch/x86/platform/olpc/xo1-wakeup.S
/* SPDX-License-Identifier: GPL-2.0 */ .text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable_32.h> .macro writepost,value movb $0x34, %al outb %al, $0x70 movb $\value, %al outb %al, $0x71 .endm wakeup_start: # OFW lands us here, running in protected mode, with a # kernel-compatible GDT already setup. # Clear any dangerous flags pushl $0 popfl writepost 0x31 # Set up %cr3 movl $initial_page_table - __PAGE_OFFSET, %eax movl %eax, %cr3 movl saved_cr4, %eax movl %eax, %cr4 movl saved_cr0, %eax movl %eax, %cr0 # Control registers were modified, pipeline resync is needed jmp 1f 1: movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs lgdt saved_gdt lidt saved_idt lldt saved_ldt ljmp $(__KERNEL_CS),$1f 1: movl %cr3, %eax movl %eax, %cr3 wbinvd # Go back to the return point jmp ret_point save_registers: sgdt saved_gdt sidt saved_idt sldt saved_ldt pushl %edx movl %cr4, %edx movl %edx, saved_cr4 movl %cr0, %edx movl %edx, saved_cr0 popl %edx movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp movl %esi, saved_context_esi movl %edi, saved_context_edi pushfl popl saved_context_eflags ret restore_registers: movl saved_context_ebp, %ebp movl saved_context_ebx, %ebx movl saved_context_esi, %esi movl saved_context_edi, %edi pushl saved_context_eflags popfl ret ENTRY(do_olpc_suspend_lowlevel) call save_processor_state call save_registers # This is the stack context we want to remember movl %esp, saved_context_esp pushl $3 call xo1_do_sleep jmp wakeup_start .p2align 4,,7 ret_point: movl saved_context_esp, %esp writepost 0x32 call restore_registers call restore_processor_state ret .data saved_gdt: .long 0,0 saved_idt: .long 0,0 saved_ldt: .long 0 saved_cr4: .long 0 saved_cr0: .long 0 saved_context_esp: .long 0 saved_context_edi: .long 0 saved_context_esi: .long 0 saved_context_ebx: .long 0 saved_context_ebp: .long 0 saved_context_eflags: .long 0
AirFortressIlikara/LS2K0300-linux-4.19
2,823
arch/x86/platform/efi/efi_stub_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * EFI call stub for IA32. * * This stub allows us to make EFI calls in physical mode with interrupts * turned off. */ #include <linux/linkage.h> #include <asm/page_types.h> /* * efi_call_phys(void *, ...) is a function with variable parameters. * All the callers of this function assure that all the parameters are 4-bytes. */ /* * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save. * So we'd better save all of them at the beginning of this function and restore * at the end no matter how many we use, because we can not assure EFI runtime * service functions will comply with gcc calling convention, too. */ .text ENTRY(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found * the values of these registers are the same. And, the corresponding * GDT entries are identical. So I will do nothing about segment reg * and GDT, but change GDT base register in prolog and epilog. */ /* * 1. Now I am running with EIP = <physical address> + PAGE_OFFSET. * But to make it smoothly switch from virtual mode to flat mode. * The mapping of lower virtual memory has been created in prolog and * epilog. */ movl $1f, %edx subl $__PAGE_OFFSET, %edx jmp *%edx 1: /* * 2. Now on the top of stack is the return * address in the caller of efi_call_phys(), then parameter 1, * parameter 2, ..., param n. To make things easy, we save the return * address of efi_call_phys in a global variable. */ popl %edx movl %edx, saved_return_addr /* get the function pointer into ECX*/ popl %ecx movl %ecx, efi_rt_function_ptr movl $2f, %edx subl $__PAGE_OFFSET, %edx pushl %edx /* * 3. Clear PG bit in %CR0. */ movl %cr0, %edx andl $0x7fffffff, %edx movl %edx, %cr0 jmp 1f 1: /* * 4. Adjust stack pointer. */ subl $__PAGE_OFFSET, %esp /* * 5. Call the physical function. */ jmp *%ecx 2: /* * 6. 
After EFI runtime service returns, control will return to * following instruction. We'd better readjust stack pointer first. */ addl $__PAGE_OFFSET, %esp /* * 7. Restore PG bit */ movl %cr0, %edx orl $0x80000000, %edx movl %edx, %cr0 jmp 1f 1: /* * 8. Now restore the virtual mode from flat mode by * adding EIP with PAGE_OFFSET. */ movl $1f, %edx jmp *%edx 1: /* * 9. Balance the stack. And because EAX contain the return value, * we'd better not clobber it. */ leal efi_rt_function_ptr, %edx movl (%edx), %ecx pushl %ecx /* * 10. Push the saved return address onto the stack and return. */ leal saved_return_addr, %edx movl (%edx), %ecx pushl %ecx ret ENDPROC(efi_call_phys) .previous .data saved_return_addr: .long 0 efi_rt_function_ptr: .long 0
AirFortressIlikara/LS2K0300-linux-4.19
3,117
arch/x86/platform/efi/efi_thunk_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Intel Corporation; author Matt Fleming * * Support for invoking 32-bit EFI runtime services from a 64-bit * kernel. * * The below thunking functions are only used after ExitBootServices() * has been called. This simplifies things considerably as compared with * the early EFI thunking because we can leave all the kernel state * intact (GDT, IDT, etc) and simply invoke the the 32-bit EFI runtime * services from __KERNEL32_CS. This means we can continue to service * interrupts across an EFI mixed mode call. * * We do however, need to handle the fact that we're running in a full * 64-bit virtual address space. Things like the stack and instruction * addresses need to be accessible by the 32-bit firmware, so we rely on * using the identity mappings in the EFI page table to access the stack * and kernel text (see efi_setup_page_tables()). */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/segment.h> .text .code64 ENTRY(efi64_thunk) push %rbp push %rbx /* * Switch to 1:1 mapped 32-bit stack pointer. */ movq %rsp, efi_saved_sp(%rip) movq efi_scratch(%rip), %rsp /* * Calculate the physical address of the kernel text. */ movq $__START_KERNEL_map, %rax subq phys_base(%rip), %rax /* * Push some physical addresses onto the stack. This is easier * to do now in a code64 section while the assembler can address * 64-bit values. Note that all the addresses on the stack are * 32-bit. */ subq $16, %rsp leaq efi_exit32(%rip), %rbx subq %rax, %rbx movl %ebx, 8(%rsp) leaq __efi64_thunk(%rip), %rbx subq %rax, %rbx call *%rbx movq efi_saved_sp(%rip), %rsp pop %rbx pop %rbp retq ENDPROC(efi64_thunk) /* * We run this function from the 1:1 mapping. * * This function must be invoked with a 1:1 mapped stack. 
*/ ENTRY(__efi64_thunk) movl %ds, %eax push %rax movl %es, %eax push %rax movl %ss, %eax push %rax subq $32, %rsp movl %esi, 0x0(%rsp) movl %edx, 0x4(%rsp) movl %ecx, 0x8(%rsp) movq %r8, %rsi movl %esi, 0xc(%rsp) movq %r9, %rsi movl %esi, 0x10(%rsp) leaq 1f(%rip), %rbx movq %rbx, func_rt_ptr(%rip) /* Switch to 32-bit descriptor */ pushq $__KERNEL32_CS leaq efi_enter32(%rip), %rax pushq %rax lretq 1: addq $32, %rsp pop %rbx movl %ebx, %ss pop %rbx movl %ebx, %es pop %rbx movl %ebx, %ds /* * Convert 32-bit status code into 64-bit. */ test %rax, %rax jz 1f movl %eax, %ecx andl $0x0fffffff, %ecx andl $0xf0000000, %eax shl $32, %rax or %rcx, %rax 1: ret ENDPROC(__efi64_thunk) ENTRY(efi_exit32) movq func_rt_ptr(%rip), %rax push %rax mov %rdi, %rax ret ENDPROC(efi_exit32) .code32 /* * EFI service pointer must be in %edi. * * The stack should represent the 32-bit calling convention. */ ENTRY(efi_enter32) movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %es movl %eax, %ss call *%edi /* We must preserve return value */ movl %eax, %edi movl 72(%esp), %eax pushl $__KERNEL_CS pushl %eax lret ENDPROC(efi_enter32) .data .balign 8 func_rt_ptr: .quad 0 efi_saved_sp: .quad 0
AirFortressIlikara/LS2K0300-linux-4.19
1,202
arch/x86/platform/efi/efi_stub_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Function calling ABI conversion from Linux to EFI for x86_64 * * Copyright (C) 2007 Intel Corp * Bibo Mao <bibo.mao@intel.com> * Huang Ying <ying.huang@intel.com> */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/msr.h> #include <asm/processor-flags.h> #include <asm/page_types.h> #define SAVE_XMM \ mov %rsp, %rax; \ subq $0x70, %rsp; \ and $~0xf, %rsp; \ mov %rax, (%rsp); \ mov %cr0, %rax; \ clts; \ mov %rax, 0x8(%rsp); \ movaps %xmm0, 0x60(%rsp); \ movaps %xmm1, 0x50(%rsp); \ movaps %xmm2, 0x40(%rsp); \ movaps %xmm3, 0x30(%rsp); \ movaps %xmm4, 0x20(%rsp); \ movaps %xmm5, 0x10(%rsp) #define RESTORE_XMM \ movaps 0x60(%rsp), %xmm0; \ movaps 0x50(%rsp), %xmm1; \ movaps 0x40(%rsp), %xmm2; \ movaps 0x30(%rsp), %xmm3; \ movaps 0x20(%rsp), %xmm4; \ movaps 0x10(%rsp), %xmm5; \ mov 0x8(%rsp), %rsi; \ mov %rsi, %cr0; \ mov (%rsp), %rsp ENTRY(efi_call) pushq %rbp movq %rsp, %rbp SAVE_XMM mov 16(%rbp), %rax subq $48, %rsp mov %r9, 32(%rsp) mov %rax, 40(%rsp) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx call *%rdi addq $48, %rsp RESTORE_XMM popq %rbp ret ENDPROC(efi_call)
AirFortressIlikara/LS2K0300-linux-4.19
2,974
arch/x86/kernel/acpi/wakeup_64.S
.text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/pgtable_types.h> #include <asm/page_types.h> #include <asm/msr.h> #include <asm/asm-offsets.h> #include <asm/frame.h> # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 .code64 /* * Hooray, we are in Long 64-bit mode (but still running in low memory) */ ENTRY(wakeup_long64) movq saved_magic, %rax movq $0x123456789abcdef0, %rdx cmpq %rdx, %rax jne bogus_64_magic movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movq saved_rsp, %rsp movq saved_rbx, %rbx movq saved_rdi, %rdi movq saved_rsi, %rsi movq saved_rbp, %rbp movq saved_rip, %rax jmp *%rax ENDPROC(wakeup_long64) bogus_64_magic: jmp bogus_64_magic ENTRY(do_suspend_lowlevel) FRAME_BEGIN subq $8, %rsp xorl %eax, %eax call save_processor_state movq $saved_context, %rax movq %rsp, pt_regs_sp(%rax) movq %rbp, pt_regs_bp(%rax) movq %rsi, pt_regs_si(%rax) movq %rdi, pt_regs_di(%rax) movq %rbx, pt_regs_bx(%rax) movq %rcx, pt_regs_cx(%rax) movq %rdx, pt_regs_dx(%rax) movq %r8, pt_regs_r8(%rax) movq %r9, pt_regs_r9(%rax) movq %r10, pt_regs_r10(%rax) movq %r11, pt_regs_r11(%rax) movq %r12, pt_regs_r12(%rax) movq %r13, pt_regs_r13(%rax) movq %r14, pt_regs_r14(%rax) movq %r15, pt_regs_r15(%rax) pushfq popq pt_regs_flags(%rax) movq $.Lresume_point, saved_rip(%rip) movq %rsp, saved_rsp movq %rbp, saved_rbp movq %rbx, saved_rbx movq %rdi, saved_rdi movq %rsi, saved_rsi addq $8, %rsp movl $3, %edi xorl %eax, %eax call x86_acpi_enter_sleep_state /* in case something went wrong, restore the machine status and go on */ jmp .Lresume_point .align 4 .Lresume_point: /* We don't restore %rax, it must be 0 anyway */ movq $saved_context, %rax movq saved_context_cr4(%rax), %rbx movq %rbx, %cr4 movq saved_context_cr3(%rax), %rbx movq %rbx, %cr3 movq saved_context_cr2(%rax), %rbx movq %rbx, %cr2 movq saved_context_cr0(%rax), %rbx movq %rbx, %cr0 pushq pt_regs_flags(%rax) popfq movq pt_regs_sp(%rax), %rsp movq 
pt_regs_bp(%rax), %rbp movq pt_regs_si(%rax), %rsi movq pt_regs_di(%rax), %rdi movq pt_regs_bx(%rax), %rbx movq pt_regs_cx(%rax), %rcx movq pt_regs_dx(%rax), %rdx movq pt_regs_r8(%rax), %r8 movq pt_regs_r9(%rax), %r9 movq pt_regs_r10(%rax), %r10 movq pt_regs_r11(%rax), %r11 movq pt_regs_r12(%rax), %r12 movq pt_regs_r13(%rax), %r13 movq pt_regs_r14(%rax), %r14 movq pt_regs_r15(%rax), %r15 #ifdef CONFIG_KASAN /* * The suspend path may have poisoned some areas deeper in the stack, * which we now need to unpoison. */ movq %rsp, %rdi call kasan_unpoison_task_stack_below #endif xorl %eax, %eax addq $8, %rsp FRAME_END jmp restore_processor_state ENDPROC(do_suspend_lowlevel) .data ENTRY(saved_rbp) .quad 0 ENTRY(saved_rsi) .quad 0 ENTRY(saved_rdi) .quad 0 ENTRY(saved_rbx) .quad 0 ENTRY(saved_rip) .quad 0 ENTRY(saved_rsp) .quad 0 ENTRY(saved_magic) .quad 0
AirFortressIlikara/LS2K0300-linux-4.19
1,717
arch/x86/kernel/acpi/wakeup_32.S
.text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> # Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 .code32 ALIGN ENTRY(wakeup_pmode_return) wakeup_pmode_return: movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %fs movw %ax, %gs movw $__USER_DS, %ax movw %ax, %ds movw %ax, %es # reload the gdt, as we need the full 32 bit address lidt saved_idt lldt saved_ldt ljmp $(__KERNEL_CS), $1f 1: movl %cr3, %eax movl %eax, %cr3 wbinvd # and restore the stack ... but you need gdt for this to work movl saved_context_esp, %esp movl %cs:saved_magic, %eax cmpl $0x12345678, %eax jne bogus_magic # jump to place where we left off movl saved_eip, %eax jmp *%eax bogus_magic: jmp bogus_magic save_registers: sidt saved_idt sldt saved_ldt str saved_tss leal 4(%esp), %eax movl %eax, saved_context_esp movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp movl %esi, saved_context_esi movl %edi, saved_context_edi pushfl popl saved_context_eflags movl $ret_point, saved_eip ret restore_registers: movl saved_context_ebp, %ebp movl saved_context_ebx, %ebx movl saved_context_esi, %esi movl saved_context_edi, %edi pushl saved_context_eflags popfl ret ENTRY(do_suspend_lowlevel) call save_processor_state call save_registers pushl $3 call x86_acpi_enter_sleep_state addl $4, %esp # In case of S3 failure, we'll emerge here. Jump # to ret_point to recover jmp ret_point .p2align 4,,7 ret_point: call restore_registers call restore_processor_state ret .data ALIGN ENTRY(saved_magic) .long 0 ENTRY(saved_eip) .long 0 # saved registers saved_idt: .long 0,0 saved_ldt: .long 0 saved_tss: .long 0
AirFortressIlikara/LS2K0300-linux-4.19
9,399
arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
/* * Header file for multi buffer SHA256 algorithm data structure * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Megha Dey <megha.dey@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ # Macros for defining data structures # Usage example #START_FIELDS # JOB_AES ### name size align #FIELD _plaintext, 8, 8 # pointer to plaintext #FIELD _ciphertext, 8, 8 # pointer to ciphertext #FIELD _IV, 16, 8 # IV #FIELD _keys, 8, 8 # pointer to keys #FIELD _len, 4, 4 # length in bytes #FIELD _status, 4, 4 # status enumeration #FIELD _user_data, 8, 8 # pointer to user data #UNION _union, size1, align1, \ # size2, align2, \ # size3, align3, \ # ... #END_FIELDS #%assign _JOB_AES_size _FIELD_OFFSET #%assign _JOB_AES_align _STRUCT_ALIGN ######################################################################### # Alternate "struc-like" syntax: # STRUCT job_aes2 # RES_Q .plaintext, 1 # RES_Q .ciphertext, 1 # RES_DQ .IV, 1 # RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN # RES_U .union, size1, align1, \ # size2, align2, \ # ... # ENDSTRUCT # # Following only needed if nesting # %assign job_aes2_size _FIELD_OFFSET # %assign job_aes2_align _STRUCT_ALIGN # # RES_* macros take a name, a count and an optional alignment. # The count in in terms of the base size of the macro, and the # default alignment is the base size. # The macros are: # Macro Base size # RES_B 1 # RES_W 2 # RES_D 4 # RES_Q 8 # RES_DQ 16 # RES_Y 32 # RES_Z 64 # # RES_U defines a union. It's arguments are a name and two or more # pairs of "size, alignment" # # The two assigns are only needed if this structure is being nested # within another. 
Even if the assigns are not done, one can still use # STRUCT_NAME_size as the size of the structure. # # Note that for nesting, you still need to assign to STRUCT_NAME_size. # # The differences between this and using "struc" directly are that each # type is implicitly aligned to its natural length (although this can be # over-ridden with an explicit third parameter), and that the structure # is padded at the end to its overall alignment. # ######################################################################### #ifndef _DATASTRUCT_ASM_ #define _DATASTRUCT_ASM_ #define SZ8 8*SHA256_DIGEST_WORD_SIZE #define ROUNDS 64*SZ8 #define PTR_SZ 8 #define SHA256_DIGEST_WORD_SIZE 4 #define MAX_SHA256_LANES 8 #define SHA256_DIGEST_WORDS 8 #define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE) #define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS) #define SHA256_BLK_SZ 64 # START_FIELDS .macro START_FIELDS _FIELD_OFFSET = 0 _STRUCT_ALIGN = 0 .endm # FIELD name size align .macro FIELD name size align _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) \name = _FIELD_OFFSET _FIELD_OFFSET = _FIELD_OFFSET + (\size) .if (\align > _STRUCT_ALIGN) _STRUCT_ALIGN = \align .endif .endm # END_FIELDS .macro END_FIELDS _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) .endm ######################################################################## .macro STRUCT p1 START_FIELDS .struc \p1 .endm .macro ENDSTRUCT tmp = _FIELD_OFFSET END_FIELDS tmp = (_FIELD_OFFSET - %%tmp) .if (tmp > 0) .lcomm tmp .endif .endstruc .endm ## RES_int name size align .macro RES_int p1 p2 p3 name = \p1 size = \p2 align = .\p3 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) .align align .lcomm name size _FIELD_OFFSET = _FIELD_OFFSET + (size) .if (align > _STRUCT_ALIGN) _STRUCT_ALIGN = align .endif .endm # macro RES_B name, size [, align] .macro RES_B _name, _size, _align=1 RES_int _name _size _align .endm # macro RES_W name, 
size [, align] .macro RES_W _name, _size, _align=2 RES_int _name 2*(_size) _align .endm # macro RES_D name, size [, align] .macro RES_D _name, _size, _align=4 RES_int _name 4*(_size) _align .endm # macro RES_Q name, size [, align] .macro RES_Q _name, _size, _align=8 RES_int _name 8*(_size) _align .endm # macro RES_DQ name, size [, align] .macro RES_DQ _name, _size, _align=16 RES_int _name 16*(_size) _align .endm # macro RES_Y name, size [, align] .macro RES_Y _name, _size, _align=32 RES_int _name 32*(_size) _align .endm # macro RES_Z name, size [, align] .macro RES_Z _name, _size, _align=64 RES_int _name 64*(_size) _align .endm #endif ######################################################################## #### Define SHA256 Out Of Order Data Structures ######################################################################## START_FIELDS # LANE_DATA ### name size align FIELD _job_in_lane, 8, 8 # pointer to job object END_FIELDS _LANE_DATA_size = _FIELD_OFFSET _LANE_DATA_align = _STRUCT_ALIGN ######################################################################## START_FIELDS # SHA256_ARGS_X4 ### name size align FIELD _digest, 4*8*8, 4 # transposed digest FIELD _data_ptr, 8*8, 8 # array of pointers to data END_FIELDS _SHA256_ARGS_X4_size = _FIELD_OFFSET _SHA256_ARGS_X4_align = _STRUCT_ALIGN _SHA256_ARGS_X8_size = _FIELD_OFFSET _SHA256_ARGS_X8_align = _STRUCT_ALIGN ####################################################################### START_FIELDS # MB_MGR ### name size align FIELD _args, _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align FIELD _lens, 4*8, 8 FIELD _unused_lanes, 8, 8 FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align END_FIELDS _MB_MGR_size = _FIELD_OFFSET _MB_MGR_align = _STRUCT_ALIGN _args_digest = _args + _digest _args_data_ptr = _args + _data_ptr ####################################################################### START_FIELDS #STACK_FRAME ### name size align FIELD _data, 16*SZ8, 1 # transposed digest FIELD _digest, 8*SZ8, 1 # array of pointers to 
data FIELD _ytmp, 4*SZ8, 1 FIELD _rsp, 8, 1 END_FIELDS _STACK_FRAME_size = _FIELD_OFFSET _STACK_FRAME_align = _STRUCT_ALIGN ####################################################################### ######################################################################## #### Define constants ######################################################################## #define STS_UNKNOWN 0 #define STS_BEING_PROCESSED 1 #define STS_COMPLETED 2 ######################################################################## #### Define JOB_SHA256 structure ######################################################################## START_FIELDS # JOB_SHA256 ### name size align FIELD _buffer, 8, 8 # pointer to buffer FIELD _len, 8, 8 # length in bytes FIELD _result_digest, 8*4, 32 # Digest (output) FIELD _status, 4, 4 FIELD _user_data, 8, 8 END_FIELDS _JOB_SHA256_size = _FIELD_OFFSET _JOB_SHA256_align = _STRUCT_ALIGN
AirFortressIlikara/LS2K0300-linux-4.19
8,494
arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
/* * Flush routine for SHA256 multibuffer * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Megha Dey <megha.dey@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/linkage.h> #include <asm/frame.h> #include "sha256_mb_mgr_datastruct.S" .extern sha256_x8_avx2 #LINUX register definitions #define arg1 %rdi #define arg2 %rsi # Common register definitions #define state arg1 #define job arg2 #define len2 arg2 # idx must be a register not clobberred by sha1_mult #define idx %r8 #define DWORD_idx %r8d #define unused_lanes %rbx #define lane_data %rbx #define tmp2 %rbx #define tmp2_w %ebx #define job_rax %rax #define tmp1 %rax #define size_offset %rax #define tmp %rax #define start_offset %rax #define tmp3 %arg1 #define extra_blocks %arg2 #define p %arg2 .macro LABEL prefix n \prefix\n\(): .endm .macro JNE_SKIP i jne skip_\i .endm .altmacro .macro SET_OFFSET _offset offset = \_offset .endm .noaltmacro # JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state) # arg 1 : rcx : state ENTRY(sha256_mb_mgr_flush_avx2) FRAME_BEGIN push %rbx # If bit (32+3) is set, then all lanes are empty mov _unused_lanes(state), unused_lanes bt $32+3, unused_lanes jc return_null # find a lane with a non-null job xor idx, idx offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne one(%rip), idx offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne two(%rip), idx offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne three(%rip), idx offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne four(%rip), idx 
offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne five(%rip), idx offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne six(%rip), idx offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne seven(%rip), idx # copy idx to empty lanes copy_lane_data: offset = (_args + _data_ptr) mov offset(state,idx,8), tmp I = 0 .rep 8 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) .altmacro JNE_SKIP %I offset = (_args + _data_ptr + 8*I) mov tmp, offset(state) offset = (_lens + 4*I) movl $0xFFFFFFFF, offset(state) LABEL skip_ %I I = (I+1) .noaltmacro .endr # Find min length vmovdqu _lens+0*16(state), %xmm0 vmovdqu _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword vmovd %xmm2, DWORD_idx mov idx, len2 and $0xF, idx shr $4, len2 jz len_is_0 vpand clear_low_nibble(%rip), %xmm2, %xmm2 vpshufd $0, %xmm2, %xmm2 vpsubd %xmm2, %xmm0, %xmm0 vpsubd %xmm2, %xmm1, %xmm1 vmovdqu %xmm0, _lens+0*16(state) vmovdqu %xmm1, _lens+1*16(state) # "state" and "args" are the same address, arg1 # len is arg2 call sha256_x8_avx2 # state and idx are intact len_is_0: # process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) mov _unused_lanes(state), unused_lanes shl $4, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF, _lens(state,idx,4) vmovd _args_digest(state , idx, 4) , %xmm0 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 
vmovd _args_digest+4*32(state, idx, 4), %xmm1 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 vmovdqu %xmm0, _result_digest(job_rax) offset = (_result_digest + 1*16) vmovdqu %xmm1, offset(job_rax) return: pop %rbx FRAME_END ret return_null: xor job_rax, job_rax jmp return ENDPROC(sha256_mb_mgr_flush_avx2) ############################################################################## .align 16 ENTRY(sha256_mb_mgr_get_comp_job_avx2) push %rbx ## if bit 32+3 is set, then all lanes are empty mov _unused_lanes(state), unused_lanes bt $(32+3), unused_lanes jc .return_null # Find min length vmovdqu _lens(state), %xmm0 vmovdqu _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword vmovd %xmm2, DWORD_idx test $~0xF, idx jnz .return_null # process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) mov _unused_lanes(state), unused_lanes shl $4, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF, _lens(state, idx, 4) vmovd _args_digest(state, idx, 4), %xmm0 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 vmovd _args_digest+4*32(state, idx, 4), %xmm1 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 vmovdqu %xmm0, _result_digest(job_rax) offset = (_result_digest + 1*16) vmovdqu 
%xmm1, offset(job_rax) pop %rbx ret .return_null: xor job_rax, job_rax pop %rbx ret ENDPROC(sha256_mb_mgr_get_comp_job_avx2) .section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 .align 16 clear_low_nibble: .octa 0x000000000000000000000000FFFFFFF0 .section .rodata.cst8, "aM", @progbits, 8 .align 8 one: .quad 1 two: .quad 2 three: .quad 3 four: .quad 4 five: .quad 5 six: .quad 6 seven: .quad 7
AirFortressIlikara/LS2K0300-linux-4.19
18,835
arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
/* * Multi-buffer SHA256 algorithm hash compute routine * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Megha Dey <megha.dey@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/linkage.h> #include "sha256_mb_mgr_datastruct.S" ## code to compute oct SHA256 using SSE-256 ## outer calling routine takes care of save and restore of XMM registers ## Logic designed/laid out by JDG ## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15 ## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15 ## Linux preserves: rdi rbp r8 ## ## clobbers %ymm0-15 arg1 = %rdi arg2 = %rsi reg3 = %rcx reg4 = %rdx # Common definitions STATE = arg1 INP_SIZE = arg2 IDX = %rax ROUND = %rbx TBL = reg3 inp0 = %r9 inp1 = %r10 inp2 = %r11 inp3 = %r12 inp4 = %r13 inp5 = %r14 inp6 = %r15 inp7 = reg4 a = %ymm0 b = %ymm1 c = %ymm2 d = %ymm3 e = %ymm4 f = %ymm5 g = %ymm6 h = %ymm7 T1 = %ymm8 a0 = %ymm12 a1 = %ymm13 a2 = %ymm14 TMP = %ymm15 TMP0 = %ymm6 TMP1 = %ymm7 TT0 = %ymm8 TT1 = %ymm9 TT2 = %ymm10 TT3 = %ymm11 TT4 = %ymm12 TT5 = %ymm13 TT6 = %ymm14 TT7 = %ymm15 # Define stack usage # Assume stack aligned to 32 bytes before call # Therefore FRAMESZ mod 32 must be 32-8 = 24 #define FRAMESZ 0x388 #define VMOVPS vmovups # TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1 # "transpose" data in {r0...r7} using temps {t0...t1} # Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7} # r0 = {a7 a6 a5 a4 a3 a2 a1 a0} # r1 = {b7 b6 b5 b4 b3 b2 b1 b0} # r2 = {c7 c6 c5 c4 c3 c2 c1 c0} # r3 = {d7 d6 d5 d4 d3 d2 d1 d0} # r4 = {e7 e6 e5 e4 e3 e2 e1 e0} # r5 = {f7 f6 f5 f4 f3 f2 f1 f0} # r6 = {g7 g6 g5 g4 g3 g2 g1 g0} # r7 = {h7 h6 h5 h4 h3 h2 
h1 h0} # # Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7} # r0 = {h0 g0 f0 e0 d0 c0 b0 a0} # r1 = {h1 g1 f1 e1 d1 c1 b1 a1} # r2 = {h2 g2 f2 e2 d2 c2 b2 a2} # r3 = {h3 g3 f3 e3 d3 c3 b3 a3} # r4 = {h4 g4 f4 e4 d4 c4 b4 a4} # r5 = {h5 g5 f5 e5 d5 c5 b5 a5} # r6 = {h6 g6 f6 e6 d6 c6 b6 a6} # r7 = {h7 g7 f7 e7 d7 c7 b7 a7} # .macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1 # process top half (r0..r3) {a...d} vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0} vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2} vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2} vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1} vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2} vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3} vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0} # use r2 in place of t0 # process bottom half (r4..r7) {e...h} vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0} vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2} vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0} vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2} vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1} vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2} vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3} vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0} vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6 vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2 vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5 vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1 vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7 vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3 vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4 vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0 .endm .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro _PRORD reg imm tmp vpslld $(32-\imm),\reg,\tmp vpsrld $\imm,\reg, \reg vpor \tmp,\reg, \reg .endm # 
PRORD_nd reg, imm, tmp, src .macro _PRORD_nd reg imm tmp src vpslld $(32-\imm), \src, \tmp vpsrld $\imm, \src, \reg vpor \tmp, \reg, \reg .endm # PRORD dst/src, amt .macro PRORD reg imm _PRORD \reg,\imm,TMP .endm # PRORD_nd dst, src, amt .macro PRORD_nd reg tmp imm _PRORD_nd \reg, \imm, TMP, \tmp .endm # arguments passed implicitly in preprocessor symbols i, a...h .macro ROUND_00_15 _T1 i PRORD_nd a0,e,5 # sig1: a0 = (e >> 5) vpxor g, f, a2 # ch: a2 = f^g vpand e,a2, a2 # ch: a2 = (f^g)&e vpxor g, a2, a2 # a2 = ch PRORD_nd a1,e,25 # sig1: a1 = (e >> 25) vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp) vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5) PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11) vpaddd a2, h, h # h = h + ch PRORD_nd a2,a,11 # sig0: a2 = (a >> 11) vpaddd \_T1,h, h # h = h + ch + W + K vpxor a1, a0, a0 # a0 = sigma1 PRORD_nd a1,a,22 # sig0: a1 = (a >> 22) vpxor c, a, \_T1 # maj: T1 = a^c add $SZ8, ROUND # ROUND++ vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b vpaddd a0, h, h vpaddd h, d, d vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11) PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13) vpxor a1, a2, a2 # a2 = sig0 vpand c, a, a1 # maj: a1 = a&c vpor \_T1, a1, a1 # a1 = maj vpaddd a1, h, h # h = h + ch + W + K + maj vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0 ROTATE_ARGS .endm # arguments passed implicitly in preprocessor symbols i, a...h .macro ROUND_16_XX _T1 i vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1 vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1 vmovdqu \_T1, a0 PRORD \_T1,11 vmovdqu a1, a2 PRORD a1,2 vpxor a0, \_T1, \_T1 PRORD \_T1, 7 vpxor a2, a1, a1 PRORD a1, 17 vpsrld $3, a0, a0 vpxor a0, \_T1, \_T1 vpsrld $10, a2, a2 vpxor a2, a1, a1 vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1 vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1 vpaddd a1, \_T1, \_T1 ROUND_00_15 \_T1,\i .endm # SHA256_ARGS: # UINT128 digest[8]; // transposed digests # UINT8 *data_ptr[4]; # void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes); # arg 1 : STATE : pointer to array of 
pointers to input data # arg 2 : INP_SIZE : size of input in blocks # general registers preserved in outer calling routine # outer calling routine saves all the XMM registers # save rsp, allocate 32-byte aligned for local variables ENTRY(sha256_x8_avx2) # save callee-saved clobbered registers to comply with C function ABI push %r12 push %r13 push %r14 push %r15 mov %rsp, IDX sub $FRAMESZ, %rsp and $~0x1F, %rsp mov IDX, _rsp(%rsp) # Load the pre-transposed incoming digest. vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h lea K256_8(%rip),TBL # load the address of each of the 4 message lanes # getting ready to transpose input onto stack mov _args_data_ptr+0*PTR_SZ(STATE),inp0 mov _args_data_ptr+1*PTR_SZ(STATE),inp1 mov _args_data_ptr+2*PTR_SZ(STATE),inp2 mov _args_data_ptr+3*PTR_SZ(STATE),inp3 mov _args_data_ptr+4*PTR_SZ(STATE),inp4 mov _args_data_ptr+5*PTR_SZ(STATE),inp5 mov _args_data_ptr+6*PTR_SZ(STATE),inp6 mov _args_data_ptr+7*PTR_SZ(STATE),inp7 xor IDX, IDX lloop: xor ROUND, ROUND # save old digest vmovdqu a, _digest(%rsp) vmovdqu b, _digest+1*SZ8(%rsp) vmovdqu c, _digest+2*SZ8(%rsp) vmovdqu d, _digest+3*SZ8(%rsp) vmovdqu e, _digest+4*SZ8(%rsp) vmovdqu f, _digest+5*SZ8(%rsp) vmovdqu g, _digest+6*SZ8(%rsp) vmovdqu h, _digest+7*SZ8(%rsp) i = 0 .rep 2 VMOVPS i*32(inp0, IDX), TT0 VMOVPS i*32(inp1, IDX), TT1 VMOVPS i*32(inp2, IDX), TT2 VMOVPS i*32(inp3, IDX), TT3 VMOVPS i*32(inp4, IDX), TT4 VMOVPS i*32(inp5, IDX), TT5 VMOVPS i*32(inp6, IDX), TT6 VMOVPS i*32(inp7, IDX), TT7 vmovdqu g, _ytmp(%rsp) vmovdqu h, _ytmp+1*SZ8(%rsp) TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1 vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1 vmovdqu _ytmp(%rsp), g vpshufb TMP1, TT0, TT0 vpshufb 
TMP1, TT1, TT1 vpshufb TMP1, TT2, TT2 vpshufb TMP1, TT3, TT3 vpshufb TMP1, TT4, TT4 vpshufb TMP1, TT5, TT5 vpshufb TMP1, TT6, TT6 vpshufb TMP1, TT7, TT7 vmovdqu _ytmp+1*SZ8(%rsp), h vmovdqu TT4, _ytmp(%rsp) vmovdqu TT5, _ytmp+1*SZ8(%rsp) vmovdqu TT6, _ytmp+2*SZ8(%rsp) vmovdqu TT7, _ytmp+3*SZ8(%rsp) ROUND_00_15 TT0,(i*8+0) vmovdqu _ytmp(%rsp), TT0 ROUND_00_15 TT1,(i*8+1) vmovdqu _ytmp+1*SZ8(%rsp), TT1 ROUND_00_15 TT2,(i*8+2) vmovdqu _ytmp+2*SZ8(%rsp), TT2 ROUND_00_15 TT3,(i*8+3) vmovdqu _ytmp+3*SZ8(%rsp), TT3 ROUND_00_15 TT0,(i*8+4) ROUND_00_15 TT1,(i*8+5) ROUND_00_15 TT2,(i*8+6) ROUND_00_15 TT3,(i*8+7) i = (i+1) .endr add $64, IDX i = (i*8) jmp Lrounds_16_xx .align 16 Lrounds_16_xx: .rep 16 ROUND_16_XX T1, i i = (i+1) .endr cmp $ROUNDS,ROUND jb Lrounds_16_xx # add old digest vpaddd _digest+0*SZ8(%rsp), a, a vpaddd _digest+1*SZ8(%rsp), b, b vpaddd _digest+2*SZ8(%rsp), c, c vpaddd _digest+3*SZ8(%rsp), d, d vpaddd _digest+4*SZ8(%rsp), e, e vpaddd _digest+5*SZ8(%rsp), f, f vpaddd _digest+6*SZ8(%rsp), g, g vpaddd _digest+7*SZ8(%rsp), h, h sub $1, INP_SIZE # unit is blocks jne lloop # write back to memory (state object) the transposed digest vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE) vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE) vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE) vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE) vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE) vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE) vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE) vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE) # update input pointers add IDX, inp0 mov inp0, _args_data_ptr+0*8(STATE) add IDX, inp1 mov inp1, _args_data_ptr+1*8(STATE) add IDX, inp2 mov inp2, _args_data_ptr+2*8(STATE) add IDX, inp3 mov inp3, _args_data_ptr+3*8(STATE) add IDX, inp4 mov inp4, _args_data_ptr+4*8(STATE) add IDX, inp5 mov inp5, _args_data_ptr+5*8(STATE) add IDX, inp6 mov inp6, _args_data_ptr+6*8(STATE) add IDX, inp7 mov inp7, _args_data_ptr+7*8(STATE) # Postamble mov _rsp(%rsp), %rsp # restore callee-saved clobbered registers 
pop %r15 pop %r14 pop %r13 pop %r12 ret ENDPROC(sha256_x8_avx2) .section .rodata.K256_8, "a", @progbits .align 64 K256_8: .octa 0x428a2f98428a2f98428a2f98428a2f98 .octa 0x428a2f98428a2f98428a2f98428a2f98 .octa 0x71374491713744917137449171374491 .octa 0x71374491713744917137449171374491 .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5 .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5 .octa 0x3956c25b3956c25b3956c25b3956c25b .octa 0x3956c25b3956c25b3956c25b3956c25b .octa 0x59f111f159f111f159f111f159f111f1 .octa 0x59f111f159f111f159f111f159f111f1 .octa 0x923f82a4923f82a4923f82a4923f82a4 .octa 0x923f82a4923f82a4923f82a4923f82a4 .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5 .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5 .octa 0xd807aa98d807aa98d807aa98d807aa98 .octa 0xd807aa98d807aa98d807aa98d807aa98 .octa 0x12835b0112835b0112835b0112835b01 .octa 0x12835b0112835b0112835b0112835b01 .octa 0x243185be243185be243185be243185be .octa 0x243185be243185be243185be243185be .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3 .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3 .octa 0x72be5d7472be5d7472be5d7472be5d74 .octa 0x72be5d7472be5d7472be5d7472be5d74 .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7 .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7 .octa 0xc19bf174c19bf174c19bf174c19bf174 .octa 0xc19bf174c19bf174c19bf174c19bf174 .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1 .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1 .octa 0xefbe4786efbe4786efbe4786efbe4786 .octa 0xefbe4786efbe4786efbe4786efbe4786 .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6 .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6 .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc .octa 
0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc .octa 0x76f988da76f988da76f988da76f988da .octa 0x76f988da76f988da76f988da76f988da .octa 0x983e5152983e5152983e5152983e5152 .octa 0x983e5152983e5152983e5152983e5152 .octa 0xa831c66da831c66da831c66da831c66d .octa 0xa831c66da831c66da831c66da831c66d .octa 0xb00327c8b00327c8b00327c8b00327c8 .octa 0xb00327c8b00327c8b00327c8b00327c8 .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7 .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7 .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3 .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3 .octa 0xd5a79147d5a79147d5a79147d5a79147 .octa 0xd5a79147d5a79147d5a79147d5a79147 .octa 0x06ca635106ca635106ca635106ca6351 .octa 0x06ca635106ca635106ca635106ca6351 .octa 0x14292967142929671429296714292967 .octa 0x14292967142929671429296714292967 .octa 0x27b70a8527b70a8527b70a8527b70a85 .octa 0x27b70a8527b70a8527b70a8527b70a85 .octa 0x2e1b21382e1b21382e1b21382e1b2138 .octa 0x2e1b21382e1b21382e1b21382e1b2138 .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc .octa 0x53380d1353380d1353380d1353380d13 .octa 0x53380d1353380d1353380d1353380d13 .octa 0x650a7354650a7354650a7354650a7354 .octa 0x650a7354650a7354650a7354650a7354 .octa 0x766a0abb766a0abb766a0abb766a0abb .octa 0x766a0abb766a0abb766a0abb766a0abb .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e .octa 0x92722c8592722c8592722c8592722c85 .octa 0x92722c8592722c8592722c8592722c85 .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1 .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1 .octa 0xa81a664ba81a664ba81a664ba81a664b .octa 0xa81a664ba81a664ba81a664ba81a664b .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70 .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70 .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3 .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3 .octa 0xd192e819d192e819d192e819d192e819 .octa 0xd192e819d192e819d192e819d192e819 .octa 0xd6990624d6990624d6990624d6990624 .octa 0xd6990624d6990624d6990624d6990624 .octa 0xf40e3585f40e3585f40e3585f40e3585 .octa 
0xf40e3585f40e3585f40e3585f40e3585 .octa 0x106aa070106aa070106aa070106aa070 .octa 0x106aa070106aa070106aa070106aa070 .octa 0x19a4c11619a4c11619a4c11619a4c116 .octa 0x19a4c11619a4c11619a4c11619a4c116 .octa 0x1e376c081e376c081e376c081e376c08 .octa 0x1e376c081e376c081e376c081e376c08 .octa 0x2748774c2748774c2748774c2748774c .octa 0x2748774c2748774c2748774c2748774c .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5 .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5 .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3 .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3 .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3 .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3 .octa 0x748f82ee748f82ee748f82ee748f82ee .octa 0x748f82ee748f82ee748f82ee748f82ee .octa 0x78a5636f78a5636f78a5636f78a5636f .octa 0x78a5636f78a5636f78a5636f78a5636f .octa 0x84c8781484c8781484c8781484c87814 .octa 0x84c8781484c8781484c8781484c87814 .octa 0x8cc702088cc702088cc702088cc70208 .octa 0x8cc702088cc702088cc702088cc70208 .octa 0x90befffa90befffa90befffa90befffa .octa 0x90befffa90befffa90befffa90befffa .octa 0xa4506ceba4506ceba4506ceba4506ceb .octa 0xa4506ceba4506ceba4506ceba4506ceb .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7 .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7 .octa 0xc67178f2c67178f2c67178f2c67178f2 .octa 0xc67178f2c67178f2c67178f2c67178f2 .section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 .align 32 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 .octa 0x0c0d0e0f08090a0b0405060700010203 .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 .global K256 K256: .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .int 
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
AirFortressIlikara/LS2K0300-linux-4.19
6,223
arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
/* * Buffer submit code for multi buffer SHA256 algorithm * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Megha Dey <megha.dey@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/linkage.h> #include <asm/frame.h> #include "sha256_mb_mgr_datastruct.S" .extern sha256_x8_avx2 # LINUX register definitions arg1 = %rdi arg2 = %rsi size_offset = %rcx tmp2 = %rcx extra_blocks = %rdx # Common definitions #define state arg1 #define job %rsi #define len2 arg2 #define p2 arg2 # idx must be a register not clobberred by sha1_x8_avx2 idx = %r8 DWORD_idx = %r8d last_len = %r8 p = %r11 start_offset = %r11 unused_lanes = %rbx BYTE_unused_lanes = %bl job_rax = %rax len = %rax DWORD_len = %eax lane = %r12 tmp3 = %r12 tmp = %r9 DWORD_tmp = %r9d lane_data = %r10 # JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job) # arg 1 : rcx : state # arg 2 : rdx : job ENTRY(sha256_mb_mgr_submit_avx2) FRAME_BEGIN push %rbx push %r12 mov _unused_lanes(state), unused_lanes mov unused_lanes, lane and $0xF, lane shr $4, unused_lanes imul $_LANE_DATA_size, lane, lane_data movl $STS_BEING_PROCESSED, _status(job) lea _ldata(state, lane_data), lane_data mov unused_lanes, _unused_lanes(state) movl _len(job), DWORD_len mov job, _job_in_lane(lane_data) shl $4, len or lane, len movl DWORD_len, _lens(state , lane, 4) # Load digest words from result_digest vmovdqu _result_digest(job), %xmm0 vmovdqu _result_digest+1*16(job), %xmm1 vmovd %xmm0, _args_digest(state, lane, 4) vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4) vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4) vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4) vmovd 
%xmm1, _args_digest+4*32(state , lane, 4) vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4) vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4) vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4) mov _buffer(job), p mov p, _args_data_ptr(state, lane, 8) cmp $0xF, unused_lanes jne return_null start_loop: # Find min length vmovdqa _lens(state), %xmm0 vmovdqa _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword vmovd %xmm2, DWORD_idx mov idx, len2 and $0xF, idx shr $4, len2 jz len_is_0 vpand clear_low_nibble(%rip), %xmm2, %xmm2 vpshufd $0, %xmm2, %xmm2 vpsubd %xmm2, %xmm0, %xmm0 vpsubd %xmm2, %xmm1, %xmm1 vmovdqa %xmm0, _lens + 0*16(state) vmovdqa %xmm1, _lens + 1*16(state) # "state" and "args" are the same address, arg1 # len is arg2 call sha256_x8_avx2 # state and idx are intact len_is_0: # process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax mov _unused_lanes(state), unused_lanes movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) shl $4, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF, _lens(state,idx,4) vmovd _args_digest(state, idx, 4), %xmm0 vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0 vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0 vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0 vmovd _args_digest+4*32(state, idx, 4), %xmm1 vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1 vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1 vpinsrd $3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1 vmovdqu %xmm0, _result_digest(job_rax) vmovdqu %xmm1, _result_digest+1*16(job_rax) return: pop %r12 pop %rbx FRAME_END ret 
return_null: xor job_rax, job_rax jmp return ENDPROC(sha256_mb_mgr_submit_avx2) .section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 .align 16 clear_low_nibble: .octa 0x000000000000000000000000FFFFFFF0
AirFortressIlikara/LS2K0300-linux-4.19
13,074
arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
/* * Multi-buffer SHA1 algorithm hash compute routine * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * James Guilford <james.guilford@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/linkage.h> #include "sha1_mb_mgr_datastruct.S" ## code to compute oct SHA1 using SSE-256 ## outer calling routine takes care of save and restore of XMM registers ## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15# ymm0-15 ## ## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15 ## Linux preserves: rdi rbp r8 ## ## clobbers ymm0-15 # TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1 # "transpose" data in {r0...r7} using temps {t0...t1} # Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7} # r0 = {a7 a6 a5 a4 a3 a2 a1 a0} # r1 = {b7 b6 b5 b4 b3 b2 b1 b0} # r2 = {c7 c6 c5 c4 c3 c2 c1 c0} # r3 = {d7 d6 d5 d4 d3 d2 d1 d0} # r4 = {e7 e6 e5 e4 e3 e2 e1 e0} # r5 = {f7 f6 f5 f4 f3 f2 f1 f0} # r6 = {g7 g6 g5 g4 g3 g2 g1 g0} # r7 = {h7 h6 h5 h4 h3 h2 h1 h0} # # Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7} # r0 = {h0 g0 f0 e0 d0 c0 b0 a0} # r1 = {h1 g1 f1 e1 d1 c1 b1 a1} # r2 = {h2 g2 f2 e2 d2 c2 b2 a2} # r3 = {h3 g3 f3 e3 d3 c3 b3 a3} # r4 = {h4 g4 f4 e4 d4 c4 b4 a4} # r5 = {h5 g5 f5 e5 d5 c5 b5 a5} # r6 = {h6 g6 f6 e6 d6 c6 b6 a6} # r7 = {h7 g7 f7 e7 d7 c7 b7 a7} # .macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1 # process top half (r0..r3) {a...d} vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0} vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2} vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2} vshufps $0xDD, \t1, \t0, \r3 # r3 = 
{d5 c5 b5 a5 d1 c1 b1 a1} vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2} vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3} vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0} # use r2 in place of t0 # process bottom half (r4..r7) {e...h} vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0} vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2} vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0} vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2} vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1} vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2} vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3} vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0} vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6 vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2 vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5 vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1 vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7 vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3 vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4 vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0 .endm ## ## Magic functions defined in FIPS 180-1 ## # macro MAGIC_F0 F,B,C,D,T ## F = (D ^ (B & (C ^ D))) .macro MAGIC_F0 regF regB regC regD regT vpxor \regD, \regC, \regF vpand \regB, \regF, \regF vpxor \regD, \regF, \regF .endm # macro MAGIC_F1 F,B,C,D,T ## F = (B ^ C ^ D) .macro MAGIC_F1 regF regB regC regD regT vpxor \regC, \regD, \regF vpxor \regB, \regF, \regF .endm # macro MAGIC_F2 F,B,C,D,T ## F = ((B & C) | (B & D) | (C & D)) .macro MAGIC_F2 regF regB regC regD regT vpor \regC, \regB, \regF vpand \regC, \regB, \regT vpand \regD, \regF, \regF vpor \regT, \regF, \regF .endm # macro MAGIC_F3 F,B,C,D,T ## F = (B ^ C ^ D) .macro MAGIC_F3 regF regB regC regD regT MAGIC_F1 \regF,\regB,\regC,\regD,\regT .endm # PROLD reg, imm, tmp .macro PROLD reg imm tmp vpsrld $(32-\imm), \reg, \tmp vpslld $\imm, \reg, \reg vpor \tmp, \reg, \reg .endm .macro PROLD_nd reg imm tmp src vpsrld 
$(32-\imm), \src, \tmp vpslld $\imm, \src, \reg vpor \tmp, \reg, \reg .endm .macro SHA1_STEP_00_15 regA regB regC regD regE regT regF memW immCNT MAGIC vpaddd \immCNT, \regE, \regE vpaddd \memW*32(%rsp), \regE, \regE PROLD_nd \regT, 5, \regF, \regA vpaddd \regT, \regE, \regE \MAGIC \regF, \regB, \regC, \regD, \regT PROLD \regB, 30, \regT vpaddd \regF, \regE, \regE .endm .macro SHA1_STEP_16_79 regA regB regC regD regE regT regF memW immCNT MAGIC vpaddd \immCNT, \regE, \regE offset = ((\memW - 14) & 15) * 32 vmovdqu offset(%rsp), W14 vpxor W14, W16, W16 offset = ((\memW - 8) & 15) * 32 vpxor offset(%rsp), W16, W16 offset = ((\memW - 3) & 15) * 32 vpxor offset(%rsp), W16, W16 vpsrld $(32-1), W16, \regF vpslld $1, W16, W16 vpor W16, \regF, \regF ROTATE_W offset = ((\memW - 0) & 15) * 32 vmovdqu \regF, offset(%rsp) vpaddd \regF, \regE, \regE PROLD_nd \regT, 5, \regF, \regA vpaddd \regT, \regE, \regE \MAGIC \regF,\regB,\regC,\regD,\regT ## FUN = MAGIC_Fi(B,C,D) PROLD \regB,30, \regT vpaddd \regF, \regE, \regE .endm ######################################################################## ######################################################################## ######################################################################## ## FRAMESZ plus pushes must be an odd multiple of 8 YMM_SAVE = (15-15)*32 FRAMESZ = 32*16 + YMM_SAVE _YMM = FRAMESZ - YMM_SAVE #define VMOVPS vmovups IDX = %rax inp0 = %r9 inp1 = %r10 inp2 = %r11 inp3 = %r12 inp4 = %r13 inp5 = %r14 inp6 = %r15 inp7 = %rcx arg1 = %rdi arg2 = %rsi RSP_SAVE = %rdx # ymm0 A # ymm1 B # ymm2 C # ymm3 D # ymm4 E # ymm5 F AA # ymm6 T0 BB # ymm7 T1 CC # ymm8 T2 DD # ymm9 T3 EE # ymm10 T4 TMP # ymm11 T5 FUN # ymm12 T6 K # ymm13 T7 W14 # ymm14 T8 W15 # ymm15 T9 W16 A = %ymm0 B = %ymm1 C = %ymm2 D = %ymm3 E = %ymm4 F = %ymm5 T0 = %ymm6 T1 = %ymm7 T2 = %ymm8 T3 = %ymm9 T4 = %ymm10 T5 = %ymm11 T6 = %ymm12 T7 = %ymm13 T8 = %ymm14 T9 = %ymm15 AA = %ymm5 BB = %ymm6 CC = %ymm7 DD = %ymm8 EE = %ymm9 TMP = %ymm10 FUN = %ymm11 K = 
%ymm12 W14 = %ymm13 W15 = %ymm14 W16 = %ymm15 .macro ROTATE_ARGS TMP_ = E E = D D = C C = B B = A A = TMP_ .endm .macro ROTATE_W TMP_ = W16 W16 = W15 W15 = W14 W14 = TMP_ .endm # 8 streams x 5 32bit words per digest x 4 bytes per word #define DIGEST_SIZE (8*5*4) .align 32 # void sha1_x8_avx2(void **input_data, UINT128 *digest, UINT32 size) # arg 1 : pointer to array[4] of pointer to input data # arg 2 : size (in blocks) ;; assumed to be >= 1 # ENTRY(sha1_x8_avx2) # save callee-saved clobbered registers to comply with C function ABI push %r12 push %r13 push %r14 push %r15 #save rsp mov %rsp, RSP_SAVE sub $FRAMESZ, %rsp #align rsp to 32 Bytes and $~0x1F, %rsp ## Initialize digests vmovdqu 0*32(arg1), A vmovdqu 1*32(arg1), B vmovdqu 2*32(arg1), C vmovdqu 3*32(arg1), D vmovdqu 4*32(arg1), E ## transpose input onto stack mov _data_ptr+0*8(arg1),inp0 mov _data_ptr+1*8(arg1),inp1 mov _data_ptr+2*8(arg1),inp2 mov _data_ptr+3*8(arg1),inp3 mov _data_ptr+4*8(arg1),inp4 mov _data_ptr+5*8(arg1),inp5 mov _data_ptr+6*8(arg1),inp6 mov _data_ptr+7*8(arg1),inp7 xor IDX, IDX lloop: vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), F I=0 .rep 2 VMOVPS (inp0, IDX), T0 VMOVPS (inp1, IDX), T1 VMOVPS (inp2, IDX), T2 VMOVPS (inp3, IDX), T3 VMOVPS (inp4, IDX), T4 VMOVPS (inp5, IDX), T5 VMOVPS (inp6, IDX), T6 VMOVPS (inp7, IDX), T7 TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9 vpshufb F, T0, T0 vmovdqu T0, (I*8)*32(%rsp) vpshufb F, T1, T1 vmovdqu T1, (I*8+1)*32(%rsp) vpshufb F, T2, T2 vmovdqu T2, (I*8+2)*32(%rsp) vpshufb F, T3, T3 vmovdqu T3, (I*8+3)*32(%rsp) vpshufb F, T4, T4 vmovdqu T4, (I*8+4)*32(%rsp) vpshufb F, T5, T5 vmovdqu T5, (I*8+5)*32(%rsp) vpshufb F, T6, T6 vmovdqu T6, (I*8+6)*32(%rsp) vpshufb F, T7, T7 vmovdqu T7, (I*8+7)*32(%rsp) add $32, IDX I = (I+1) .endr # save old digests vmovdqu A,AA vmovdqu B,BB vmovdqu C,CC vmovdqu D,DD vmovdqu E,EE ## ## perform 0-79 steps ## vmovdqu K00_19(%rip), K ## do rounds 0...15 I = 0 .rep 16 SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0 
ROTATE_ARGS I = (I+1) .endr ## do rounds 16...19 vmovdqu ((16 - 16) & 15) * 32 (%rsp), W16 vmovdqu ((16 - 15) & 15) * 32 (%rsp), W15 .rep 4 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0 ROTATE_ARGS I = (I+1) .endr ## do rounds 20...39 vmovdqu K20_39(%rip), K .rep 20 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1 ROTATE_ARGS I = (I+1) .endr ## do rounds 40...59 vmovdqu K40_59(%rip), K .rep 20 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2 ROTATE_ARGS I = (I+1) .endr ## do rounds 60...79 vmovdqu K60_79(%rip), K .rep 20 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3 ROTATE_ARGS I = (I+1) .endr vpaddd AA,A,A vpaddd BB,B,B vpaddd CC,C,C vpaddd DD,D,D vpaddd EE,E,E sub $1, arg2 jne lloop # write out digests vmovdqu A, 0*32(arg1) vmovdqu B, 1*32(arg1) vmovdqu C, 2*32(arg1) vmovdqu D, 3*32(arg1) vmovdqu E, 4*32(arg1) # update input pointers add IDX, inp0 add IDX, inp1 add IDX, inp2 add IDX, inp3 add IDX, inp4 add IDX, inp5 add IDX, inp6 add IDX, inp7 mov inp0, _data_ptr (arg1) mov inp1, _data_ptr + 1*8(arg1) mov inp2, _data_ptr + 2*8(arg1) mov inp3, _data_ptr + 3*8(arg1) mov inp4, _data_ptr + 4*8(arg1) mov inp5, _data_ptr + 5*8(arg1) mov inp6, _data_ptr + 6*8(arg1) mov inp7, _data_ptr + 7*8(arg1) ################ ## Postamble mov RSP_SAVE, %rsp # restore callee-saved clobbered registers pop %r15 pop %r14 pop %r13 pop %r12 ret ENDPROC(sha1_x8_avx2) .section .rodata.cst32.K00_19, "aM", @progbits, 32 .align 32 K00_19: .octa 0x5A8279995A8279995A8279995A827999 .octa 0x5A8279995A8279995A8279995A827999 .section .rodata.cst32.K20_39, "aM", @progbits, 32 .align 32 K20_39: .octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1 .octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1 .section .rodata.cst32.K40_59, "aM", @progbits, 32 .align 32 K40_59: .octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC .octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC .section .rodata.cst32.K60_79, "aM", @progbits, 32 .align 32 K60_79: .octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6 .octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6 .section 
.rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 .align 32 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 .octa 0x0c0d0e0f08090a0b0405060700010203
AirFortressIlikara/LS2K0300-linux-4.19
8,771
arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
/* * Header file for multi buffer SHA1 algorithm data structure * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * James Guilford <james.guilford@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ # Macros for defining data structures # Usage example #START_FIELDS # JOB_AES ### name size align #FIELD _plaintext, 8, 8 # pointer to plaintext #FIELD _ciphertext, 8, 8 # pointer to ciphertext #FIELD _IV, 16, 8 # IV #FIELD _keys, 8, 8 # pointer to keys #FIELD _len, 4, 4 # length in bytes #FIELD _status, 4, 4 # status enumeration #FIELD _user_data, 8, 8 # pointer to user data #UNION _union, size1, align1, \ # size2, align2, \ # size3, align3, \ # ... #END_FIELDS #%assign _JOB_AES_size _FIELD_OFFSET #%assign _JOB_AES_align _STRUCT_ALIGN ######################################################################### # Alternate "struc-like" syntax: # STRUCT job_aes2 # RES_Q .plaintext, 1 # RES_Q .ciphertext, 1 # RES_DQ .IV, 1 # RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN # RES_U .union, size1, align1, \ # size2, align2, \ # ... # ENDSTRUCT # # Following only needed if nesting # %assign job_aes2_size _FIELD_OFFSET # %assign job_aes2_align _STRUCT_ALIGN # # RES_* macros take a name, a count and an optional alignment. # The count in in terms of the base size of the macro, and the # default alignment is the base size. # The macros are: # Macro Base size # RES_B 1 # RES_W 2 # RES_D 4 # RES_Q 8 # RES_DQ 16 # RES_Y 32 # RES_Z 64 # # RES_U defines a union. It's arguments are a name and two or more # pairs of "size, alignment" # # The two assigns are only needed if this structure is being nested # within another. 
Even if the assigns are not done, one can still use # STRUCT_NAME_size as the size of the structure. # # Note that for nesting, you still need to assign to STRUCT_NAME_size. # # The differences between this and using "struc" directly are that each # type is implicitly aligned to its natural length (although this can be # over-ridden with an explicit third parameter), and that the structure # is padded at the end to its overall alignment. # ######################################################################### #ifndef _SHA1_MB_MGR_DATASTRUCT_ASM_ #define _SHA1_MB_MGR_DATASTRUCT_ASM_ ## START_FIELDS .macro START_FIELDS _FIELD_OFFSET = 0 _STRUCT_ALIGN = 0 .endm ## FIELD name size align .macro FIELD name size align _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) \name = _FIELD_OFFSET _FIELD_OFFSET = _FIELD_OFFSET + (\size) .if (\align > _STRUCT_ALIGN) _STRUCT_ALIGN = \align .endif .endm ## END_FIELDS .macro END_FIELDS _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) .endm ######################################################################## .macro STRUCT p1 START_FIELDS .struc \p1 .endm .macro ENDSTRUCT tmp = _FIELD_OFFSET END_FIELDS tmp = (_FIELD_OFFSET - %%tmp) .if (tmp > 0) .lcomm tmp .endif .endstruc .endm ## RES_int name size align .macro RES_int p1 p2 p3 name = \p1 size = \p2 align = .\p3 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) .align align .lcomm name size _FIELD_OFFSET = _FIELD_OFFSET + (size) .if (align > _STRUCT_ALIGN) _STRUCT_ALIGN = align .endif .endm # macro RES_B name, size [, align] .macro RES_B _name, _size, _align=1 RES_int _name _size _align .endm # macro RES_W name, size [, align] .macro RES_W _name, _size, _align=2 RES_int _name 2*(_size) _align .endm # macro RES_D name, size [, align] .macro RES_D _name, _size, _align=4 RES_int _name 4*(_size) _align .endm # macro RES_Q name, size [, align] .macro RES_Q _name, _size, _align=8 RES_int _name 8*(_size) _align .endm # macro RES_DQ 
name, size [, align] .macro RES_DQ _name, _size, _align=16 RES_int _name 16*(_size) _align .endm # macro RES_Y name, size [, align] .macro RES_Y _name, _size, _align=32 RES_int _name 32*(_size) _align .endm # macro RES_Z name, size [, align] .macro RES_Z _name, _size, _align=64 RES_int _name 64*(_size) _align .endm #endif ######################################################################## #### Define constants ######################################################################## ######################################################################## #### Define SHA1 Out Of Order Data Structures ######################################################################## START_FIELDS # LANE_DATA ### name size align FIELD _job_in_lane, 8, 8 # pointer to job object END_FIELDS _LANE_DATA_size = _FIELD_OFFSET _LANE_DATA_align = _STRUCT_ALIGN ######################################################################## START_FIELDS # SHA1_ARGS_X8 ### name size align FIELD _digest, 4*5*8, 16 # transposed digest FIELD _data_ptr, 8*8, 8 # array of pointers to data END_FIELDS _SHA1_ARGS_X4_size = _FIELD_OFFSET _SHA1_ARGS_X4_align = _STRUCT_ALIGN _SHA1_ARGS_X8_size = _FIELD_OFFSET _SHA1_ARGS_X8_align = _STRUCT_ALIGN ######################################################################## START_FIELDS # MB_MGR ### name size align FIELD _args, _SHA1_ARGS_X4_size, _SHA1_ARGS_X4_align FIELD _lens, 4*8, 8 FIELD _unused_lanes, 8, 8 FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align END_FIELDS _MB_MGR_size = _FIELD_OFFSET _MB_MGR_align = _STRUCT_ALIGN _args_digest = _args + _digest _args_data_ptr = _args + _data_ptr ######################################################################## #### Define constants ######################################################################## #define STS_UNKNOWN 0 #define STS_BEING_PROCESSED 1 #define STS_COMPLETED 2 ######################################################################## #### Define JOB_SHA1 structure 
######################################################################## START_FIELDS # JOB_SHA1 ### name size align FIELD _buffer, 8, 8 # pointer to buffer FIELD _len, 4, 4 # length in bytes FIELD _result_digest, 5*4, 32 # Digest (output) FIELD _status, 4, 4 FIELD _user_data, 8, 8 END_FIELDS _JOB_SHA1_size = _FIELD_OFFSET _JOB_SHA1_align = _STRUCT_ALIGN
AirFortressIlikara/LS2K0300-linux-4.19
8,432
arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
/* * Flush routine for SHA1 multibuffer * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * James Guilford <james.guilford@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/linkage.h> #include <asm/frame.h> #include "sha1_mb_mgr_datastruct.S" .extern sha1_x8_avx2 # LINUX register definitions #define arg1 %rdi #define arg2 %rsi # Common definitions #define state arg1 #define job arg2 #define len2 arg2 # idx must be a register not clobbered by sha1_x8_avx2 #define idx %r8 #define DWORD_idx %r8d #define unused_lanes %rbx #define lane_data %rbx #define tmp2 %rbx #define tmp2_w %ebx #define job_rax %rax #define tmp1 %rax #define size_offset %rax #define tmp %rax #define start_offset %rax #define tmp3 %arg1 #define extra_blocks %arg2 #define p %arg2 .macro LABEL prefix n \prefix\n\(): .endm .macro JNE_SKIP i jne skip_\i .endm .altmacro .macro SET_OFFSET _offset offset = \_offset .endm .noaltmacro # JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state) # arg 1 : rcx : state ENTRY(sha1_mb_mgr_flush_avx2) FRAME_BEGIN push %rbx # If bit (32+3) is set, then all lanes are empty mov _unused_lanes(state), unused_lanes bt $32+3, unused_lanes jc return_null # find a lane with a non-null job xor idx, idx offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne one(%rip), idx offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne two(%rip), idx offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne three(%rip), idx offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne four(%rip), idx offset = (_ldata + 5 * 
_LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne five(%rip), idx offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne six(%rip), idx offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne seven(%rip), idx # copy idx to empty lanes copy_lane_data: offset = (_args + _data_ptr) mov offset(state,idx,8), tmp I = 0 .rep 8 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) .altmacro JNE_SKIP %I offset = (_args + _data_ptr + 8*I) mov tmp, offset(state) offset = (_lens + 4*I) movl $0xFFFFFFFF, offset(state) LABEL skip_ %I I = (I+1) .noaltmacro .endr # Find min length vmovdqu _lens+0*16(state), %xmm0 vmovdqu _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword vmovd %xmm2, DWORD_idx mov idx, len2 and $0xF, idx shr $4, len2 jz len_is_0 vpand clear_low_nibble(%rip), %xmm2, %xmm2 vpshufd $0, %xmm2, %xmm2 vpsubd %xmm2, %xmm0, %xmm0 vpsubd %xmm2, %xmm1, %xmm1 vmovdqu %xmm0, _lens+0*16(state) vmovdqu %xmm1, _lens+1*16(state) # "state" and "args" are the same address, arg1 # len is arg2 call sha1_x8_avx2 # state and idx are intact len_is_0: # process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) mov _unused_lanes(state), unused_lanes shl $4, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF, _lens(state, idx, 4) vmovd _args_digest(state , idx, 4) , %xmm0 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 movl 
_args_digest+4*32(state, idx, 4), tmp2_w vmovdqu %xmm0, _result_digest(job_rax) offset = (_result_digest + 1*16) mov tmp2_w, offset(job_rax) return: pop %rbx FRAME_END ret return_null: xor job_rax, job_rax jmp return ENDPROC(sha1_mb_mgr_flush_avx2) ################################################################# .align 16 ENTRY(sha1_mb_mgr_get_comp_job_avx2) push %rbx ## if bit 32+3 is set, then all lanes are empty mov _unused_lanes(state), unused_lanes bt $(32+3), unused_lanes jc .return_null # Find min length vmovdqu _lens(state), %xmm0 vmovdqu _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword vmovd %xmm2, DWORD_idx test $~0xF, idx jnz .return_null # process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) mov _unused_lanes(state), unused_lanes shl $4, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF, _lens(state, idx, 4) vmovd _args_digest(state, idx, 4), %xmm0 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 movl _args_digest+4*32(state, idx, 4), tmp2_w vmovdqu %xmm0, _result_digest(job_rax) movl tmp2_w, _result_digest+1*16(job_rax) pop %rbx ret .return_null: xor job_rax, job_rax pop %rbx ret ENDPROC(sha1_mb_mgr_get_comp_job_avx2) .section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 .align 16 clear_low_nibble: .octa 0x000000000000000000000000FFFFFFF0 .section .rodata.cst8, "aM", @progbits, 8 .align 8 one: .quad 1 two: .quad 2 three: .quad 3 four: .quad 4 five: .quad 5 six: .quad 6 seven: .quad 7
AirFortressIlikara/LS2K0300-linux-4.19
6,139
arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
/* * Buffer submit code for multi buffer SHA1 algorithm * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * James Guilford <james.guilford@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/linkage.h> #include <asm/frame.h> #include "sha1_mb_mgr_datastruct.S" .extern sha1_x8_avx # LINUX register definitions arg1 = %rdi arg2 = %rsi size_offset = %rcx tmp2 = %rcx extra_blocks = %rdx # Common definitions #define state arg1 #define job %rsi #define len2 arg2 #define p2 arg2 # idx must be a register not clobberred by sha1_x8_avx2 idx = %r8 DWORD_idx = %r8d last_len = %r8 p = %r11 start_offset = %r11 unused_lanes = %rbx BYTE_unused_lanes = %bl job_rax = %rax len = %rax DWORD_len = %eax lane = %r12 tmp3 = %r12 tmp = %r9 DWORD_tmp = %r9d lane_data = %r10 # JOB* submit_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job) # arg 1 : rcx : state # arg 2 : rdx : job ENTRY(sha1_mb_mgr_submit_avx2) FRAME_BEGIN push %rbx push %r12 mov _unused_lanes(state), unused_lanes mov unused_lanes, lane and $0xF, lane shr $4, unused_lanes imul $_LANE_DATA_size, lane, lane_data movl $STS_BEING_PROCESSED, _status(job) lea _ldata(state, lane_data), lane_data mov unused_lanes, _unused_lanes(state) movl _len(job), DWORD_len mov job, _job_in_lane(lane_data) shl $4, len or lane, len movl DWORD_len, _lens(state , lane, 4) # Load digest words from result_digest vmovdqu _result_digest(job), %xmm0 mov _result_digest+1*16(job), DWORD_tmp vmovd %xmm0, _args_digest(state, lane, 4) vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4) vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4) vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4) movl DWORD_tmp, 
_args_digest+4*32(state , lane, 4) mov _buffer(job), p mov p, _args_data_ptr(state, lane, 8) cmp $0xF, unused_lanes jne return_null start_loop: # Find min length vmovdqa _lens(state), %xmm0 vmovdqa _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword vmovd %xmm2, DWORD_idx mov idx, len2 and $0xF, idx shr $4, len2 jz len_is_0 vpand clear_low_nibble(%rip), %xmm2, %xmm2 vpshufd $0, %xmm2, %xmm2 vpsubd %xmm2, %xmm0, %xmm0 vpsubd %xmm2, %xmm1, %xmm1 vmovdqa %xmm0, _lens + 0*16(state) vmovdqa %xmm1, _lens + 1*16(state) # "state" and "args" are the same address, arg1 # len is arg2 call sha1_x8_avx2 # state and idx are intact len_is_0: # process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax mov _unused_lanes(state), unused_lanes movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) shl $4, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF, _lens(state, idx, 4) vmovd _args_digest(state, idx, 4), %xmm0 vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0 vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0 vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0 movl _args_digest+4*32(state, idx, 4), DWORD_tmp vmovdqu %xmm0, _result_digest(job_rax) movl DWORD_tmp, _result_digest+1*16(job_rax) return: pop %r12 pop %rbx FRAME_END ret return_null: xor job_rax, job_rax jmp return ENDPROC(sha1_mb_mgr_submit_avx2) .section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 .align 16 clear_low_nibble: .octa 0x000000000000000000000000FFFFFFF0
AirFortressIlikara/LS2K0300-linux-4.19
17,606
arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
/* * Multi-buffer SHA512 algorithm hash compute routine * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Megha Dey <megha.dey@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
                                          IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

# code to compute quad SHA512 using AVX2
# use YMMs to tackle the larger digest size
# outer calling routine takes care of save and restore of XMM registers
# Logic designed/laid out by JDG

# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
# Stack must be aligned to 32 bytes before call
# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12
# Linux preserves: rcx rdx rdi rbp r13 r14 r15
# clobbers ymm0-15

#include <linux/linkage.h>
#include "sha512_mb_mgr_datastruct.S"

arg1 = %rdi
arg2 = %rsi

# Common definitions
STATE = arg1		# pointer to the SHA512_MB_ARGS_X4 state object
INP_SIZE = arg2		# number of 128-byte blocks to process per lane

IDX = %rax		# byte offset into each lane's message data
ROUND = %rbx		# round counter, advanced by SZ4 per round
TBL = %r8		# pointer to the replicated K512_4 constant table

inp0 = %r9		# data pointers for the four message lanes
inp1 = %r10
inp2 = %r11
inp3 = %r12

# SHA-512 working variables; each ymm holds one 64-bit word per lane (x4)
a = %ymm0
b = %ymm1
c = %ymm2
d = %ymm3
e = %ymm4
f = %ymm5
g = %ymm6
h = %ymm7

a0 = %ymm8		# scratch for the Sigma/ch/maj computations
a1 = %ymm9
a2 = %ymm10

# message-load temporaries; note TT4/TT5 alias a2/a1 (scratch reuse)
TT0 = %ymm14
TT1 = %ymm13
TT2 = %ymm12
TT3 = %ymm11
TT4 = %ymm10
TT5 = %ymm9

T1 = %ymm14
TMP = %ymm15

# Define stack usage: 16-entry message schedule window + saved digest + pad
STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24

#define VMOVPD	vmovupd
_digest = SZ4*16	# stack offset of the saved (old) digest

# transpose r0, r1, r2, r3, t0, t1
# "transpose" data in {r0..r3} using temps {t0..t3}
# Input looks like: {r0 r1 r2 r3}
# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
#
# output looks like: {t0 r1 r0 r3}
# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
.macro TRANSPOSE r0 r1 r2 r3 t0 t1
	vshufps	$0x44, \r1, \r0, \t0	# t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
	vshufps	$0xEE, \r1, \r0, \r0	# r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
	vshufps	$0x44, \r3, \r2, \t1	# t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
	vshufps	$0xEE, \r3, \r2, \r2	# r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
	vperm2f128	$0x20, \r2, \r0, \r1	# h6...a6
	vperm2f128	$0x31, \r2, \r0, \r3	# h2...a2
	vperm2f128	$0x31, \t1, \t0, \r0	# h5...a5
	vperm2f128	$0x20, \t1, \t0, \t0	# h1...a1
.endm

# Rotate the a..h register aliases one step (the standard SHA round
# rotation), so each ROUND_* macro body can be written against fixed names.
.macro ROTATE_ARGS
TMP_ = h
h = g
g = f
f = e
e = d
d = c
c = b
b = a
a = TMP_
.endm

# PRORQ reg, imm, tmp
# packed-rotate-right-double
# does a rotate by doing two shifts and an or
.macro _PRORQ reg imm tmp
	vpsllq	$(64-\imm),\reg,\tmp
	vpsrlq	$\imm,\reg, \reg
	vpor	\tmp,\reg, \reg
.endm

# non-destructive
# PRORQ_nd reg, imm, tmp, src
.macro _PRORQ_nd reg imm tmp src
	vpsllq	$(64-\imm), \src, \tmp
	vpsrlq	$\imm, \src, \reg
	vpor	\tmp, \reg, \reg
.endm

# PRORQ dst/src, amt
.macro PRORQ reg imm
	_PRORQ	\reg, \imm, TMP
.endm

# PRORQ_nd dst, src, amt
.macro PRORQ_nd reg tmp imm
	_PRORQ_nd	\reg, \imm, TMP, \tmp
.endm

#; arguments passed implicitly in preprocessor symbols i, a...h
# One SHA-512 round. SHA-512 big sigmas (NOTE: the original comments were
# SHA-256 leftovers; the rotation amounts below are the SHA-512 ones):
#   Sigma1(e) = (e ror 14) ^ (e ror 18) ^ (e ror 41)
#   Sigma0(a) = (a ror 28) ^ (a ror 34) ^ (a ror 39)
# The two Sigmas are each built from a partial rotate (difference of two
# amounts), an xor, and a final rotate by the smallest amount.
.macro ROUND_00_15 _T1 i
	PRORQ_nd	a0, e, (18-14)	# sig1: a0 = (e ror 4)
	vpxor	g, f, a2		# ch: a2 = f^g
	vpand	e,a2, a2		# ch: a2 = (f^g)&e
	vpxor	g, a2, a2		# a2 = ch

	PRORQ_nd	a1,e,41		# sig1: a1 = (e ror 41)
	offset = SZ4*(\i & 0xf)
	vmovdqu	\_T1,offset(%rsp)	# stash W[i] in the circular window
	vpaddq	(TBL,ROUND,1), \_T1, \_T1	# T1 = W + K
	vpxor	e,a0, a0		# sig1: a0 = e ^ (e ror 4)
	PRORQ	a0, 14			# sig1: a0 = (e ror 14) ^ (e ror 18)
	vpaddq	a2, h, h		# h = h + ch
	PRORQ_nd	a2,a,6		# sig0: a2 = (a ror 6)
	vpaddq	\_T1,h, h		# h = h + ch + W + K
	vpxor	a1, a0, a0		# a0 = sigma1
	vmovdqu	a,\_T1
	PRORQ_nd	a1,a,39		# sig0: a1 = (a ror 39)
	vpxor	c, \_T1, \_T1		# maj: T1 = a^c
	add	$SZ4, ROUND		# ROUND++
	vpand	b, \_T1, \_T1		# maj: T1 = (a^c)&b
	vpaddq	a0, h, h		# h += sigma1
	vpaddq	h, d, d			# d += h (new e)
	vpxor	a, a2, a2		# sig0: a2 = a ^ (a ror 6)
	PRORQ	a2,28			# sig0: a2 = (a ror 28) ^ (a ror 34)
	vpxor	a1, a2, a2		# a2 = sig0
	vpand	c, a, a1		# maj: a1 = a&c
	vpor	\_T1, a1, a1		# a1 = maj
	vpaddq	a1, h, h		# h = h + ch + W + K + maj
	vpaddq	a2, h, h		# h = h + ch + W + K + maj + sigma0
	ROTATE_ARGS
.endm

#; arguments passed implicitly in preprocessor symbols i, a...h
# Message schedule for rounds 16..79:
#   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
# with  sigma0(x) = (x ror 1) ^ (x ror 8) ^ (x >> 7)
#       sigma1(x) = (x ror 19) ^ (x ror 61) ^ (x >> 6)
# built incrementally: ror7/ror42 then xor then ror1/ror19 yields the
# two rotate terms; the plain shift is added with vpsrlq.
.macro ROUND_16_XX _T1 i
	vmovdqu	SZ4*((\i-15)&0xf)(%rsp), \_T1	# W[i-15]
	vmovdqu	SZ4*((\i-2)&0xf)(%rsp), a1	# W[i-2]
	vmovdqu	\_T1, a0
	PRORQ	\_T1,7
	vmovdqu	a1, a2
	PRORQ	a1,42
	vpxor	a0, \_T1, \_T1
	PRORQ	\_T1, 1			# _T1 = (w ror 1) ^ (w ror 8)
	vpxor	a2, a1, a1
	PRORQ	a1, 19			# a1 = (w ror 19) ^ (w ror 61)
	vpsrlq	$7, a0, a0
	vpxor	a0, \_T1, \_T1		# _T1 = sigma0(W[i-15])
	vpsrlq	$6, a2, a2
	vpxor	a2, a1, a1		# a1 = sigma1(W[i-2])
	vpaddq	SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
	vpaddq	SZ4*((\i-7)&0xf)(%rsp), a1, a1
	vpaddq	a1, \_T1, \_T1		# _T1 = W[i]

	ROUND_00_15	\_T1,\i
.endm

# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
# arg 1 : STATE    : pointer to input data
# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
ENTRY(sha512_x4_avx2)
	# general registers preserved in outer calling routine
	# outer calling routine saves all the XMM registers
	# save callee-saved clobbered registers to comply with C function ABI
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	sub	$STACK_SPACE1, %rsp

	# Load the pre-transposed incoming digest.
	vmovdqu	0*SHA512_DIGEST_ROW_SIZE(STATE),a
	vmovdqu	1*SHA512_DIGEST_ROW_SIZE(STATE),b
	vmovdqu	2*SHA512_DIGEST_ROW_SIZE(STATE),c
	vmovdqu	3*SHA512_DIGEST_ROW_SIZE(STATE),d
	vmovdqu	4*SHA512_DIGEST_ROW_SIZE(STATE),e
	vmovdqu	5*SHA512_DIGEST_ROW_SIZE(STATE),f
	vmovdqu	6*SHA512_DIGEST_ROW_SIZE(STATE),g
	vmovdqu	7*SHA512_DIGEST_ROW_SIZE(STATE),h

	lea	K512_4(%rip),TBL

	# load the address of each of the 4 message lanes
	# getting ready to transpose input onto stack
	mov	_data_ptr+0*PTR_SZ(STATE),inp0
	mov	_data_ptr+1*PTR_SZ(STATE),inp1
	mov	_data_ptr+2*PTR_SZ(STATE),inp2
	mov	_data_ptr+3*PTR_SZ(STATE),inp3

	xor	IDX, IDX
lloop:
	xor	ROUND, ROUND

	# save old digest
	vmovdqu	a, _digest(%rsp)
	vmovdqu	b, _digest+1*SZ4(%rsp)
	vmovdqu	c, _digest+2*SZ4(%rsp)
	vmovdqu	d, _digest+3*SZ4(%rsp)
	vmovdqu	e, _digest+4*SZ4(%rsp)
	vmovdqu	f, _digest+5*SZ4(%rsp)
	vmovdqu	g, _digest+6*SZ4(%rsp)
	vmovdqu	h, _digest+7*SZ4(%rsp)
	i = 0
# Rounds 0..15: load 32 bytes per lane per iteration, transpose the four
# lanes into word-interleaved form, byte-swap to big-endian, then run
# four rounds per iteration.
.rep 4
	vmovdqu	PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
	VMOVPD	i*32(inp0, IDX), TT2
	VMOVPD	i*32(inp1, IDX), TT1
	VMOVPD	i*32(inp2, IDX), TT4
	VMOVPD	i*32(inp3, IDX), TT3
	TRANSPOSE	TT2, TT1, TT4, TT3, TT0, TT5
	vpshufb	TMP, TT0, TT0
	vpshufb	TMP, TT1, TT1
	vpshufb	TMP, TT2, TT2
	vpshufb	TMP, TT3, TT3
	ROUND_00_15	TT0,(i*4+0)
	ROUND_00_15	TT1,(i*4+1)
	ROUND_00_15	TT2,(i*4+2)
	ROUND_00_15	TT3,(i*4+3)
	i = (i+1)
.endr
	add	$128, IDX		# one SHA-512 block consumed per lane

	i = (i*4)

	jmp	Lrounds_16_xx
.align 16
# Rounds 16..79; ROUND stops at 80*SZ4 = 0xa00.
Lrounds_16_xx:
.rep 16
	ROUND_16_XX	T1, i
	i = (i+1)
.endr
	cmp	$0xa00,ROUND
	jb	Lrounds_16_xx

	# add old digest
	vpaddq	_digest(%rsp), a, a
	vpaddq	_digest+1*SZ4(%rsp), b, b
	vpaddq	_digest+2*SZ4(%rsp), c, c
	vpaddq	_digest+3*SZ4(%rsp), d, d
	vpaddq	_digest+4*SZ4(%rsp), e, e
	vpaddq	_digest+5*SZ4(%rsp), f, f
	vpaddq	_digest+6*SZ4(%rsp), g, g
	vpaddq	_digest+7*SZ4(%rsp), h, h

	sub	$1, INP_SIZE		# unit is blocks
	jne	lloop

	# write back to memory (state object) the transposed digest
	vmovdqu	a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	h, 7*SHA512_DIGEST_ROW_SIZE(STATE)

	# update input data pointers
	add	IDX, inp0
	mov	inp0, _data_ptr+0*PTR_SZ(STATE)
	add	IDX, inp1
	mov	inp1, _data_ptr+1*PTR_SZ(STATE)
	add	IDX, inp2
	mov	inp2, _data_ptr+2*PTR_SZ(STATE)
	add	IDX, inp3
	mov	inp3, _data_ptr+3*PTR_SZ(STATE)

	#;;;;;;;;;;;;;;;
	#; Postamble
	add	$STACK_SPACE1, %rsp
	# restore callee-saved clobbered registers
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	# outer calling routine restores XMM and other GP registers
	ret
ENDPROC(sha512_x4_avx2)

# SHA-512 round constants K[0..79] (FIPS 180-4), each replicated across
# all four 64-bit lanes of a ymm register.
.section	.rodata.K512_4, "a", @progbits
.align 64
K512_4:
	.octa 0x428a2f98d728ae22428a2f98d728ae22,\
		0x428a2f98d728ae22428a2f98d728ae22
	.octa 0x7137449123ef65cd7137449123ef65cd,\
		0x7137449123ef65cd7137449123ef65cd
	.octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
		0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
	.octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
		0xe9b5dba58189dbbce9b5dba58189dbbc
	.octa 0x3956c25bf348b5383956c25bf348b538,\
		0x3956c25bf348b5383956c25bf348b538
	.octa 0x59f111f1b605d01959f111f1b605d019,\
		0x59f111f1b605d01959f111f1b605d019
	.octa 0x923f82a4af194f9b923f82a4af194f9b,\
		0x923f82a4af194f9b923f82a4af194f9b
	.octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
		0xab1c5ed5da6d8118ab1c5ed5da6d8118
	.octa 0xd807aa98a3030242d807aa98a3030242,\
		0xd807aa98a3030242d807aa98a3030242
	.octa 0x12835b0145706fbe12835b0145706fbe,\
		0x12835b0145706fbe12835b0145706fbe
	.octa 0x243185be4ee4b28c243185be4ee4b28c,\
		0x243185be4ee4b28c243185be4ee4b28c
	.octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
		0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
	.octa 0x72be5d74f27b896f72be5d74f27b896f,\
		0x72be5d74f27b896f72be5d74f27b896f
	.octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
		0x80deb1fe3b1696b180deb1fe3b1696b1
	.octa 0x9bdc06a725c712359bdc06a725c71235,\
		0x9bdc06a725c712359bdc06a725c71235
	.octa 0xc19bf174cf692694c19bf174cf692694,\
		0xc19bf174cf692694c19bf174cf692694
	.octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
		0xe49b69c19ef14ad2e49b69c19ef14ad2
	.octa 0xefbe4786384f25e3efbe4786384f25e3,\
		0xefbe4786384f25e3efbe4786384f25e3
	.octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
		0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
	.octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
		0x240ca1cc77ac9c65240ca1cc77ac9c65
	.octa 0x2de92c6f592b02752de92c6f592b0275,\
		0x2de92c6f592b02752de92c6f592b0275
	.octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
		0x4a7484aa6ea6e4834a7484aa6ea6e483
	.octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
		0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
	.octa 0x76f988da831153b576f988da831153b5,\
		0x76f988da831153b576f988da831153b5
	.octa 0x983e5152ee66dfab983e5152ee66dfab,\
		0x983e5152ee66dfab983e5152ee66dfab
	.octa 0xa831c66d2db43210a831c66d2db43210,\
		0xa831c66d2db43210a831c66d2db43210
	.octa 0xb00327c898fb213fb00327c898fb213f,\
		0xb00327c898fb213fb00327c898fb213f
	.octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
		0xbf597fc7beef0ee4bf597fc7beef0ee4
	.octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
		0xc6e00bf33da88fc2c6e00bf33da88fc2
	.octa 0xd5a79147930aa725d5a79147930aa725,\
		0xd5a79147930aa725d5a79147930aa725
	.octa 0x06ca6351e003826f06ca6351e003826f,\
		0x06ca6351e003826f06ca6351e003826f
	.octa 0x142929670a0e6e70142929670a0e6e70,\
		0x142929670a0e6e70142929670a0e6e70
	.octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
		0x27b70a8546d22ffc27b70a8546d22ffc
	.octa 0x2e1b21385c26c9262e1b21385c26c926,\
		0x2e1b21385c26c9262e1b21385c26c926
	.octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
		0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
	.octa 0x53380d139d95b3df53380d139d95b3df,\
		0x53380d139d95b3df53380d139d95b3df
	.octa 0x650a73548baf63de650a73548baf63de,\
		0x650a73548baf63de650a73548baf63de
	.octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
		0x766a0abb3c77b2a8766a0abb3c77b2a8
	.octa 0x81c2c92e47edaee681c2c92e47edaee6,\
		0x81c2c92e47edaee681c2c92e47edaee6
	.octa 0x92722c851482353b92722c851482353b,\
		0x92722c851482353b92722c851482353b
	.octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
		0xa2bfe8a14cf10364a2bfe8a14cf10364
	.octa 0xa81a664bbc423001a81a664bbc423001,\
		0xa81a664bbc423001a81a664bbc423001
	.octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
		0xc24b8b70d0f89791c24b8b70d0f89791
	.octa 0xc76c51a30654be30c76c51a30654be30,\
		0xc76c51a30654be30c76c51a30654be30
	.octa 0xd192e819d6ef5218d192e819d6ef5218,\
		0xd192e819d6ef5218d192e819d6ef5218
	.octa 0xd69906245565a910d69906245565a910,\
		0xd69906245565a910d69906245565a910
	.octa 0xf40e35855771202af40e35855771202a,\
		0xf40e35855771202af40e35855771202a
	.octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
		0x106aa07032bbd1b8106aa07032bbd1b8
	.octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
		0x19a4c116b8d2d0c819a4c116b8d2d0c8
	.octa 0x1e376c085141ab531e376c085141ab53,\
		0x1e376c085141ab531e376c085141ab53
	.octa 0x2748774cdf8eeb992748774cdf8eeb99,\
		0x2748774cdf8eeb992748774cdf8eeb99
	.octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
		0x34b0bcb5e19b48a834b0bcb5e19b48a8
	.octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
		0x391c0cb3c5c95a63391c0cb3c5c95a63
	.octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
		0x4ed8aa4ae3418acb4ed8aa4ae3418acb
	.octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
		0x5b9cca4f7763e3735b9cca4f7763e373
	.octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
		0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
	.octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
		0x748f82ee5defb2fc748f82ee5defb2fc
	.octa 0x78a5636f43172f6078a5636f43172f60,\
		0x78a5636f43172f6078a5636f43172f60
	.octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
		0x84c87814a1f0ab7284c87814a1f0ab72
	.octa 0x8cc702081a6439ec8cc702081a6439ec,\
		0x8cc702081a6439ec8cc702081a6439ec
	.octa 0x90befffa23631e2890befffa23631e28,\
		0x90befffa23631e2890befffa23631e28
	.octa 0xa4506cebde82bde9a4506cebde82bde9,\
		0xa4506cebde82bde9a4506cebde82bde9
	.octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
		0xbef9a3f7b2c67915bef9a3f7b2c67915
	.octa 0xc67178f2e372532bc67178f2e372532b,\
		0xc67178f2e372532bc67178f2e372532b
	.octa 0xca273eceea26619cca273eceea26619c,\
		0xca273eceea26619cca273eceea26619c
	.octa 0xd186b8c721c0c207d186b8c721c0c207,\
		0xd186b8c721c0c207d186b8c721c0c207
	.octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
		0xeada7dd6cde0eb1eeada7dd6cde0eb1e
	.octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
		0xf57d4f7fee6ed178f57d4f7fee6ed178
	.octa 0x06f067aa72176fba06f067aa72176fba,\
		0x06f067aa72176fba06f067aa72176fba
	.octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
		0x0a637dc5a2c898a60a637dc5a2c898a6
	.octa 0x113f9804bef90dae113f9804bef90dae,\
		0x113f9804bef90dae113f9804bef90dae
	.octa 0x1b710b35131c471b1b710b35131c471b,\
		0x1b710b35131c471b1b710b35131c471b
	.octa 0x28db77f523047d8428db77f523047d84,\
		0x28db77f523047d8428db77f523047d84
	.octa 0x32caab7b40c7249332caab7b40c72493,\
		0x32caab7b40c7249332caab7b40c72493
	.octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
		0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
	.octa 0x431d67c49c100d4c431d67c49c100d4c,\
		0x431d67c49c100d4c431d67c49c100d4c
	.octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
		0x4cc5d4becb3e42b64cc5d4becb3e42b6
	.octa 0x597f299cfc657e2a597f299cfc657e2a,\
		0x597f299cfc657e2a597f299cfc657e2a
	.octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
		0x5fcb6fab3ad6faec5fcb6fab3ad6faec
	.octa 0x6c44198c4a4758176c44198c4a475817,\
		0x6c44198c4a4758176c44198c4a475817

# Byte-swap mask: converts each 64-bit lane between little- and big-endian.
.section	.rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
.align 32
PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
			 .octa 0x18191a1b1c1d1e1f1011121314151617