repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
ProjectEverest/frameworks_av | 4,943 | media/module/codecs/amrwb/enc/src/asm/ARMV7/scale_sig_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@**********************************************************************/
@void Scale_sig(
@ Word16 x[], /* (i/o) : signal to scale */
@ Word16 lg, /* (i) : size of x[] */
@ Word16 exp /* (i) : exponent: x = round(x << exp) */
@)
@***********************************************************************
@ x[] --- r0
@ lg --- r1
@ exp --- r2
.section .text
.global Scale_sig_opt
@ Scale_sig_opt: scale a Word16 vector by 2^exp with rounding.
@   r0 = x[] (i/o), r1 = lg (size), r2 = exp.
@ Each sample is widened to 32 bits (<<16), shifted by exp via VSHL with a
@ per-lane count (a negative exp therefore shifts right), then VADDHN adds
@ 0x8000 and keeps the high halfword -> round(x << exp).
@ NOTE(review): lg is only matched against 64/128/256/80; any other value
@ falls through into LOOP1 with r4=4 and processes 16 + 4*64 = 272 samples.
@ Presumably callers only pass those sizes -- confirm before reuse.
Scale_sig_opt:
STMFD r13!, {r4 - r12, r14} @ save callee-saved regs + lr
MOV r4, #4 @ default outer-loop count (4 * 64 samples)
VMOV.S32 Q15, #0x8000 @ rounding constant consumed by VADDHN
VDUP.S32 Q14, r2 @ replicate shift count exp into all lanes
MOV r5, r0 @ copy x[] address (r5 = load ptr, r0 = store ptr)
CMP r1, #64
MOVEQ r4, #1 @ lg == 64: one LOOP pass
BEQ LOOP
CMP r1, #128
MOVEQ r4, #2 @ lg == 128: two LOOP passes
BEQ LOOP
CMP r1, #256
BEQ LOOP @ lg == 256: r4 stays 4
CMP r1, #80
MOVEQ r4, #1 @ lg == 80: 16 samples in LOOP1 + one LOOP pass
BEQ LOOP1 @ NOTE(review): branch to the next insn -- redundant
LOOP1: @ prologue: scale 16 leading samples
VLD1.S16 {Q0, Q1}, [r5]! @load 16 Word16 x[]
VSHLL.S16 Q10, D0, #16 @ widen each Word16 to the top of a Word32
VSHLL.S16 Q11, D1, #16
VSHLL.S16 Q12, D2, #16
VSHLL.S16 Q13, D3, #16
VSHL.S32 Q10, Q10, Q14 @ apply exp (per-lane shift)
VSHL.S32 Q11, Q11, Q14
VSHL.S32 Q12, Q12, Q14
VSHL.S32 Q13, Q13, Q14
VADDHN.S32 D16, Q10, Q15 @ +0x8000, take high half = rounded result
VADDHN.S32 D17, Q11, Q15
VADDHN.S32 D18, Q12, Q15
VADDHN.S32 D19, Q13, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
LOOP: @ main body: 64 samples per iteration
VLD1.S16 {Q0, Q1}, [r5]! @load 16 Word16 x[]
VLD1.S16 {Q2, Q3}, [r5]! @load 16 Word16 x[]
VLD1.S16 {Q4, Q5}, [r5]! @load 16 Word16 x[]
VLD1.S16 {Q6, Q7}, [r5]! @load 16 Word16 x[]
VSHLL.S16 Q8, D0, #16 @ samples 0-15: widen
VSHLL.S16 Q9, D1, #16
VSHLL.S16 Q10, D2, #16
VSHLL.S16 Q11, D3, #16
VSHL.S32 Q8, Q8, Q14 @ shift by exp
VSHL.S32 Q9, Q9, Q14
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VADDHN.S32 D16, Q8, Q15 @ round + narrow
VADDHN.S32 D17, Q9, Q15
VADDHN.S32 D18, Q10, Q15
VADDHN.S32 D19, Q11, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
VSHLL.S16 Q12, D4, #16 @ samples 16-31
VSHLL.S16 Q13, D5, #16
VSHLL.S16 Q10, D6, #16
VSHLL.S16 Q11, D7, #16
VSHL.S32 Q12, Q12, Q14
VSHL.S32 Q13, Q13, Q14
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VADDHN.S32 D16, Q12, Q15
VADDHN.S32 D17, Q13, Q15
VADDHN.S32 D18, Q10, Q15
VADDHN.S32 D19, Q11, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
VSHLL.S16 Q10, D8, #16 @ samples 32-47
VSHLL.S16 Q11, D9, #16
VSHLL.S16 Q12, D10, #16
VSHLL.S16 Q13, D11, #16
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VSHL.S32 Q12, Q12, Q14
VSHL.S32 Q13, Q13, Q14
VADDHN.S32 D16, Q10, Q15
VADDHN.S32 D17, Q11, Q15
VADDHN.S32 D18, Q12, Q15
VADDHN.S32 D19, Q13, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
VSHLL.S16 Q10, D12, #16 @ samples 48-63
VSHLL.S16 Q11, D13, #16
VSHLL.S16 Q12, D14, #16
VSHLL.S16 Q13, D15, #16
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VSHL.S32 Q12, Q12, Q14
VSHL.S32 Q13, Q13, Q14
VADDHN.S32 D16, Q10, Q15
VADDHN.S32 D17, Q11, Q15
VADDHN.S32 D18, Q12, Q15
VADDHN.S32 D19, Q13, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
SUBS r4, r4, #1 @ one 64-sample chunk done
BGT LOOP
Scale_sig_asm_end:
LDMFD r13!, {r4 - r12, r15} @ restore and return (pop into pc)
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 4,836 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Syn_filt_32_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@**********************************************************************/
@void Syn_filt_32(
@ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
@ Word16 m, /* (i) : order of LP filter */
@ Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
@ Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
@ Word16 sig_hi[], /* (o) /16 : synthesis high */
@ Word16 sig_lo[], /* (o) /16 : synthesis low */
@ Word16 lg /* (i) : size of filtering */
@)
@***********************************************************************
@ a[] --- r0
@ m --- r1
@ exc[] --- r2
@ Qnew --- r3
@ sig_hi[] --- r4
@ sig_lo[] --- r5
@ lg --- r6
.section .text
.global Syn_filt_32_asm
@ Syn_filt_32_asm: 16th-order double-precision synthesis filter.
@   r0 = a[] (Q12 coeffs, a[0] consumed first), r1 = m (unused here; order
@   is fixed at 16 by the code), r2 = exc[], r3 = Qnew,
@   [sp+40] = sig_hi[], [sp+44] = sig_lo[] (stack args after 10-reg STMFD).
@ The 16-tap histories of sig_hi/sig_lo live in D4-D7 / D8-D11 and are
@ slid one halfword per sample with VEXT; coefficients are loaded reversed
@ (VREV64) so VMULL/VMLAL pairs form the convolution directly.
@ NOTE(review): loop count is hard-coded to 64 samples (CMP r8, #64);
@ the lg parameter of the C prototype is not read -- confirm callers.
Syn_filt_32_asm:
STMFD r13!, {r4 - r12, r14} @ 10 regs saved -> stack args at +40/+44
LDR r4, [r13, #40] @ get sig_hi[] address
LDR r5, [r13, #44] @ get sig_lo[] address
LDRSH r6, [r0], #2 @ load Aq[0]
ADD r7, r3, #4 @ 4 + Q_new
MOV r3, r6, ASR r7 @ a0 = Aq[0] >> (4 + Q_new)
SUB r10, r4, #32 @ sig_hi[-16] address
SUB r11, r5, #32 @ sig_lo[-16] address
VLD1.S16 {D0, D1, D2, D3}, [r0]! @a[1] ~ a[16]
MOV r8, #0 @ i = 0
VLD1.S16 {D4, D5, D6, D7}, [r10]! @ sig_hi[-16] ~ sig_hi[-1]
VREV64.16 D0, D0 @ reverse coeffs so lanes line up with history order
VREV64.16 D1, D1
VLD1.S16 {D8, D9, D10, D11}, [r11]! @ sig_lo[-16] ~ sig_lo[-1]
VREV64.16 D2, D2
VREV64.16 D3, D3
VDUP.S32 Q15, r8 @ Q15 = 0 (zero source for the negation below)
SYN_LOOP:
LDRSH r6, [r2], #2 @exc[i]
@L_tmp = L_msu(L_tmp, sig_lo[i - j], a[j])@
VMULL.S16 Q10, D8, D3 @ low-part correlation, 4 taps at a time
VEXT.8 D8, D8, D9, #2 @ slide sig_lo history window by one halfword
VMLAL.S16 Q10, D9, D2
VMLAL.S16 Q10, D10, D1
VMLAL.S16 Q10, D11, D0
VEXT.8 D9, D9, D10, #2
VEXT.8 D10, D10, D11, #2
VPADD.S32 D28, D20, D21 @ horizontal sum of the 4 partial lanes
MUL r12, r6, r3 @exc[i] * a0
VPADD.S32 D29, D28, D28
VDUP.S32 Q10, D29[0] @result1 = sum(sig_lo[i-j]*a[j])
VMULL.S16 Q11, D4, D3 @ high-part correlation
VMLAL.S16 Q11, D5, D2
VSUB.S32 Q10, Q15, Q10 @ negate (the MSU in the C reference)
@L_tmp = L_msu(L_tmp, sig_hi[i - j], a[j])@
VMLAL.S16 Q11, D6, D1
VEXT.8 D4, D4, D5, #2 @ slide sig_hi history window
VMLAL.S16 Q11, D7, D0
VEXT.8 D5, D5, D6, #2
VEXT.8 D6, D6, D7, #2
VPADD.S32 D28, D22, D23
VPADD.S32 D29, D28, D28
MOV r14, r12, LSL #1 @exc[i] * a0 << 1
VDUP.S32 Q11, D29[0] @result2 = sum(sig_hi[i-j]*a[j])
VSHR.S32 Q10, Q10, #11 @result1 >>= 11
VSHL.S32 Q11, Q11, #1 @result2 <<= 1
VDUP.S32 Q12, r14
VADD.S32 Q12, Q12, Q10 @L_tmp = exc*a0*2 - (result1 >> 11) - (result2 << 1)
VSUB.S32 Q12, Q12, Q11
VSHL.S32 Q12, Q12, #3 @L_tmp <<= 3
VSHRN.S32 D20, Q12, #16 @sig_hi[i] = L_tmp >> 16@
VMOV.S16 r10, D20[0]
VSHR.S32 Q12, Q12, #4 @L_tmp >>= 4
VEXT.8 D7, D7, D20, #2 @ feed new sig_hi[i] into the history window
STRH r10, [r4], #2 @store sig_hi[i]
VMOV.S32 r11, D24[0] @r11 --- L_tmp >>= 4
ADD r8, r8, #1 @ i++
SUB r12, r11, r10, LSL #12 @ sig_lo[i] = (L_tmp>>4) - (sig_hi[i] << 12)
@MOV r11, r12, ASR #16 @sig_lo[i]
VDUP.S16 D21, r12
VEXT.8 D11, D11, D21, #2 @ feed new sig_lo[i] into the history window
STRH r12, [r5], #2 @store sig_lo[i]
CMP r8, #64 @ fixed frame of 64 samples
BLT SYN_LOOP
Syn_filt_32_end:
LDMFD r13!, {r4 - r12, r15} @ restore and return
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 3,600 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Deemph_32_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Deemph_32(
@ Word16 x_hi[], /* (i) : input signal (bit31..16) */
@ Word16 x_lo[], /* (i) : input signal (bit15..4) */
@ Word16 y[], /* (o) : output signal (x16) */
@ Word16 mu, /* (i) Q15 : deemphasis factor */
@ Word16 L, /* (i) : vector size */
@ Word16 * mem /* (i/o) : memory (y[-1]) */
@ )
@x_hi RN R0
@x_lo RN R1
@y[] RN R2
@*mem RN R3
.section .text
.global Deemph_32_asm
@ Deemph_32_asm: deemphasis filter y[i] = round((x_hi<<16 | x_lo<<4) << 3
@ + y[i-1] * fac * 2), with fac = mu >> 1.
@   r0 = x_hi[], r1 = x_lo[], r2 = y[], r3 = mem (y[-1] in/out).
@ NOTE(review): unlike the C prototype, mu is hard-coded (22282 = 0.68 in
@ Q15) and L is hard-coded to 64 (CMP r4, #64); r3 is the mem pointer, not
@ mu -- this asm has its own argument convention; confirm against callers.
@ The first two samples are computed in the prologue, then the loop emits
@ two samples per iteration (software-pipelined: r14 holds the value that
@ the next iteration stores).
Deemph_32_asm:
STMFD r13!, {r4 - r12, r14} @ save callee-saved regs + lr
MOV r4, #2 @i=2: two samples handled in the prologue
LDRSH r6, [r0], #2 @load x_hi[0]
LDRSH r7, [r1], #2 @load x_lo[0]
LDR r5, =22282 @r5---mu (hard-coded Q15 constant)
MOV r11, #0x8000 @ rounding constant for the final >>16
@y[0]
MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
MOV r8, r5, ASR #1 @fac = mu >> 1
LDR r5, [r3] @ r5 = *mem = y[-1]
ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
MOV r10, r12, LSL #3 @L_tmp <<= 3
MUL r9, r5, r8 @ y[-1] * fac
LDRSH r6, [r0], #2 @load x_hi[1]
QDADD r10, r10, r9 @ L_tmp = L_mac(L_tmp, *mem, fac) (saturating)
LDRSH r7, [r1], #2 @load x_lo[1]
MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
QADD r10, r12, r11 @ + 0x8000 for rounding
MOV r14, r10, ASR #16 @y[0] = round(L_tmp)
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
STRH r14, [r2], #2 @update y[0]
MOV r10, r12, LSL #3
MUL r9, r14, r8 @ y[0] * fac
QDADD r10, r10, r9
MOV r12, r10, LSL #1
QADD r10, r12, r11
MOV r14, r10, ASR #16 @y[1] = round(L_tmp) (stored by the loop)
LOOP:
LDRSH r6, [r0], #2 @load x_hi[]
LDRSH r7, [r1], #2
STRH r14, [r2], #2 @ store previous iteration's sample
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
MUL r9, r14, r8 @ y[i-1] * fac
MOV r10, r12, LSL #3
QDADD r10, r10, r9
LDRSH r6, [r0], #2 @load x_hi[]
MOV r12, r10, LSL #1
QADD r10, r12, r11
LDRSH r7, [r1], #2
MOV r14, r10, ASR #16 @ first sample of this pair
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
STRH r14, [r2], #2
MUL r9, r14, r8
MOV r10, r12, LSL #3
QDADD r10, r10, r9
ADD r4, r4, #2 @ i += 2
MOV r12, r10, LSL #1
QADD r10, r12, r11
CMP r4, #64 @ fixed L = 64
MOV r14, r10, ASR #16
BLT LOOP
STR r14, [r3] @ *mem = y[L-1] (full 32-bit store)
STRH r14, [r2] @ store final sample y[L-1]
LDMFD r13!, {r4 - r12, r15} @ restore and return
.end
|
ProjectEverest/frameworks_av | 5,738 | media/module/codecs/amrwb/enc/src/asm/ARMV7/convolve_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@*void Convolve (
@* Word16 x[], /* (i) : input vector */
@* Word16 h[], /* (i) : impulse response */
@* Word16 y[], /* (o) : output vector */
@* Word16 L /* (i) : vector size */
@*)
@
@ r0 --- x[]
@ r1 --- h[]
@ r2 --- y[]
@ r3 --- L
.section .text
.global Convolve_asm
@ Convolve_asm: y[n] = round(2 * sum(x[i] * h[n-i], i=0..n)) >> 16 for
@ n = 0..63 (extract_h with 0x8000 rounding).
@   r0 = x[], r1 = h[], r2 = y[], r3 = L (immediately overwritten: the
@   loop length is hard-coded to 64 -- NOTE(review): confirm callers).
@ The outer loop computes 4 output samples per pass. Because n grows by 1
@ each sample, each phase first handles n mod 4 leading products with
@ scalar MUL/MLA so the remaining count is a multiple of 4, then the NEON
@ inner loop (LOOPn) accumulates 4 products at a time into Q10, reading
@ h[] backwards via VREV64.
Convolve_asm:
STMFD r13!, {r4 - r12, r14} @ save callee-saved regs + lr
MOV r3, #0 @ n = 0 (r3 reused as output index)
MOV r11, #0x8000 @ rounding constant
LOOP: @ ---- phase 0: n % 4 == 0, 1 scalar product
@MOV r8, #0 @ s = 0
ADD r4, r1, r3, LSL #1 @ tmpH address
ADD r5, r3, #1 @ i = n + 1
MOV r6, r0
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4] @ *tmpH--
SUB r5, r5, #1 @ one product done scalar; rest divisible by 4
VMOV.S32 Q10, #0 @ clear NEON accumulator
MUL r8, r9, r10
LOOP1: @ accumulate 4 products per pass
CMP r5, #0
BLE L1
SUB r4, r4, #8 @ step h pointer back 4 halfwords
MOV r9, r4
VLD1.S16 D0, [r6]! @ 4 x[] values, forward
VLD1.S16 D1, [r9]! @ 4 h[] values
VREV64.16 D1, D1 @ reverse h[] to match convolution order
SUBS r5, r5, #4
VMLAL.S16 Q10, D0, D1
B LOOP1
L1:
VADD.S32 D20, D20, D21 @ fold 4 accumulator lanes to 1
VPADD.S32 D20, D20, D20
VMOV.S32 r5, D20[0]
ADD r5, r5, r8 @ add scalar head product
ADD r5, r11, r5, LSL #1 @ s*2 + 0x8000
MOV r5, r5, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r5, [r2], #2 @y[n]
@ ---- phase 1: n % 4 == 1, 2 scalar products
@MOV r8, #0
ADD r4, r1, r3, LSL #1 @tmpH address
ADD r5, r3, #1
MOV r6, r0
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2
LDRSH r12, [r6], #2
LDRSH r14, [r4]
MUL r8, r9, r10
SUB r5, r5, #2
MLA r8, r12, r14, r8
VMOV.S32 Q10, #0
LOOP2:
CMP r5, #0
BLE L2
SUB r4, r4, #8
MOV r9, r4
VLD1.S16 D0, [r6]!
VLD1.S16 D1, [r9]!
SUBS r5, r5, #4
VREV64.16 D1, D1
VMLAL.S16 Q10, D0, D1
B LOOP2
L2:
VADD.S32 D20, D20, D21
VPADD.S32 D20, D20, D20
VMOV.S32 r5, D20[0]
ADD r8, r8, r5
ADD r8, r11, r8, LSL #1
MOV r8, r8, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r8, [r2], #2 @y[n]
@ ---- phase 2: n % 4 == 2, 3 scalar products
@MOV r8, #0
ADD r4, r1, r3, LSL #1
ADD r5, r3, #1
MOV r6, r0
LDRSH r9, [r6], #2
LDRSH r10, [r4], #-2
LDRSH r12, [r6], #2
LDRSH r14, [r4], #-2
MUL r8, r9, r10
LDRSH r9, [r6], #2
LDRSH r10, [r4]
MLA r8, r12, r14, r8
SUB r5, r5, #3
MLA r8, r9, r10, r8
VMOV.S32 Q10, #0
LOOP3:
CMP r5, #0
BLE L3
SUB r4, r4, #8
MOV r9, r4
VLD1.S16 D0, [r6]!
VLD1.S16 D1, [r9]!
VREV64.16 D1, D1
SUBS r5, r5, #4
VMLAL.S16 Q10, D0, D1
B LOOP3
L3:
VADD.S32 D20, D20, D21
VPADD.S32 D20, D20, D20
VMOV.S32 r5, D20[0]
ADD r8, r8, r5
ADD r8, r11, r8, LSL #1
MOV r8, r8, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r8, [r2], #2 @y[n]
@ ---- phase 3: n % 4 == 3, count already divisible by 4
ADD r5, r3, #1 @ i = n + 1
ADD r4, r1, r5, LSL #1 @ tmpH address (one past h[n])
MOV r6, r0
VMOV.S32 Q10, #0
LOOP4:
CMP r5, #0
BLE L4
SUB r4, r4, #8
MOV r9, r4
VLD1.S16 D0, [r6]!
VLD1.S16 D1, [r9]!
VREV64.16 D1, D1
SUBS r5, r5, #4
VMLAL.S16 Q10, D0, D1
B LOOP4
L4:
VADD.S32 D20, D20, D21
VPADD.S32 D20, D20, D20
VMOV.S32 r5, D20[0]
ADD r5, r11, r5, LSL #1
MOV r5, r5, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r5, [r2], #2 @y[n]
CMP r3, #64 @ fixed vector size of 64
BLT LOOP
Convolve_asm_end:
LDMFD r13!, {r4 - r12, r15} @ restore and return
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 4,376 | media/module/codecs/amrwb/enc/src/asm/ARMV7/residu_asm_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Residu (
@ Word16 a[], /* (i) : prediction coefficients */
@ Word16 x[], /* (i) : speech signal */
@ Word16 y[], /* (o) : residual signal */
@ Word16 lg /* (i) : size of filtering */
@)
@a[] RN r0
@x[] RN r1
@y[] RN r2
@lg RN r3
.section .text
.global Residu_opt
@ Residu_opt: LPC residual, order 16:
@   y[i] = round(((2 * sum(a[j]*x[i-j], j=0..16)) << 4) + 0x8000) >> 16
@   r0 = a[] (17 coeffs), r1 = x[], r2 = y[], r3 = lg.
@ Processes 4 output samples per pass, iterating i from lg-4 DOWN to 0 so
@ x[] is read with simple negative offsets from the block base. VQDMULL /
@ VQDMLAL give the saturating "<<1" of each L_mult for 4 lanes at once;
@ each of the 17 taps is applied with a scalar coefficient lane (D0[0] ..
@ D4[0]). NOTE(review): assumes lg is a multiple of 4.
Residu_opt:
STMFD r13!, {r4 - r12, r14} @ save callee-saved regs + lr
SUB r7, r3, #4 @i = lg - 4
VLD1.S16 {D0, D1, D2, D3}, [r0]! @get all a[]
VLD1.S16 {D4}, [r0]! @ a[16] in D4[0]
VMOV.S32 Q8, #0x8000 @ rounding constant
LOOP1:
ADD r9, r1, r7, LSL #1 @copy the address (&x[i])
ADD r10, r2, r7, LSL #1 @ &y[i]
MOV r8, r9
VLD1.S16 D5, [r8]! @get x[i], x[i+1], x[i+2], x[i+3]
VQDMULL.S16 Q10, D5, D0[0] @finish the first L_mult
SUB r8, r9, #2 @get the x[i-1] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D0[1]
SUB r8, r9, #4 @load the x[i-2] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D0[2]
SUB r8, r9, #6 @load the x[i-3] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D0[3]
SUB r8, r9, #8 @load the x[i-4] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D1[0]
SUB r8, r9, #10 @load the x[i-5] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D1[1]
SUB r8, r9, #12 @load the x[i-6] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D1[2]
SUB r8, r9, #14 @load the x[i-7] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D1[3]
SUB r8, r9, #16 @load the x[i-8] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D2[0]
SUB r8, r9, #18 @load the x[i-9] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D2[1]
SUB r8, r9, #20 @load the x[i-10] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D2[2]
SUB r8, r9, #22 @load the x[i-11] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D2[3]
SUB r8, r9, #24 @load the x[i-12] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D3[0]
SUB r8, r9, #26 @load the x[i-13] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D3[1]
SUB r8, r9, #28 @load the x[i-14] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D3[2]
SUB r8, r9, #30 @load the x[i-15] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D3[3]
SUB r8, r9, #32 @load the x[i-16] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D4[0]
SUB r7, r7, #4 @i-=4
VQSHL.S32 Q10, Q10, #4 @ saturating <<4 (Q12 coeffs -> Q16)
VQADD.S32 Q10, Q10, Q8 @ + 0x8000 rounding
VSHRN.S32 D5, Q10, #16 @ narrow to Word16 results
VST1.S16 D5, [r10]! @ store y[i..i+3]
CMP r7, #0
BGE LOOP1
Residu_asm_end:
LDMFD r13!, {r4 - r12, r15} @ restore and return
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 3,667 | media/module/codecs/amrwb/enc/src/asm/ARMV7/pred_lt4_1_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Pred_lt4(
@ Word16 exc[], /* in/out: excitation buffer */
@ Word16 T0, /* input : integer pitch lag */
@ Word16 frac, /* input : fraction of lag */
@ Word16 L_subfr /* input : subframe size */
@)
@***********************************************************************
@ r0 --- exc[]
@ r1 --- T0
@ r2 --- frac
@ r3 --- L_subfr
.section .text
.global pred_lt4_asm
.extern inter4_2
.hidden inter4_2
@ pred_lt4_asm: 1/4-resolution pitch prediction. Interpolates the past
@ excitation exc[-T0 +/- frac/4] with the 32-tap FIR table inter4_2.
@   r0 = exc[] (in/out), r1 = T0, r2 = frac, r3 = L_subfr.
@ The 32-sample sliding input window lives in Q4-Q7 and is shifted one
@ halfword per output with a VEXT chain; the 32 filter taps for row k of
@ inter4_2 stay in Q0-Q3. The table address is computed PC-relative via
@ the Lable1 literal (position-independent).
pred_lt4_asm:
STMFD r13!, {r4 - r12, r14} @ save callee-saved regs + lr
SUB r4, r0, r1, LSL #1 @ x = exc - T0
RSB r2, r2, #0 @ frac = - frac
SUB r4, r4, #30 @ x -= L_INTERPOL2 - 1
CMP r2, #0
ADDLT r2, r2, #4 @ frac += UP_SAMP
SUBLT r4, r4, #2 @ x--
ADR r8, Lable1
LDR r11, [r8]
ADD r11, r8 @ r11 = &inter4_2 (PC-relative fixup)
RSB r2, r2, #3 @ k = UP_SAMP - 1 - frac
MOV r8, #0 @ j = 0
ADD r11, r11, r2, LSL #6 @ get inter4_2[k][] (64 bytes per row)
VLD1.S16 {Q0, Q1}, [r11]! @ taps 0-15
VLD1.S16 {Q2, Q3}, [r11]! @ taps 16-31
MOV r6, #0x8000 @ rounding constant
VLD1.S16 {Q4, Q5}, [r4]! @load 16 x[]
VLD1.S16 {Q6, Q7}, [r4]! @load 16 x[]
LOOP:
VQDMULL.S16 Q15, D8, D0 @ saturating 2*sum(x[]*taps), 4 lanes
VQDMLAL.S16 Q15, D9, D1
VQDMLAL.S16 Q15, D10, D2
VQDMLAL.S16 Q15, D11, D3
VQDMLAL.S16 Q15, D12, D4
VQDMLAL.S16 Q15, D13, D5
VQDMLAL.S16 Q15, D14, D6
VQDMLAL.S16 Q15, D15, D7
LDRSH r12, [r4], #2 @ next input sample to enter the window
VEXT.S16 D8, D8, D9, #1 @ slide window left by one sample
VEXT.S16 D9, D9, D10, #1
VEXT.S16 D10, D10, D11, #1
VEXT.S16 D11, D11, D12, #1
VDUP.S16 D24, r12
VEXT.S16 D12, D12, D13, #1
VEXT.S16 D13, D13, D14, #1
VQADD.S32 D30, D30, D31 @ fold accumulator lanes
MOV r11, #0x8000
VPADD.S32 D30, D30, D30
ADD r8, r8, #1 @ j++
VMOV.S32 r12, D30[0]
VEXT.S16 D14, D14, D15, #1
QADD r1, r12, r12 @ L_sum = (L_sum << 2) (saturating doubling)
VEXT.S16 D15, D15, D24, #1 @ new sample enters at the window tail
QADD r5, r1, r6 @ + 0x8000 rounding
MOV r1, r5, ASR #16
CMP r8, r3 @ j < L_subfr ?
STRH r1, [r0], #2 @ exc[j] = (L_sum + 0x8000) >> 16
BLT LOOP
pred_lt4_end:
LDMFD r13!, {r4 - r12, r15} @ restore and return
Lable1:
.word inter4_2-Lable1 @ PC-relative offset to the interpolation table
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 9,556 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Norm_Corr_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@static void Norm_Corr (Word16 exc[], /* (i) : excitation buffer */
@ Word16 xn[], /* (i) : target vector */
@ Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
@ Word16 L_subfr, /* (i) : sub-frame length */
@ Word16 t_min, /* (i) : minimum value of pitch lag. */
@ Word16 t_max, /* (i) : maximum value of pitch lag. */
@ Word16 corr_norm[]) /* (o) Q15 : normalized correlation */
@
@ r0 --- exc[]
@ r1 --- xn[]
@ r2 --- h[]
@ r3 --- L_subfr
@ r4 --- t_min
@ r5 --- t_max
@ r6 --- corr_norm[]
.section .text
.global Norm_corr_asm
.extern Convolve_asm
.extern Isqrt_n
@******************************
@ constant
@******************************
@ Stack-frame layout constants. After STMFD saves 10 registers (40 bytes)
@ and SUB r13,#voSTACK reserves the local frame (172 bytes), the caller's
@ stack arguments sit at sp+212/216/220. The local excf[64] buffer starts
@ at sp+20.
.equ EXC , 0
.equ XN , 4
.equ H , 8
.equ L_SUBFR , 12
.equ voSTACK , 172
.equ T_MIN , 212 @ 5th arg: t_min (40 saved + 172 locals + 0)
.equ T_MAX , 216 @ 6th arg: t_max
.equ CORR_NORM , 220 @ 7th arg: corr_norm[]
@ Norm_corr_asm: normalized correlation between target xn[] and the
@ filtered past excitation, for every lag t in [t_min, t_max].
@   r0 = exc[], r1 = xn[], r2 = h[], r3 = L_subfr (subframe fixed at 64
@   by the vector code), stack: t_min, t_max, corr_norm[].
@ Steps: 1) excf[] = Convolve(exc[-t_min], h); 2) scale = -(norm of
@ energy(xn))/2; 3) per lag: corr and energy of excf via NEON MLAL trees,
@ normalize with Isqrt_n, store corr_norm[t]; 4) update excf[] for the
@ next lag with the scalar recursion excf[i] = tmp*h[i]>>15 + excf[i-1].
Norm_corr_asm:
STMFD r13!, {r4 - r12, r14} @ 10 regs -> stack args move to +212..
SUB r13, r13, #voSTACK @ reserve locals
ADD r8, r13, #20 @get the excf[L_SUBFR]
LDR r4, [r13, #T_MIN] @get t_min
RSB r11, r4, #0 @k = -t_min
ADD r5, r0, r11, LSL #1 @get the &exc[k]
@transfer Convolve function
STMFD sp!, {r0 - r3} @ preserve args across the call
MOV r0, r5
MOV r1, r2
MOV r2, r8 @r2 --- excf[]
BL Convolve_asm
LDMFD sp!, {r0 - r3}
@ r8 --- excf[]
@ ---- energy of xn[] (64 samples) to derive the global scale
MOV r14, r1 @copy xn[] address
MOV r7, #1
VLD1.S16 {Q0, Q1}, [r14]!
VLD1.S16 {Q2, Q3}, [r14]!
VLD1.S16 {Q4, Q5}, [r14]!
VLD1.S16 {Q6, Q7}, [r14]!
VMULL.S16 Q10, D0, D0 @ sum xn[i]^2
VMLAL.S16 Q10, D1, D1
VMLAL.S16 Q10, D2, D2
VMLAL.S16 Q10, D3, D3
VMLAL.S16 Q10, D4, D4
VMLAL.S16 Q10, D5, D5
VMLAL.S16 Q10, D6, D6
VMLAL.S16 Q10, D7, D7
VMLAL.S16 Q10, D8, D8
VMLAL.S16 Q10, D9, D9
VMLAL.S16 Q10, D10, D10
VMLAL.S16 Q10, D11, D11
VMLAL.S16 Q10, D12, D12
VMLAL.S16 Q10, D13, D13
VMLAL.S16 Q10, D14, D14
VMLAL.S16 Q10, D15, D15
VQADD.S32 D20, D20, D21
VMOV.S32 r9, D20[0]
VMOV.S32 r10, D20[1]
QADD r6, r9, r10 @ saturating fold of the two lanes
QADD r6, r6, r6
QADD r9, r6, r7 @L_tmp = (L_tmp << 1) + 1;
CLZ r7, r9
SUB r6, r7, #1 @exp = norm_l(L_tmp)
RSB r7, r6, #32 @exp = 32 - exp
MOV r6, r7, ASR #1
RSB r7, r6, #0 @scale = -(exp >> 1)
@loop for every possible period
@for(t = t_min@ t <= t_max@ t++)
@r7 --- scale r4 --- t_min r8 --- excf[]
LOOPFOR:
ADD r14, r13, #20 @copy of excf[]
MOV r12, r1 @copy of xn[]
MOV r8, #0x8000 @ rounding constant for vo_round below
VLD1.S16 {Q0, Q1}, [r14]! @ load 16 excf[]
VLD1.S16 {Q2, Q3}, [r14]! @ load 16 excf[]
VLD1.S16 {Q4, Q5}, [r12]! @ load 16 x[]
VLD1.S16 {Q6, Q7}, [r12]! @ load 16 x[]
VMULL.S16 Q10, D0, D0 @L_tmp1 += excf[] * excf[]
VMULL.S16 Q11, D0, D8 @L_tmp += x[] * excf[]
VMLAL.S16 Q10, D1, D1
VMLAL.S16 Q11, D1, D9
VMLAL.S16 Q10, D2, D2
VMLAL.S16 Q11, D2, D10
VMLAL.S16 Q10, D3, D3
VMLAL.S16 Q11, D3, D11
VMLAL.S16 Q10, D4, D4
VMLAL.S16 Q11, D4, D12
VMLAL.S16 Q10, D5, D5
VMLAL.S16 Q11, D5, D13
VMLAL.S16 Q10, D6, D6
VMLAL.S16 Q11, D6, D14
VMLAL.S16 Q10, D7, D7
VMLAL.S16 Q11, D7, D15
VLD1.S16 {Q0, Q1}, [r14]! @ load 16 excf[]
VLD1.S16 {Q2, Q3}, [r14]! @ load 16 excf[]
VLD1.S16 {Q4, Q5}, [r12]! @ load 16 x[]
VLD1.S16 {Q6, Q7}, [r12]! @ load 16 x[]
VMLAL.S16 Q10, D0, D0
VMLAL.S16 Q11, D0, D8
VMLAL.S16 Q10, D1, D1
VMLAL.S16 Q11, D1, D9
VMLAL.S16 Q10, D2, D2
VMLAL.S16 Q11, D2, D10
VMLAL.S16 Q10, D3, D3
VMLAL.S16 Q11, D3, D11
VMLAL.S16 Q10, D4, D4
VMLAL.S16 Q11, D4, D12
VMLAL.S16 Q10, D5, D5
VMLAL.S16 Q11, D5, D13
VMLAL.S16 Q10, D6, D6
VMLAL.S16 Q11, D6, D14
VMLAL.S16 Q10, D7, D7
VMLAL.S16 Q11, D7, D15
VQADD.S32 D20, D20, D21
VQADD.S32 D22, D22, D23
VPADD.S32 D20, D20, D20 @D20[0] --- L_tmp1 << 1
VPADD.S32 D22, D22, D22 @D22[0] --- L_tmp << 1
VMOV.S32 r6, D20[0]
VMOV.S32 r5, D22[0]
@r5 --- L_tmp, r6 --- L_tmp1
MOV r10, #1
ADD r5, r10, r5, LSL #1 @L_tmp = (L_tmp << 1) + 1
ADD r6, r10, r6, LSL #1 @L_tmp1 = (L_tmp1 << 1) + 1
CLZ r10, r5
CMP r5, #0
RSBLT r11, r5, #0 @ correlation may be negative: norm on |L_tmp|
CLZLT r10, r11
SUB r10, r10, #1 @exp = norm_l(L_tmp)
MOV r5, r5, LSL r10 @L_tmp = (L_tmp << exp)
RSB r10, r10, #30 @exp_corr = 30 - exp
MOV r11, r5, ASR #16 @corr = extract_h(L_tmp)
CLZ r5, r6 @ energy is > 0, plain norm
SUB r5, r5, #1
MOV r6, r6, LSL r5 @L_tmp = (L_tmp1 << exp)
RSB r5, r5, #30 @exp_norm = 30 - exp
@r10 --- exp_corr, r11 --- corr
@r6 --- L_tmp, r5 --- exp_norm
@Isqrt_n(&L_tmp, &exp_norm)
MOV r14, r0
MOV r12, r1
STMFD sp!, {r0 - r4, r7 - r12, r14} @ save live regs across C call
ADD r1, sp, #4 @ &exp_norm (in the two stack slots)
ADD r0, sp, #0 @ &L_tmp
STR r6, [sp]
STRH r5, [sp, #4]
BL Isqrt_n
LDR r6, [sp] @ 1/sqrt result
LDRSH r5, [sp, #4] @ updated exponent
LDMFD sp!, {r0 - r4, r7 - r12, r14}
MOV r0, r14
MOV r1, r12
MOV r6, r6, ASR #16 @norm = extract_h(L_tmp)
MUL r12, r6, r11
ADD r12, r12, r12 @L_tmp = vo_L_mult(corr, norm)
ADD r6, r10, r5
ADD r6, r6, r7 @exp_corr + exp_norm + scale
CMP r6, #0
RSBLT r6, r6, #0
MOVLT r12, r12, ASR r6
MOVGT r12, r12, LSL r6 @L_tmp = L_shl(L_tmp, exp_corr + exp_norm + scale)
ADD r12, r12, r8 @ + 0x8000
MOV r12, r12, ASR #16 @vo_round(L_tmp)
LDR r5, [r13, #CORR_NORM] @ get corr_norm address
LDR r6, [r13, #T_MAX] @ get t_max
ADD r10, r5, r4, LSL #1 @ get corr_norm[t] address
STRH r12, [r10] @ corr_norm[t] = vo_round(L_tmp)
CMP r4, r6
BEQ Norm_corr_asm_end @ last lag done
@ ---- update excf[] in place for lag t+1 (backwards recursion)
ADD r4, r4, #1 @ t_min ++
RSB r5, r4, #0 @ k
MOV r6, #63 @ i = 63
MOV r8, r0 @ exc[]
MOV r9, r2 @ h[]
ADD r10, r13, #20 @ excf[]
ADD r8, r8, r5, LSL #1 @ exc[k] address
ADD r9, r9, r6, LSL #1 @ h[i] address
ADD r10, r10, r6, LSL #1 @ excf[i] address
LDRSH r11, [r8] @ tmp = exc[k]
LOOPK:
LDRSH r8, [r9], #-2 @ load h[i]
LDRSH r12, [r10, #-2] @ load excf[i - 1]
MUL r14, r11, r8
MOV r8, r14, ASR #15 @ (tmp * h[i]) >> 15
ADD r14, r8, r12 @ + excf[i-1]
STRH r14, [r10], #-2
SUBS r6, r6, #1
BGT LOOPK
LDRSH r8, [r9] @ load h[0]
MUL r14, r11, r8
LDR r6, [r13, #T_MAX] @ get t_max
MOV r8, r14, ASR #15
STRH r8, [r10] @ excf[0] = (tmp * h[0]) >> 15
CMP r4, r6
BLE LOOPFOR
Norm_corr_asm_end:
ADD r13, r13, #voSTACK @ release locals
LDMFD r13!, {r4 - r12, r15} @ restore and return
.end
|
ProjectEverest/frameworks_av | 7,778 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Filt_6k_7k_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@**********************************************************************/
@void Filt_6k_7k(
@ Word16 signal[], /* input: signal */
@ Word16 lg, /* input: length of input */
@ Word16 mem[] /* in/out: memory (size=30) */
@)
@***********************************************************************
@ r0 --- signal[]
@ r1 --- lg
@ r2 --- mem[]
.section .text
.global Filt_6k_7k_asm
.extern fir_6k_7k
.hidden fir_6k_7k
@ Filt_6k_7k_asm: 31-tap band-pass FIR (6-7 kHz) over an 80-sample block.
@   r0 = signal[] (in/out), r1 = lg (block fixed at 80 by the code),
@   r2 = mem[] (30-entry filter state).
@ Layout of the local x[] buffer (240 bytes on the stack): 30 state
@ samples copied from mem[], then signal[i] >> 2 for i = 0..79.
@ The loop computes 8 outputs per pass; Q4-Q8 hold a 40-sample sliding
@ input window advanced by VEXT, and the 32 coefficient lanes (last one
@ forced to 0 -- the FIR has 31 taps) come from Q0-Q3 scalar lanes.
@ The fir_6k_7k table is addressed PC-relative via the Lable1 literal.
Filt_6k_7k_asm:
STMFD r13!, {r0 - r12, r14} @ 14 regs saved (r0-r3 preserved too)
SUB r13, r13, #240 @ x[L_SUBFR16k + (L_FIR - 1)]
MOV r8, r0 @ copy signal[] address
MOV r5, r2 @ copy mem[] address
@ ---- x[0..29] = mem[0..29]
MOV r0, r2
MOV r1, r13
VLD1.S16 {D0, D1, D2, D3}, [r0]!
VLD1.S16 {D4, D5, D6, D7}, [r0]!
VST1.S16 {D0, D1, D2, D3}, [r1]!
VST1.S16 {D4, D5, D6}, [r1]!
VST1.S16 D7[0], [r1]! @ 29th state sample
VST1.S16 D7[1], [r1]! @ 30th state sample
ADR r3, Lable1 @ get fir_7k address
LDR r10, [r3]
ADD r10, r3 @ r10 = &fir_6k_7k (PC-relative fixup)
MOV r3, r8 @ change myMemCopy to Copy, due to Copy will change r3 content
ADD r6, r13, #60 @ get x[L_FIR - 1] address
MOV r7, r3 @ get signal[i]
@for (i = lg - 1@ i >= 0@ i--)
@{
@ x[i + L_FIR - 1] = signal[i] >> 2@
@}
VLD1.S16 {Q0, Q1}, [r7]! @ signal[0] ~ signal[15]
VLD1.S16 {Q2, Q3}, [r7]! @ signal[16] ~ signal[31]
VLD1.S16 {Q4, Q5}, [r7]! @ signal[32] ~ signal[47]
VLD1.S16 {Q6, Q7}, [r7]! @ signal[48] ~ signal[63]
VLD1.S16 {Q8, Q9}, [r7]! @ signal[64] ~ signal[79]
VSHR.S16 Q10, Q0, #2 @ pre-attenuate input by >> 2
VSHR.S16 Q11, Q1, #2
VSHR.S16 Q12, Q2, #2
VSHR.S16 Q13, Q3, #2
VST1.S16 {Q10, Q11}, [r6]!
VSHR.S16 Q0, Q4, #2
VSHR.S16 Q1, Q5, #2
VSHR.S16 Q10, Q6, #2
VSHR.S16 Q11, Q7, #2
VSHR.S16 Q2, Q8, #2
VSHR.S16 Q3, Q9, #2
VST1.S16 {Q12, Q13}, [r6]!
VST1.S16 {Q0, Q1}, [r6]!
VST1.S16 {Q10, Q11}, [r6]!
VST1.S16 {Q2, Q3}, [r6]!
MOV r12, r5 @ keep mem[] address for the epilogue
@STR r5, [sp, #-4] @ PUSH r5 to stack
@ not use registers: r4, r10, r12, r14, r5
MOV r4, r13 @ r4 = x[]
MOV r5, #0 @ i = 0
@ r4 --- x[i], r10 ---- fir_6k_7k
VLD1.S16 {Q0, Q1}, [r10]! @fir_6k_7k[0] ~ fir_6k_7k[15]
VLD1.S16 {Q2, Q3}, [r10]! @fir_6k_7k[16] ~ fir_6k_7k[31]
VMOV.S16 D7[3], r5 @set fir_6k_7K = 0 (tap 31 unused: 31-tap FIR)
VLD1.S16 {Q4, Q5}, [r4]! @x[0] ~ x[15]
VLD1.S16 {Q6, Q7}, [r4]! @x[16] ~ X[31]
VLD1.S16 {Q8}, [r4]! @x[32] ~ x[39]
VMOV.S16 Q15, #0 @ zero source shifted in at the window tail
LOOP_6K7K: @ 8 outputs per iteration; Q9/Q10 and Q11/Q12
@ accumulate even/odd halves, window slides
@ one sample after each group of 4 taps
VMULL.S16 Q9,D8,D0[0]
VMULL.S16 Q10,D9,D1[0]
VMULL.S16 Q11,D9,D0[0]
VMULL.S16 Q12,D10,D1[0]
VEXT.8 Q4,Q4,Q5,#2
VMLAL.S16 Q9,D10,D2[0]
VMLAL.S16 Q10,D11,D3[0]
VMLAL.S16 Q11,D11,D2[0]
VMLAL.S16 Q12,D12,D3[0]
VEXT.8 Q5,Q5,Q6,#2
VMLAL.S16 Q9,D12,D4[0]
VMLAL.S16 Q10,D13,D5[0]
VMLAL.S16 Q11,D13,D4[0]
VMLAL.S16 Q12,D14,D5[0]
VEXT.8 Q6,Q6,Q7,#2
VMLAL.S16 Q9,D14,D6[0]
VMLAL.S16 Q10,D15,D7[0]
VMLAL.S16 Q11,D15,D6[0]
VMLAL.S16 Q12,D16,D7[0]
VEXT.8 Q7,Q7,Q8,#2
VMLAL.S16 Q9,D8,D0[1]
VMLAL.S16 Q10,D9,D1[1]
VEXT.8 Q8,Q8,Q15,#2
VMLAL.S16 Q11,D9,D0[1]
VMLAL.S16 Q12,D10,D1[1]
VEXT.8 Q4,Q4,Q5,#2
VMLAL.S16 Q9,D10,D2[1]
VMLAL.S16 Q10,D11,D3[1]
VMLAL.S16 Q11,D11,D2[1]
VMLAL.S16 Q12,D12,D3[1]
VEXT.8 Q5,Q5,Q6,#2
VMLAL.S16 Q9,D12,D4[1]
VMLAL.S16 Q10,D13,D5[1]
VMLAL.S16 Q11,D13,D4[1]
VMLAL.S16 Q12,D14,D5[1]
VEXT.8 Q6,Q6,Q7,#2
VMLAL.S16 Q9,D14,D6[1]
VMLAL.S16 Q10,D15,D7[1]
VMLAL.S16 Q11,D15,D6[1]
VMLAL.S16 Q12,D16,D7[1]
VEXT.8 Q7,Q7,Q8,#2
VMLAL.S16 Q9,D8,D0[2]
VMLAL.S16 Q10,D9,D1[2]
VEXT.8 Q8,Q8,Q15,#2
VMLAL.S16 Q11,D9,D0[2]
VMLAL.S16 Q12,D10,D1[2]
VEXT.8 Q4,Q4,Q5,#2
VMLAL.S16 Q9,D10,D2[2]
VMLAL.S16 Q10,D11,D3[2]
VMLAL.S16 Q11,D11,D2[2]
VMLAL.S16 Q12,D12,D3[2]
VEXT.8 Q5,Q5,Q6,#2
VMLAL.S16 Q9,D12,D4[2]
VMLAL.S16 Q10,D13,D5[2]
VMLAL.S16 Q11,D13,D4[2]
VMLAL.S16 Q12,D14,D5[2]
VEXT.8 Q6,Q6,Q7,#2
VMLAL.S16 Q9,D14,D6[2]
VMLAL.S16 Q10,D15,D7[2]
VMLAL.S16 Q11,D15,D6[2]
VMLAL.S16 Q12,D16,D7[2]
VEXT.8 Q7,Q7,Q8,#2
VMLAL.S16 Q9,D8,D0[3]
VMLAL.S16 Q10,D9,D1[3]
VEXT.8 Q8,Q8,Q15,#2
VMLAL.S16 Q11,D9,D0[3]
VMLAL.S16 Q12,D10,D1[3]
VEXT.8 Q4,Q4,Q5,#2
VMLAL.S16 Q9,D10,D2[3]
VMLAL.S16 Q10,D11,D3[3]
VMLAL.S16 Q11,D11,D2[3]
VMLAL.S16 Q12,D12,D3[3]
VEXT.8 Q5,Q5,Q6,#2
VMLAL.S16 Q9,D12,D4[3]
VMLAL.S16 Q10,D13,D5[3]
VMLAL.S16 Q11,D13,D4[3]
VMLAL.S16 Q12,D14,D5[3]
VEXT.8 Q6,Q6,Q7,#2
VMLAL.S16 Q9,D14,D6[3]
VMLAL.S16 Q10,D15,D7[3]
VMLAL.S16 Q11,D15,D6[3]
VMLAL.S16 Q12,D16,D7[3]
VEXT.8 Q7,Q7,Q8,#2
VMOV.S16 D8,D9 @ realign window regs for the next 8 outputs
VEXT.8 Q8,Q8,Q15,#2
VMOV.S16 D9,D10
VADD.S32 Q9,Q9,Q10 @ combine even/odd accumulators
VMOV.S16 D10,D11
VMOV.S16 D11,D12
VADD.S32 Q11,Q11,Q12
VMOV.S16 D12,D13
VQRSHRN.S32 D28,Q9,#15 @ rounded saturating Q15 narrow
VMOV.S16 D13,D14
VMOV.S16 D14,D15
VQRSHRN.S32 D29,Q11,#15
VMOV.S16 D15,D16
VLD1.S16 {Q8},[r4]! @ refill window tail with next 8 x[]
ADD r5, r5, #8 @ i += 8
CMP r5, #80 @ fixed block of 80 samples
VST1.S16 {D28,D29},[r3]! @ write 8 output samples over signal[]
BLT LOOP_6K7K
@ ---- mem[0..29] = x[80..109] (last 30 inputs become next state)
ADD r0, r13, #160 @x + lg
MOV r1, r12
@LDR r1, [sp, #-4] @mem address
VLD1.S16 {D0, D1, D2, D3}, [r0]!
VLD1.S16 {D4, D5, D6, D7}, [r0]!
VST1.S16 {D0, D1, D2, D3}, [r1]!
VST1.S16 {D4, D5, D6}, [r1]!
VST1.S16 D7[0], [r1]!
VST1.S16 D7[1], [r1]!
Filt_6k_7k_end:
ADD r13, r13, #240 @ release x[] buffer
LDMFD r13!, {r0 - r12, r15} @ restore and return
Lable1:
.word fir_6k_7k-Lable1 @ PC-relative offset to the coefficient table
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 3,845 | media/module/codecs/amrwb/enc/src/asm/ARMV7/syn_filt_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Syn_filt(
@ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
@ Word16 x[], /* (i) : input signal */
@ Word16 y[], /* (o) : output signal */
@ Word16 mem[], /* (i/o) : memory associated with this filtering. */
@)
@***********************************************************************
@ a[] --- r0
@ x[] --- r1
@ y[] --- r2
@ mem[] --- r3
@ m --- 16 lg --- 80 update --- 1
.section .text
.global Syn_filt_asm
@ Syn_filt_asm: 16th-order synthesis filter with state update:
@   y[i] = (x[i]*a0 - sum(a[j]*y[i-j], j=1..16)) rounded >> 12
@   r0 = a[] (Q12, a[0] consumed first), r1 = x[], r2 = y[], r3 = mem[]
@   (16 entries, in/out). Order m=16, lg=80, update=1 are fixed here.
@ A working buffer y_buf on the stack holds mem[] followed by the
@ produced samples; the 16-sample feedback history lives in D4-D7 and is
@ slid with VEXT, with each new y[i] inserted at the tail.
Syn_filt_asm:
STMFD r13!, {r4 - r12, r14} @ save callee-saved regs + lr
SUB r13, r13, #700 @ y_buf[L_FRAME16k + M16k]
MOV r4, r3 @ copy mem[] address
MOV r5, r13 @ copy yy = y_buf address
@ for(i = 0@ i < m@ i++)
@{
@ *yy++ = mem[i]@
@}
VLD1.S16 {D0, D1, D2, D3}, [r4]! @load 16 mems
VST1.S16 {D0, D1, D2, D3}, [r5]! @store 16 mem[] to *yy
LDRSH r5, [r0], #2 @ load a[0]
MOV r8, #0 @ i = 0
MOV r5, r5, ASR #1 @ a0 = a[0] >> 1
VMOV.S16 D8[0], r5 @ (kept in a lane; scalar path uses r5)
@ load all a[]
VLD1.S16 {D0, D1, D2, D3}, [r0]! @ load a[1] ~ a[16]
VREV64.16 D0, D0 @ reverse coeffs to match history lane order
VREV64.16 D1, D1
VREV64.16 D2, D2
VREV64.16 D3, D3
MOV r8, #0 @ loop times
MOV r10, r13 @ temp = y_buf
ADD r4, r13, #32 @ yy[i] address (past the 16 state samples)
VLD1.S16 {D4, D5, D6, D7}, [r10]! @ first 16 temp_p
SYN_LOOP:
LDRSH r6, [r1], #2 @ load x[i]
MUL r12, r6, r5 @ L_tmp = x[i] * a0
ADD r10, r4, r8, LSL #1 @ y[i], yy[i] address
VDUP.S32 Q10, r12
VMULL.S16 Q5, D3, D4 @ feedback sum(a[j]*y[i-j]), 4 taps/insn
VMLAL.S16 Q5, D2, D5
VMLAL.S16 Q5, D1, D6
VMLAL.S16 Q5, D0, D7
VEXT.8 D4, D4, D5, #2 @ slide history window by one sample
VEXT.8 D5, D5, D6, #2
VEXT.8 D6, D6, D7, #2
VPADD.S32 D12, D10, D11 @ horizontal sum
ADD r8, r8, #1 @ i++
VPADD.S32 D10, D12, D12
VDUP.S32 Q7, D10[0]
VSUB.S32 Q9, Q10, Q7 @ x[i]*a0 - feedback
VQRSHRN.S32 D20, Q9, #12 @ rounded saturating >> 12 (Q12 -> Word16)
VMOV.S16 r9, D20[0]
VEXT.8 D7, D7, D20, #2 @ new y[i] enters the history
CMP r8, #80 @ fixed lg = 80
STRH r9, [r10] @ yy[i]
STRH r9, [r2], #2 @ y[i]
BLT SYN_LOOP
@ update mem[]: copy last 16 outputs back
ADD r5, r13, #160 @ yy[64] address
VLD1.S16 {D0, D1, D2, D3}, [r5]!
VST1.S16 {D0, D1, D2, D3}, [r3]!
Syn_filt_asm_end:
ADD r13, r13, #700 @ release y_buf
LDMFD r13!, {r4 - r12, r15} @ restore and return
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 4,847 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Dot_p_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@**********************************************************************/
@Word32 Dot_product12( /* (o) Q31: normalized result (1 < val <= -1) */
@ Word16 x[], /* (i) 12bits: x vector */
@ Word16 y[], /* (i) 12bits: y vector */
@ Word16 lg, /* (i) : vector length */
@ Word16 * exp /* (o) : exponent of result (0..+30) */
@)
@************************************************************************
@ x[] --- r0
@ y[] --- r1
@ lg --- r2
@ *exp --- r3
.section .text
.global Dot_product12_asm

@************************************************************************
@ Word32 Dot_product12(Word16 x[], Word16 y[], Word16 lg, Word16 *exp)
@
@ Computes L_sum = (sum(x[i]*y[i]) << 1) + 1 over lg samples (lg is 64
@ or 80 here), then returns it left-normalized.
@
@ x[]  --- r0   (i) 12-bit input vector
@ y[]  --- r1   (i) 12-bit input vector
@ lg   --- r2   (i) vector length, 64 or 80
@ *exp --- r3   (o) exponent of result (0..+30) = 30 - norm shift
@ return r0     normalized result (Q31)
@
@ When x == y (energy computation) a dedicated path squares each block so
@ the data is only loaded once.
@ NOTE(review): clobbers NEON q0-q15 without preserving d8-d15, which are
@ callee-saved under AAPCS - confirm the surrounding build tolerates this.
@************************************************************************
Dot_product12_asm:
STMFD r13!, {r4 - r12, r14}
CMP r0, r1
BEQ LOOP_EQ
@ generic path: 64 products accumulated into the four 32-bit lanes of Q15
VLD1.S16 {Q0, Q1}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q2, Q3}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q4, Q5}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q6, Q7}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q8, Q9}, [r1]! @load 16 Word16 y[]
VLD1.S16 {Q10, Q11}, [r1]! @load 16 Word16 y[]
VLD1.S16 {Q12, Q13}, [r1]! @load 16 Word16 y[]
VMULL.S16 Q15, D16, D0
VMLAL.S16 Q15, D17, D1
VMLAL.S16 Q15, D18, D2
VMLAL.S16 Q15, D19, D3
VLD1.S16 {Q0, Q1}, [r1]! @load 16 Word16 y[] (first x block already consumed)
VMLAL.S16 Q15, D20, D4
VMLAL.S16 Q15, D21, D5
VMLAL.S16 Q15, D22, D6
VMLAL.S16 Q15, D23, D7
VMLAL.S16 Q15, D24, D8
VMLAL.S16 Q15, D25, D9
VMLAL.S16 Q15, D26, D10
VMLAL.S16 Q15, D27, D11
VMLAL.S16 Q15, D0, D12
VMLAL.S16 Q15, D1, D13
VMLAL.S16 Q15, D2, D14
VMLAL.S16 Q15, D3, D15
CMP r2, #64
BEQ Lable1
@ lg == 80: one more block of 16 products
VLD1.S16 {Q0, Q1}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q2, Q3}, [r1]!
VMLAL.S16 Q15, D4, D0
VMLAL.S16 Q15, D5, D1
VMLAL.S16 Q15, D6, D2
VMLAL.S16 Q15, D7, D3
B Lable1 @ was BL, which needlessly clobbered lr (plain branch intended)
LOOP_EQ:
@ x == y: energy path, each block loaded once and squared
VLD1.S16 {Q0, Q1}, [r0]!
VLD1.S16 {Q2, Q3}, [r0]!
VLD1.S16 {Q4, Q5}, [r0]!
VLD1.S16 {Q6, Q7}, [r0]!
VMULL.S16 Q15, D0, D0
VMLAL.S16 Q15, D1, D1
VMLAL.S16 Q15, D2, D2
VMLAL.S16 Q15, D3, D3
VMLAL.S16 Q15, D4, D4
VMLAL.S16 Q15, D5, D5
VMLAL.S16 Q15, D6, D6
VMLAL.S16 Q15, D7, D7
VMLAL.S16 Q15, D8, D8
VMLAL.S16 Q15, D9, D9
VMLAL.S16 Q15, D10, D10
VMLAL.S16 Q15, D11, D11
VMLAL.S16 Q15, D12, D12
VMLAL.S16 Q15, D13, D13
VMLAL.S16 Q15, D14, D14
VMLAL.S16 Q15, D15, D15
CMP r2, #64
BEQ Lable1
VLD1.S16 {Q0, Q1}, [r0]!
VMLAL.S16 Q15, D0, D0
VMLAL.S16 Q15, D1, D1
VMLAL.S16 Q15, D2, D2
VMLAL.S16 Q15, D3, D3
Lable1:
VQADD.S32 D30, D30, D31 @ saturating add of the two partial-sum halves
VPADD.S32 D30, D30, D30 @ horizontal reduce to one 32-bit sum
VMOV.S32 r12, D30[0]
ADD r12, r12, r12
ADD r12, r12, #1 @ L_sum = (L_sum << 1) + 1
MOV r4, r12
CMP r12, #0
RSBLT r4, r12, #0 @ r4 = |L_sum|
CLZ r10, r4
SUB r10, r10, #1 @ sft = norm_l(L_sum)
MOV r0, r12, LSL r10 @ L_sum = L_sum << sft (normalized return value)
RSB r11, r10, #30 @ *exp = 30 - sft
STRH r11, [r3]
Dot_product12_end:
LDMFD r13!, {r4 - r12, r15}
.end
|
Project-Mist-OS/packages_modules_Virtualization | 4,679 | vmbase/exceptions.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Saves the volatile registers onto the stack. This currently takes 14
 * instructions, so it can be used in exception handlers with 18 instructions
 * left.
 *
 * On return, x0 and x1 are initialised to elr_el1 and spsr_el1 respectively
 * (the saved exception return state), which can be used as the first and
 * second arguments of a subsequent call.
 */
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
stp x0, x1, [sp, #-(8 * 24)]!
stp x2, x3, [sp, #8 * 2]
stp x4, x5, [sp, #8 * 4]
stp x6, x7, [sp, #8 * 6]
stp x8, x9, [sp, #8 * 8]
stp x10, x11, [sp, #8 * 10]
stp x12, x13, [sp, #8 * 12]
stp x14, x15, [sp, #8 * 14]
stp x16, x17, [sp, #8 * 16]
str x18, [sp, #8 * 18]
stp x29, x30, [sp, #8 * 20]
/*
 * Save elr_el1 & spsr_el1. This is so that we can take a nested exception
 * and still be able to unwind. The layout must match
 * restore_volatile_from_stack below.
 */
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm
/**
 * Restores the volatile registers from the stack, undoing
 * save_volatile_to_stack (the frame layouts must stay in sync). This
 * currently takes 14 instructions, so it can be used in exception handlers
 * while still leaving 18 instructions left; if paired with
 * save_volatile_to_stack, there are 4 instructions to spare.
 */
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
ldp x2, x3, [sp, #8 * 2]
ldp x4, x5, [sp, #8 * 4]
ldp x6, x7, [sp, #8 * 6]
ldp x8, x9, [sp, #8 * 8]
ldp x10, x11, [sp, #8 * 10]
ldp x12, x13, [sp, #8 * 12]
ldp x14, x15, [sp, #8 * 14]
ldp x16, x17, [sp, #8 * 16]
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]
/* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
/* Restore x0 & x1, and release stack space. */
ldp x0, x1, [sp], #8 * 24
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
 * the work, then switching back to SP0 before returning.
 *
 * Switching to SPx and calling the Rust handler takes 16 instructions. To
 * restore and return we need an additional 16 instructions, so we can implement
 * the whole handler within the allotted 32 instructions (one 0x80-byte vector
 * table entry).
 */
.macro current_exception_sp0 handler:req
/* Run the handler on SP_EL1 so the save/restore macros have a usable stack. */
msr spsel, #1
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
/* Switch back to SP_EL0 before resuming the interrupted context. */
msr spsel, #0
eret
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SPx. It saves volatile registers, calls the Rust handler, restores volatile
 * registers, then returns.
 *
 * This also works for exceptions taken from EL0, if we don't care about
 * non-volatile registers.
 *
 * Saving state and jumping to the Rust handler takes 15 instructions, and
 * restoring and returning also takes 15 instructions, so we can fit the whole
 * handler in 30 instructions, under the limit of 32.
 */
.macro current_exception_spx handler:req
save_volatile_to_stack
bl \handler
/* If \handler returns, resume the interrupted context via eret. */
restore_volatile_from_stack
eret
.endm
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
/*
 * EL1 exception vector table (loaded into VBAR_EL1 by the entry code).
 * Four groups of four entries (sync, IRQ, FIQ, SError); each entry is
 * 0x80 bytes, i.e. 32 instructions.
 */
vector_table_el1:
/* Exceptions taken from the current EL while using SP_EL0. */
sync_cur_sp0:
current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
current_exception_sp0 serr_current
.balign 0x80
/* Exceptions taken from the current EL while using SP_ELx. */
sync_cur_spx:
current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
current_exception_spx serr_current
.balign 0x80
/* Exceptions taken from a lower EL running AArch64. */
sync_lower_64:
current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
current_exception_spx serr_lower
.balign 0x80
/* Exceptions taken from a lower EL running AArch32. */
sync_lower_32:
current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
current_exception_spx serr_lower
|
Project-Mist-OS/packages_modules_Virtualization | 1,788 | vmbase/exceptions_panic.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/**
 * The following table is intended to trap any fault resulting from the very
 * first memory accesses. They assume that PSCI v0.2 is available and provides
 * the PSCI_SYSTEM_RESET call in an attempt to gracefully exit but otherwise
 * results in the core busy-looping.
 *
 * Every entry simply expands the reset_or_hang macro (from common.h); no
 * state is saved because nothing can be recovered this early in boot.
 */
.section .text.vector_table_panic, "ax"
.global vector_table_panic
.balign 0x800
vector_table_panic:
sync_cur_sp0_panic:
reset_or_hang
.balign 0x80
irq_cur_sp0_panic:
reset_or_hang
.balign 0x80
fiq_cur_sp0_panic:
reset_or_hang
.balign 0x80
serr_cur_sp0_panic:
reset_or_hang
.balign 0x80
sync_cur_spx_panic:
reset_or_hang
.balign 0x80
irq_cur_spx_panic:
reset_or_hang
.balign 0x80
fiq_cur_spx_panic:
reset_or_hang
.balign 0x80
serr_cur_spx_panic:
reset_or_hang
.balign 0x80
sync_lower_64_panic:
reset_or_hang
.balign 0x80
irq_lower_64_panic:
reset_or_hang
.balign 0x80
fiq_lower_64_panic:
reset_or_hang
.balign 0x80
serr_lower_64_panic:
reset_or_hang
.balign 0x80
sync_lower_32_panic:
reset_or_hang
.balign 0x80
irq_lower_32_panic:
reset_or_hang
.balign 0x80
fiq_lower_32_panic:
reset_or_hang
.balign 0x80
serr_lower_32_panic:
reset_or_hang
|
Project-Mist-OS/packages_modules_Virtualization | 5,154 | vmbase/entry.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/* MAIR_EL1: attr #0 = device nGnRE, attr #1 = normal write-back write-allocate. */
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner sharable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
 * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
 * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
/* TCR_EL1 value (IPS is patched in at runtime from ID_AA64MMFR0_EL1). */
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/* Privileged Access Never is unchanged on taking an exception to EL1. */
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* All writable memory regions are treated as XN. */
.set .L_SCTLR_EL1_WXN, 0x1 << 19
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
/* SCTLR_EL1 value enabling MMU, caches, alignment checks and W^X. */
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
/**
 * This is a generic entry point for an image. It carries out the operations required to prepare the
 * loaded image to be run. Specifically, it zeroes the bss section using registers x28 and above,
 * copies the data section, prepares the stacks, enables floating point, and sets up the exception
 * vector. It preserves x0-x3 for the Rust entry point, as these may contain boot parameters.
 */
.section .init.entry, "ax"
.global entry
entry:
/* Install the panic vector table so that any fault taken this early is caught. */
adr x30, vector_table_panic
msr vbar_el1, x30
/*
 * Our load address is set by the host so validate it before proceeding.
 */
adr x30, entry
mov_i x29, entry
cmp x29, x30
b.eq 1f
reset_or_hang
1:
/* Load and apply the memory management configuration, ready to enable MMU and caches. */
adrp x30, idmap
msr ttbr0_el1, x30
mov_i x30, .Lmairval
msr mair_el1, x30
mov_i x30, .Ltcrval
/* Copy the supported PA range into TCR_EL1.IPS. */
mrs x29, id_aa64mmfr0_el1
bfi x30, x29, #32, #4
msr tcr_el1, x30
mov_i x30, .Lsctlrval
/*
 * Ensure everything before this point has completed, then invalidate any potentially stale
 * local TLB entries before they start being used.
 */
isb
tlbi vmalle1
ic iallu
dsb nsh
isb
/*
 * Configure sctlr_el1 to enable MMU and cache and don't proceed until this has completed.
 */
msr sctlr_el1, x30
isb
/* Disable trapping floating point access in EL1 (CPACR_EL1.FPEN = 0b11). */
mrs x30, cpacr_el1
orr x30, x30, #(0x3 << 20)
msr cpacr_el1, x30
isb
/* Zero out the bss section, 16 bytes at a time. */
adr_l x29, bss_begin
adr_l x30, bss_end
0: cmp x29, x30
b.hs 1f
stp xzr, xzr, [x29], #16
b 0b
1: /* Copy the data section from its load address, 32 bytes at a time. */
adr_l x28, data_begin
adr_l x29, data_end
adr_l x30, data_lma
2: cmp x28, x29
b.ge 3f
ldp q0, q1, [x30], #32
stp q0, q1, [x28], #32
b 2b
3: /* Prepare the exception handler stack (SP_EL1). */
adr_l x30, init_eh_stack_pointer
msr spsel, #1
mov sp, x30
/* Prepare the main thread stack (SP_EL0). */
adr_l x30, init_stack_pointer
msr spsel, #0
mov sp, x30
/* Set up exception vector (replacing the panic table installed above). */
adr x30, vector_table_el1
msr vbar_el1, x30
/*
 * Set up Bionic-compatible thread-local storage.
 *
 * Note that TPIDR_EL0 can't be configured from rust_entry because the
 * compiler will dereference it during function entry to access
 * __stack_chk_guard and Rust doesn't support LLVM's
 * __attribute__((no_stack_protector)).
 */
adr_l x30, __bionic_tls
msr tpidr_el0, x30
/* Call into Rust code. */
bl rust_entry
/* Loop forever waiting for interrupts. */
4: wfi
b 4b
|
Project-Mist-OS/packages_modules_Virtualization | 2,259 | rialto/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Initial TTBR0 idmap activated before first memory write.
// Remains active until a new page table is created by early Rust.
//
.set .SZ_1K, 1024
.set .SZ_4K, 4 * .SZ_1K
.set .SZ_1M, 1024 * .SZ_1K
.set .SZ_2M, 2 * .SZ_1M
.set .SZ_1G, 1024 * .SZ_1M
.set .PAGE_SIZE, .SZ_4K
// Image layout: DT at 2 GiB, text at +2 MiB, writable data at +4 MiB.
.set .ORIGIN_ADDR, 2 * .SZ_1G
.set .DTB_ADDR, .ORIGIN_ADDR + (0 * .SZ_2M)
.set .TEXT_ADDR, .ORIGIN_ADDR + (1 * .SZ_2M)
.set .DATA_ADDR, .ORIGIN_ADDR + (2 * .SZ_2M)
// Stage-1 descriptor type and attribute bits.
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
.set .L_TT_AF, 0x1 << 10 // Access flag
.set .L_TT_NG, 0x1 << 11 // Not global
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.balign .PAGE_SIZE
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
// '+' is safe here: the level-2 table below is page-aligned, so the type
// bits cannot carry into the address field.
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.balign .PAGE_SIZE, 0 // unmapped
/* level 2 */
0: .quad .L_BLOCK_RO | .DTB_ADDR // DT provided by VMM
.quad .L_BLOCK_MEM_XIP | .TEXT_ADDR // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | .DATA_ADDR // 2 MiB of writable DRAM
.balign .PAGE_SIZE, 0 // unmapped
|
Project-Mist-OS/packages_modules_Virtualization | 1,745 | pvmfw/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Stage-1 descriptor type and attribute bits. */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
/* Initial identity map (TTBR0): 4 KiB granule, 1 GiB per level-1 entry. */
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GB of device mappings
// '+' is safe here: the level-2 table below is 4 KiB-aligned (.align 12),
// so the type bits cannot carry into the address field.
.quad .L_TT_TYPE_TABLE + 0f // Unmapped device memory, and pVM firmware
.fill 510, 8, 0x0 // 510 GB of remaining VA space
/* level 2 */
0: .fill 510, 8, 0x0 // first 1020 MiB of [1 GiB, 2 GiB) unmapped
.quad .L_BLOCK_MEM_XIP | 0x7fc00000 // pVM firmware image
.quad .L_BLOCK_MEM | 0x7fe00000 // Writable memory for stack, heap &c.
|
Project-Mist-OS/packages_modules_Virtualization | 1,813 | vmbase/example/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Stage-1 descriptor type and attribute bits. */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
/* Initial identity map (TTBR0): 4 KiB granule, 1 GiB per level-1 entry. */
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
// '+' is safe here: the level-2 table below is 4 KiB-aligned (.align 12),
// so the type bits cannot carry into the address field.
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.fill 509, 8, 0x0 // 509 GiB of remaining VA space
/* level 2 */
0: .quad .L_BLOCK_MEM | 0x80000000 // DT provided by VMM
.quad .L_BLOCK_MEM_XIP | 0x80200000 // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | 0x80400000 // 2 MiB of writable DRAM
.fill 509, 8, 0x0
|
ProjectEverest/frameworks_av | 5,111 | media/module/codecs/mp3dec/src/asm/pvmp3_dct_9_gcc.s | @ ------------------------------------------------------------------
@ Copyright (C) 1998-2009 PacketVideo
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ http://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
@ express or implied.
@ See the License for the specific language governing permissions
@ and limitations under the License.
@ -------------------------------------------------------------------
@
@
@ Filename: pvmp3_dct_9_gcc.s
@
@------------------------------------------------------------------------------
@ REVISION HISTORY
@
@
@ Who: Date: MM/DD/YYYY
@ Description:
@
@------------------------------------------------------------------------------
.arm
.align 4
.text
@------------------------------------------------------------------------------
@ void pvmp3_dct_9(int32 vec[])
@ In-place 9-point DCT used by the 18-point MDCT (pvmp3_mdct_18 calls it on
@ each half of its vector).
@ In:  r0 = vec (nine 32-bit fixed-point values at offsets 0x00..0x20)
@ Out: vec overwritten with the transform; clobbers r1-r12, lr.
@ Fixed-point products use SMULL/SMLAL, keeping the high 32 bits of the
@ 64-bit result (fxp_mul32_Q32 / fxp_mac32_Q32 in the C reference); the
@ rdlo register of each SMLAL is scratch.
@------------------------------------------------------------------------------
.global pvmp3_dct_9
pvmp3_dct_9:
stmfd sp!,{r4-r11,lr}
@ Butterfly stage: sums/differences of mirrored elements.
ldr r2, [r0, #0x20]
ldr r3, [r0, #0]
ldr r12,[r0, #4]
add r1,r2,r3
sub lr,r2,r3
ldr r3,[r0, #0x1c]
ldr r4,[r0, #0x18]
add r2,r3,r12
ldr r5,[r0,#8]
sub r3,r3,r12
add r12,r4,r5
sub r4,r4,r5
ldr r5,[r0, #0x14]
ldr r7,[r0, #0xc]
ldr r9,[r0, #0x10]
add r6,r5,r7
sub r5,r5,r7
add r7,r1,r12
add r8,r9,r2
add r7,r7,r6
add r10,r7,r8
rsb r7,r8,r7,asr #1
str r7,[r0, #0x18]
rsb r2,r9,r2,asr #1
str r10,[r0,#0]
@ Even-part MACs against the Q31 cosine constants below.
ldr r11,cos_2pi_9
rsb r7,r2,#0
ldr r10,cos_4pi_9
mov r9,r1,lsl #1
mov r8,r7
@ vec[4] = fxp_mac32_Q32( vec[4], tmp0<<1, cos_2pi_9)@
smlal r1,r8,r11,r9
ldr r11,cos_pi_9
mov r1,r9 @ reload tmp0<<1; r1 is rdlo scratch for the next smlal
@ vec[8] = fxp_mac32_Q32( vec[8], tmp0<<1, cos_4pi_9)@
smlal r1,r7,r10,r9
mov r1,r12,lsl #1
@ vec[2] = fxp_mac32_Q32( vec[2], tmp0<<1, cos_pi_9)@
smlal r9,r2,r11,r9
rsb r9,r10,#0
ldr r11,cos_5pi_9
smlal r12,r2,r9,r1
@ vec[2] = fxp_mac32_Q32( vec[2], tmp2<<1, cos_5pi_9)@
ldr r9,cos_2pi_9
mov r12,r1 @ rdlo scratch for the next smlal
smlal r12,r8,r11,r1
@ vec[8] = fxp_mac32_Q32( vec[8], tmp2<<1, cos_2pi_9)@
smlal r1,r7,r9,r1
mov r1,r6,lsl #1
smlal r12,r7,r11,r1
and r6,r10,r11,asr #14
smlal r12,r8,r6,r1
ldr r10,cos_11pi_18
add r12,r11,r6
smlal r1,r2,r12,r1
ldr r9,cos_8pi_9
str r2,[r0,#8]
mov r1,r5,lsl #1
@ Odd part: vec[1], vec[3], vec[5], vec[7].
@ vec[8] = fxp_mac32_Q32( vec[8], tmp3<<1, cos_8pi_9)@
smull r2,r6,r9,r1
str r7,[r0,#0x20]
mov r2,r4,lsl #1
ldr r7,cos_13pi_18
smlal r12,r6,r10,r2
mov r3,r3,lsl #1
@ vec[5] = fxp_mac32_Q32( vec[5], tmp8<<1, cos_13pi_18)@
smlal r12,r6,r7,r3
add r4,r5,r4
mov r12,lr,lsl #1
sub lr,r4,lr
ldr r7,cos_17pi_18
str r8,[r0, #0x10]
ldr r4,cos_pi_6
mov lr,lr,lsl #1
@ vec[1] = fxp_mac32_Q32( vec[1], tmp8<<1, cos_17pi_18)@
smlal r8,r6,r7,r12
@ vec[3] = fxp_mul32_Q32((tmp5 + tmp6 - tmp8)<<1, cos_pi_6)@
smull r5,lr,r4,lr
str r6,[r0, #4]
str lr,[r0, #0xc]
@ vec[5] = fxp_mul32_Q32(tmp5<<1, cos_17pi_18)@
smull r5,lr,r7,r1
rsb r6,r9,#0
@ vec[5] = fxp_mac32_Q32( vec[5], tmp6<<1, cos_7pi_18)@
smlal r5,lr,r6,r2
@ vec[5] = fxp_mac32_Q32( vec[5], tmp7<<1, cos_pi_6)@
smlal r5,lr,r4,r3
@ vec[5] = fxp_mac32_Q32( vec[5], tmp8<<1, cos_13pi_18)@
smlal r5,lr,r10,r12
str lr,[r0, #0x14]
rsb lr,r10,#0
@ vec[7] = fxp_mul32_Q32(tmp5<<1, cos_5pi_18)@
smull r5,r1,lr,r1
@ vec[7] = fxp_mac32_Q32( vec[7], tmp6<<1, cos_17pi_18)@
smlal r2,r1,r7,r2
@ vec[7] = fxp_mac32_Q32( vec[7], tmp7<<1, cos_pi_6)@
smlal r3,r1,r4,r3
@ vec[7] = fxp_mac32_Q32( vec[7], tmp8<<1, cos_11pi_18)@
smlal r12,r1,r9,r12
str r1,[r0, #0x1c]
ldmfd sp!,{r4-r11,pc}
@ Q31 cosine constants (two's complement; values past pi/2 are negative).
cos_2pi_9:
.word 0x620dbe80
cos_4pi_9:
.word 0x163a1a80
cos_pi_9:
.word 0x7847d900
cos_5pi_9:
.word 0x87b82700
cos_8pi_9:
.word 0xd438af00
cos_11pi_18:
.word 0xadb92280
cos_13pi_18:
.word 0x91261480
cos_17pi_18:
.word 0x81f1d200
cos_pi_6:
.word 0x6ed9eb80
|
ProjectEverest/frameworks_av | 12,906 | media/module/codecs/mp3dec/src/asm/pvmp3_dct_16_gcc.s | @ ------------------------------------------------------------------
@ Copyright (C) 1998-2009 PacketVideo
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ http://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
@ express or implied.
@ See the License for the specific language governing permissions
@ and limitations under the License.
@ -------------------------------------------------------------------
@
@
@ Filename: pvmp3_dct_16_gcc.s
@
@
@------------------------------------------------------------------------------
@ REVISION HISTORY
@
@
@ Who: Date: MM/DD/YYYY
@ Description:
@
@------------------------------------------------------------------------------
.arm
.align 4
.text
.extern pvmp3_dct_16
.extern pvmp3_merge_in_place_N32
.extern pvmp3_split
@------------------------------------------------------------------------------
@ void pvmp3_dct_16(int32 vec[], int32 flag)
@ In-place 16-point DCT (half of the 32-point filterbank DCT).
@ In:  r0 = vec (16 int32 values, overwritten), r1 = flag
@      When flag == 0 the odd-half intermediates are negated before the
@      final recombination (see the rsb block ahead of no_flag_proc).
@ Clobbers r1-r12, lr. Fixed-point multiplies keep the SMULL high word
@ against the Q-format constants at the end of this file.
@ Stack: 0x1c bytes of spilled intermediates below the saved {r0,r1},
@ so [sp,#0x1c] = saved vec and [sp,#0x20] = saved flag.
@------------------------------------------------------------------------------
.global pvmp3_dct_16
pvmp3_dct_16:
stmfd sp!,{r0,r1,r4-r11,lr}
@ Even half: butterflies of vec[i] with vec[15-i], scaled partial results
@ spilled to the local stack area.
ldr r1,[r0]
ldr r3,[r0,#0x3c]
ldr r12,constant1
sub r2,r1,r3
smull lr,r2,r12,r2
sub sp,sp,#0x1c
str r2,[sp,#0x14]
ldr r2,[r0,#0x1c]
ldr r12,[r0,#0x20]
add r1,r1,r3
sub r3,r2,r12
ldr lr,constant2
mov r3,r3,lsl #3
smull r4,r3,lr,r3
ldr r6,constant5
str r3,[sp]
add r3,r2,r12
sub r2,r1,r3
ldr r12,constant3
add r3,r1,r3
smull lr,r2,r12,r2
ldr r12,[r0,#0x38]
ldr r1,[r0,#4]
ldr lr,constant4
sub r4,r1,r12
add r1,r1,r12
ldr r12,[r0,#0x18]
smull r4,r5,lr,r4
ldr lr,[r0,#0x24]
ldr r10,constant10
sub r4,r12,lr
mov r4,r4,lsl #1
smull r7,r4,r6,r4
add r12,r12,lr
add r7,r1,r12
sub r12,r1,r12
ldr r1,constant6
str r4,[sp,#4]
smull r12,r4,r1,r12
ldr r1,[r0,#8]
ldr r12,[r0,#0x34]
ldr r6,constant7
sub lr,r1,r12
smull r8,lr,r6,lr
add r1,r1,r12
str lr,[sp,#0x10]
ldr r12,[r0,#0x14]
ldr lr,[r0,#0x28]
ldr r8,constant8
sub r6,r12,lr
mov r6,r6,lsl #1
smull r9,r6,r8,r6
add r12,r12,lr
ldr r9,constant9
add r8,r1,r12
sub r12,r1,r12
smull r12,lr,r9,r12
ldr r12,[r0,#0x30]
ldr r1,[r0,#0xc]
sub r9,r1,r12
smull r11,r9,r10,r9
add r12,r1,r12
str r9,[sp,#0xc]
ldr r9,[r0,#0x10]
ldr r10,constant11
str r9,[sp,#0x18]
ldr r1,[r0,#0x2c]
sub r9,r9,r1
smull r11,r9,r10,r9
ldr r10,constant12
str r9,[sp,#8]
ldr r9,[sp,#0x18]
ldr r11,constant14
add r9,r9,r1
add r1,r12,r9
sub r12,r12,r9
mov r12,r12,lsl #2
smull r9,r12,r10,r12
ldr r10,constant13
add r9,r3,r1
sub r1,r3,r1
smull r1,r3,r10,r1
sub r1,r7,r8
mov r1,r1,lsl #1
smull r1,r10,r11,r1
add r1,r7,r8
add r8,r9,r1
sub r7,r9,r1
mov r8,r8,asr #1
ldr r1,constant15
str r8,[r0]
smull r7,r8,r1,r7
sub r7,r3,r10
str r8,[r0,#0x20]
mov r7,r7,lsl #1
smull r8,r7,r1,r7
add r3,r3,r10
add r3,r3,r7
str r3,[r0,#0x10]
sub r3,r2,r12
str r7,[r0,#0x30]
add r2,r2,r12
ldr r7,constant13
sub r12,r4,lr
mov r3,r3,lsl #1
smull r8,r3,r7,r3
add lr,r4,lr
sub r4,r2,lr
mov r12,r12,lsl #2
smull r7,r12,r11,r12
add lr,lr,r2
sub r2,r3,r12
mov r2,r2,lsl #1
smull r7,r2,r1,r2
mov r4,r4,lsl #1
add r12,r12,r2
add r3,r12,r3
smull r7,r4,r1,r4
add r12,r3,lr
add r3,r3,r4
str r3,[r0,#0x18]
add r3,r2,r4
str r2,[r0,#0x38]
str r3,[r0,#0x28]
str r12,[r0,#8]
@ Odd half: recombine the spilled intermediates from the stack.
ldr r2,[sp,#0x14]
ldr r3,[sp,#0]
ldr lr,[sp,#4]
sub r2,r2,r3
ldr r3,constant3
mov r2,r2,lsl #1
smull r12,r2,r3,r2
ldr r3,[sp,#0x14]
ldr r12,[sp,#0]
ldr r4,constant6
add r12,r3,r12
ldr r3,[sp,#4]
sub lr,r5,lr
mov lr,lr,lsl #1
add r3,r5,r3
smull r5,lr,r4,lr
ldr r4,[sp,#0x10]
ldr r5,[sp,#0x10]
add r4,r4,r6
sub r5,r5,r6
ldr r6,constant9
mov r5,r5,lsl #1
smull r7,r5,r6,r5
ldr r6,[sp,#8]
ldr r9,[sp,#0xc]
ldr r10,constant12
sub r6,r9,r6
mov r6,r6,lsl #3
smull r7,r6,r10,r6
ldr r8,[sp,#0x20] @ r8 = saved r1 (flag)
ldr r7,[sp,#8]
cmp r8,#0
add r7,r9,r7
bne no_flag_proc
@ flag == 0: negate every odd-half intermediate before recombining.
rsb r12,r12,#0
rsb r2,r2,#0
rsb r3,r3,#0
rsb lr,lr,#0
rsb r4,r4,#0
rsb r5,r5,#0
rsb r7,r7,#0
rsb r6,r6,#0
no_flag_proc:
sub r8,r2,r6
add r2,r6,r2
sub r6,r12,r7
ldr r9,constant13
add r12,r12,r7
sub r7,r3,r4
mov r6,r6,lsl #1
mov r8,r8,lsl #1
smull r10,r8,r9,r8
add r3,r3,r4
smull r10,r6,r9,r6
sub r4,lr,r5
mov r7,r7,lsl #2
smull r9,r7,r11,r7
add lr,lr,r5
sub r5,r6,r7
add r6,r6,r7
sub r7,r12,r3
add r3,r12,r3
sub r12,r2,lr
mov r4,r4,lsl #2
smull r9,r4,r11,r4
add lr,r2,lr
sub r2,r8,r4
mov r2,r2,lsl #1
mov r5,r5,lsl #1
mov r12,r12,lsl #1
mov r7,r7,lsl #1
smull r9,r5,r1,r5
smull r9,r2,r1,r2
add r6,r6,r5
smull r9,r7,r1,r7
smull r9,r12,r1,r12
add r1,r4,r2
add r1,r1,r8
add lr,lr,r1
add r3,r3,lr
@ Scatter the odd results back into vec[1], vec[3], ... vec[15].
str r3,[r0,#4]
add r3,r6,lr
str r3,[r0,#0xc]
add r1,r1,r12
add r3,r6,r1
add r1,r7,r1
str r1,[r0,#0x1c]
str r3,[r0,#0x14]
add r1,r12,r2
add r3,r7,r1
add r1,r5,r1
str r1,[r0,#0x2c]
str r3,[r0,#0x24]! @ writeback: r0 += 0x24 for the last two stores
add r1,r5,r2
str r1,[r0,#0x10]
str r2,[r0,#0x18]
add sp,sp,#0x24 @ pop 0x1c bytes of locals + saved r0,r1
ldmfd sp!,{r4-r11,pc}
@------------------------------------------------------------------------------
@------------------------------------------------------------------------------
@ void pvmp3_merge_in_place_N32(int32 fifo[])
@ Final in-place merge of the 32-point DCT: relocates the even-half results
@ (offsets 0x00..0x3c) to their interleaved positions and forms the odd
@ outputs as sums of adjacent words from the upper half (0x40..0x7c).
@ In: r0 = fifo (32 int32 values). Clobbers r1-r4, r12, lr.
@ The load/store order is carefully interleaved so each word is read before
@ the slot it came from is overwritten - do not reorder.
@------------------------------------------------------------------------------
.global pvmp3_merge_in_place_N32
pvmp3_merge_in_place_N32:
stmfd sp!,{r4,lr}
ldr r1,[r0,#0x1c]
ldr r2,[r0,#0x38]
str r1,[r0,#0x38]
ldr r1,[r0,#0x18]
ldr r3,[r0,#0x30]
str r1,[r0,#0x30]
ldr r12,[r0,#0x14]
ldr r1,[r0,#0x28]
str r12,[r0,#0x28]
ldr r12,[r0,#0x10]
ldr lr,[r0,#0x20]
str r12,[r0,#0x20]
ldr r12,[r0,#0xc]
str r12,[r0,#0x18]
ldr r12,[r0,#8]
str r12,[r0,#0x10]
ldr r12,[r0,#4]
str r12,[r0,#8]
@ Odd outputs: sums of neighbouring upper-half words.
ldr r4,[r0,#0x40]
ldr r12,[r0,#0x44]
add r4,r4,r12
str r4,[r0,#4]
str lr,[r0,#0x40]
ldr lr,[r0,#0x48]
add r12,lr,r12
str r12,[r0,#0xc]
ldr r12,[r0,#0x4c]
add lr,r12,lr
str lr,[r0,#0x14]
ldr lr,[r0,#0x24]
str lr,[r0,#0x48]
ldr lr,[r0,#0x50]
add r12,lr,r12
str r12,[r0,#0x1c]
ldr r12,[r0,#0x54]
str r1,[r0,#0x50]
add lr,r12,lr
str lr,[r0,#0x24]
ldr r1,[r0,#0x58]
ldr r4,[r0,#0x2c]
ldr lr,[r0,#0x34]
add r12,r1,r12
str r12,[r0,#0x2c]
ldr r12,[r0,#0x5c]
add r1,r12,r1
str r1,[r0,#0x34]
str r4,[r0,#0x58]
ldr r1,[r0,#0x60]
ldr r4,[r0,#0x3c]
add r12,r1,r12
str r12,[r0,#0x3c]
ldr r12,[r0,#0x64]
add r1,r12,r1
str r1,[r0,#0x44]
ldr r1,[r0,#0x68]
add r12,r1,r12
str r12,[r0,#0x4c]
ldr r12,[r0,#0x6c]
add r1,r12,r1
str r1,[r0,#0x54]
ldr r1,[r0,#0x70]
str r3,[r0,#0x60]
add r12,r1,r12
str r12,[r0,#0x5c]
ldr r3,[r0,#0x74]
add r1,r3,r1
str r1,[r0,#0x64]
str lr,[r0,#0x68]
ldr r1,[r0,#0x78]
str r2,[r0,#0x70]
add r3,r1,r3
str r3,[r0,#0x6c]
ldr r2,[r0,#0x7c]
add r1,r1,r2
str r1,[r0,#0x74]
str r4,[r0,#0x78]
ldmfd sp!,{r4,pc}
@------------------------------------------------------------------------------
@------------------------------------------------------------------------------
@ void pvmp3_split(int32 *vect)
@ Split (butterfly) stage feeding the 32-point DCT. r0 walks upwards from
@ vect and r1 (= vect - 4) walks downwards; for each of the 16 pairs the
@ sum is stored at *r1 and the difference, scaled by a CosTable_dct32
@ factor, at *r0. r2 walks the table backwards from its last entry.
@ The first 6 factors (loop1) are applied as a full 64-bit product shifted
@ right by 27; the remaining 10 (loop2) double the difference and keep the
@ SMULL high word (Q31). Clobbers r1-r5, r12, lr.
@------------------------------------------------------------------------------
.global pvmp3_split
pvmp3_split:
stmfd sp!,{r4,r5,lr}
adr r1,constant16
ldr r2,[r1]
add r2,r1 @ r2 = CosTable_dct32 + 60 (last entry), walked downwards
sub r1,r0,#4
mov r3,#3
@ loop1: unrolled x2, runs 3 times = 6 pairs, >>27 scaling path.
loop1:
ldr r12,[r0]
ldr lr,[r1]
ldr r4,[r2],#-4
add r5,lr,r12
sub r12,lr,r12
smull r12,lr,r4,r12
str r5,[r1],#-4
mov r12,r12,lsr #27
add r12,r12,lr,lsl #5 @ (64-bit product) >> 27
str r12,[r0],#4
ldr r12,[r0]
ldr lr,[r1]
ldr r4,[r2],#-4
add r5,lr,r12
sub r12,lr,r12
smull r12,lr,r4,r12
str r5,[r1],#-4
mov r12,r12,lsr #27
add r12,r12,lr,lsl #5
str r12,[r0],#4
subs r3,r3,#1
bne loop1
mov r3,#5
@ loop2: unrolled x2, runs 5 times = 10 pairs, Q31 high-word path.
loop2:
ldr r12,[r0]
ldr lr,[r1]
ldr r4,[r2],#-4
add r5,lr,r12
sub r12,lr,r12
mov r12,r12,lsl #1
smull lr,r12,r4,r12
str r5,[r1],#-4
str r12,[r0],#4
ldr r12,[r0]
ldr lr,[r1]
ldr r4,[r2],#-4
add r5,lr,r12
sub r12,lr,r12
mov r12,r12,lsl #1
smull lr,r12,r4,r12
str r5,[r1],#-4
str r12,[r0],#4
subs r3,r3,#1
bne loop2
ldmfd sp!,{r4,r5,pc}
@------------------------------------------------------------------------------
@ Fixed-point constants for pvmp3_dct_16 (two's complement; cosine-derived
@ butterfly factors of the form 1/(2*cos(k*pi/2N)) - TODO confirm exact
@ mapping against the C reference pvmp3_dct_16.cpp).
constant1:
.word 0x404f4680
constant2:
.word 0x519e4e00
constant3:
.word 0x4140fb80
constant4:
.word 0x42e13c00
constant5:
.word 0x6e3c9300
constant6:
.word 0x4cf8de80
constant7:
.word 0x48919f80
constant8:
.word 0x43e22480
constant9:
.word 0x73326b80
constant10:
.word 0x52cb0e80
constant11:
.word 0x64e24000
constant12:
.word 0x52036780
constant13:
.word 0x4545ea00
constant14:
.word 0x539eba80
@ constant15 = cos(pi/4) in Q31.
constant15:
.word 0x5a827980
@ PC-relative offset from constant16 to the last entry of CosTable_dct32.
constant16:
.word (CosTable_dct32 + 60)-constant16
@ 16-entry table consumed backwards by pvmp3_split: the last 6 entries via
@ the 64-bit >>27 path (loop1), the first 10 via the Q31 high-word path
@ (loop2).
CosTable_dct32:
.word 0x4013c280
.word 0x40b34580
.word 0x41fa2d80
.word 0x43f93400
.word 0x46cc1c00
.word 0x4a9d9d00
.word 0x4fae3700
.word 0x56601e80
.word 0x5f4cf700
.word 0x6b6fcf00
.word 0x07c7d1d8
.word 0x095b0350
.word 0x0bdf91b0
.word 0x107655e0
.word 0x1b42c840
.word 0x51852300
|
ProjectEverest/frameworks_av | 10,141 | media/module/codecs/mp3dec/src/asm/pvmp3_mdct_18_gcc.s | @ ------------------------------------------------------------------
@ Copyright (C) 1998-2009 PacketVideo
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ http://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
@ express or implied.
@ See the License for the specific language governing permissions
@ and limitations under the License.
@ -------------------------------------------------------------------
@
@
@ Filename: pvmp3_mdct_18_gcc.s
@
@------------------------------------------------------------------------------
@ REVISION HISTORY
@
@
@ Who: Date: MM/DD/YYYY
@ Description:
@
@------------------------------------------------------------------------------
.arm
.align 4
.text
.extern pvmp3_dct_9
@------------------------------------------------------------------------------
@------------------------------------------------------------------------------
@ pvmp3_mdct_18 — 18-point MDCT built from two 9-point DCTs plus
@ pre-twiddle, recombination, and windowing/overlap stages.
@ In:    r0 = vec (18 x int32, modified in place)
@        r1 = second int32 buffer (overlap/history — presumably; confirm
@             against pvmp3_mdct_18.cpp in the C reference)
@        r2 = coefficient/window table pointer (read at offsets 0x48..0x8c)
@ Out:   vec and the r1 buffer updated in place.
@ Calls: pvmp3_dct_9 (twice).  ABI: AAPCS; r4-r11 preserved via push/pop.
@ NOTE(review): the long straight-line section below is hand-scheduled —
@ loads are hoisted above uses for ARM9-era pipelines, and several registers
@ are recycled mid-sequence.  Statement order is load-bearing; do not
@ reorder without re-deriving the register lifetimes.
@------------------------------------------------------------------------------
.global pvmp3_mdct_18
pvmp3_mdct_18:
stmfd sp!,{r4-r11,lr}
mov r7,r2
adr r2,constdata$1 @ r2 = cos_split table, r3 = cosTerms_dct18+? (see adds)
mov r6,r1
add r3,r2,#0x24 @ r3 = cosTerms_1_ov_cos_phi (constdata$1 + 9 words)
add r12,r3,#0x44 @ r12 = last-but-one entry of that table, walks backward
add r1,r0,#0x44 @ r1 = &vec[17], walks backward
mov r5,r0
@ for ( i=9@ i!=0@ i--)
@ {
mov r4,#9
@ Pre-twiddle: 9 butterflies pairing vec[i] (ascending) with vec[17-i]
@ (descending), using Q32/Q27/Q28 fixed-point products as annotated.
Loop_1:
@ tmp = *(pt_vec)
@ tmp1 = *(pt_vec_o)
ldr lr,[r0] @@ tmp == lr
ldr r8,[r3],#4 @@ tmp1 == r8
@ tmp = fxp_mul32_Q32( tmp<<1, *(pt_cos++ ))
@ tmp1 = fxp_mul32_Q27( tmp1, *(pt_cos_x--))
mov lr,lr,lsl #1
smull r10,lr,r8,lr
ldr r8,[r12],#-4
ldr r9,[r1]
subs r4,r4,#1
smull r9,r10,r8,r9
mov r8,r9,lsr #27
add r8,r8,r10,lsl #5
@ *(pt_vec++) = tmp + tmp1
@ *(pt_vec_o--) = fxp_mul32_Q28( (tmp - tmp1), *(pt_cos_split++))
add r9,lr,r8
sub r8,lr,r8
ldr lr,[r2],#4
str r9,[r0],#4
smull r8,r9,lr,r8
mov lr,r8,lsr #28
add lr,lr,r9,lsl #4
str lr,[r1],#-4
bne Loop_1
@ }
@ Two 9-point DCTs over the even/odd halves.
mov r0,r5 @@ r0 = vec
bl pvmp3_dct_9
add r0,r5,#0x24 @@ r0 = &vec[9]
bl pvmp3_dct_9
@ Recombine the two dct_9 outputs: shuffle vec[] and form the
@ difference terms (hand-inlined; offsets are word indices x4).
ldr r0,[r5,#0x20]
ldr r2,[r5,#0x40]
str r0,[r5,#0x40]
ldr r0,[r5,#0x1c]
ldr r3,[r5,#0x38]
str r0,[r5,#0x38]
ldr r1,[r5,#0x18]
ldr r0,[r5,#0x30]
str r1,[r5,#0x30]
ldr r12,[r5,#0x14]
ldr r1,[r5,#0x28]
str r12,[r5,#0x28]
ldr r12,[r5,#0x10]
str r12,[r5,#0x20]
ldr r12,[r5,#0xc]
str r12,[r5,#0x18]
ldr r12,[r5,#8]
str r12,[r5,#0x10]
ldr r12,[r5,#4]
str r12,[r5,#8]
ldr r12,[r5,#0x24]
sub r12,r12,r1
str r12,[r5,#4]
ldr r12,[r5,#0x2c]
sub r1,r12,r1
str r1,[r5,#0xc]
sub r1,r12,r0
str r1,[r5,#0x14]
ldr r1,[r5,#0x34]
sub r0,r1,r0
str r0,[r5,#0x1c]
sub r0,r1,r3
str r0,[r5,#0x24]
ldr r1,[r5,#0x3c]
sub r3,r1,r3
sub r1,r1,r2
str r1,[r5,#0x34]
str r3,[r5,#0x2c]
ldr r1,[r5,#0x44]
sub r1,r1,r2
str r1,[r5,#0x3c]
ldr r12,[r5,#0]
@ Overlap/windowing for indices 0..5 (r4 is 0 on entry, left over from
@ Loop_1's countdown); smlal accumulates window[i] * value into the
@ running 64-bit pair, keeping only the high word.
Loop_2:
add r1,r5,r4,lsl #2
ldr r2,[r1,#0x28]
ldr r3,[r6,r4,lsl #2]
add r0,r0,r2
str r0,[r1,#0x28]
ldr lr,[r7,r4,lsl #2]
ldr r1,[r1,#4]
smlal r0,r3,lr,r0
mov r0,r2
add r2,r12,r1
rsb r2,r2,#0
str r3,[r5,r4,lsl #2]
str r2,[r6,r4,lsl #2]
add r4,r4,#1
cmp r4,#6
mov r12,r1
blt Loop_2
@ Remaining indices: fully unrolled windowing + overlap update.
ldr r1,[r5,#0x40]
ldr r2,[r6,#0x18]
add r3,r0,r1
str r3,[r5,#0x40]
ldr lr,[r7,r4,lsl #2]
mov r3,r3,lsl #1
ldr r0,[r5,#0x1c]
smlal r3,r2,lr,r3
add r3,r12,r0
str r2,[r5,#0x18]
ldr r2,[r6,#0x1c]
rsb r3,r3,#0
str r3,[r6,#0x18]
ldr r3,[r5,#0x20]
add r0,r3,r0
rsb r0,r0,#0
str r0,[r6,#0x1c]
ldr r3,[r5,#0x44]
ldr r0,[r6,#0x20]
add r3,r3,r1
mov r1,r2
ldr r10,[r7,#0x1c]
mov r2,r3,lsl #1
smlal r12,r1,r10,r2
str r1,[r5,#0x1c]
ldr r1,[r5,#0x20]
ldr r3,[r5,#0x24]
add r1,r1,r3
rsb r1,r1,#0
str r1,[r6,#0x20]
ldr r1,[r5,#0x44]
ldr r3,[r7,#0x20]
mov r1,r1,lsl #1
smlal r12,r0,r3,r1
ldr lr,[r7,#0x24]
ldr r3,[r6,#0x24]
str r0,[r5,#0x20]
smlal r1,r3,lr,r1
ldr r0,[r6,#0x40]
ldr r12,[r6,#0x44]
str r3,[r5,#0x24]
ldr r1,[r5,#0x28]
ldr r3,[r7,#0x44]
mov r1,r1,lsl #1
smlal r1,r12,r3,r1
ldr r1,[r5,#0x40]
str r12,[r5,#0x44]
rsb r8,r1,#0
str r8,[r5,#0x28]
ldr r1,[r5,#0x2c]
ldr r3,[r7,#0x40]
mov r1,r1,lsl #1
smlal r1,r0,r3,r1
str r0,[r5,#0x40]
ldr r0,[r5,#0x3c]
ldr r1,[r6,#0x38]
ldr r3,[r6,#0x3c]
rsb r9,r0,#0
str r9,[r5,#0x2c]
ldr r0,[r5,#0x30]
ldr r12,[r7,#0x3c]
mov r0,r0,lsl #1
smlal r0,r3,r12,r0
str r3,[r5,#0x3c]
ldr r0,[r5,#0x38]
rsb r0,r0,#0
str r0,[r5,#0x30]
ldr r3,[r5,#0x34]
ldr r12,[r7,#0x38]
mov r3,r3,lsl #1
smlal r3,r1,r12,r3
mov r0,r0,lsl #1
str r1,[r5,#0x38]
ldr r4,[r7,#0x34]
ldr r1,[r6,#0x34]
ldr r3,[r6,#0x30]
smlal r0,r1,r4,r0
ldr r12,[r6,#0x2c]
ldr lr,[r6,#0x28]
str r1,[r5,#0x34]
ldr r1,[r7,#0x30]
mov r0,r9,lsl #1
smlal r0,r3,r1,r0
mov r0,r8,lsl #1
ldr r1,[r7,#0x2c]
str r3,[r5,#0x30]
smlal r0,r12,r1,r0
ldr r0,[r7,#0x28]
str r12,[r5,#0x2c]
smlal r2,lr,r0,r2
str lr,[r5,#0x28]
@ History-buffer update: multiply saved samples by the second half of the
@ coefficient table (offsets 0x48..0x8c), keeping smull high words only.
ldr r1,[r6,#4]
ldr r12,[r7,#0x48]
mov r2,r1,lsl #1
ldr r1,[r6,#0x20]
ldr r0,[r6,#0]
mov r1,r1,lsl #1
smull r4,lr,r12,r1
ldr r3,[r6,#0x1c]
str lr,[r6,#0]
ldr r12,[r7,#0x4c]
mov r3,r3,lsl #1
smull r4,lr,r12,r3
mov r0,r0,lsl #1
ldr r12,[r7,#0x64]
str lr,[r6,#4]
smull r4,lr,r12,r2
ldr r12,[r7,#0x68]
str lr,[r6,#0x1c]
smull r4,lr,r12,r0
ldr r12,[r7,#0x6c]
str lr,[r6,#0x20]
smull lr,r0,r12,r0
ldr r12,[r7,#0x70]
str r0,[r6,#0x24]
smull r0,r2,r12,r2
ldr r0,[r7,#0x88]
str r2,[r6,#0x28]
smull r3,r2,r0,r3
ldr r0,[r7,#0x8c]
str r2,[r6,#0x40]
smull r2,r1,r0,r1
str r1,[r6,#0x44]
ldr r0,[r6,#0x18]
ldr lr,[r7,#0x50]
mov r1,r0,lsl #1
ldr r0,[r6,#0x14]
smull r5,r4,lr,r1
mov r3,r0,lsl #1
ldr r0,[r6,#0x10]
mov r12,r0,lsl #1
ldr r0,[r6,#0xc]
mov r2,r0,lsl #1
ldr r0,[r6,#8]
str r4,[r6,#8]
ldr lr,[r7,#0x54]
mov r0,r0,lsl #1
smull r5,r4,lr,r3
ldr lr,[r7,#0x58]
str r4,[r6,#0xc]
smull r5,r4,lr,r12
ldr lr,[r7,#0x5c]
str r4,[r6,#0x10]
smull r5,r4,lr,r2
ldr lr,[r7,#0x60]
str r4,[r6,#0x14]
smull r5,r4,lr,r0
ldr lr,[r7,#0x74]
str r4,[r6,#0x18]
smull r4,r0,lr,r0
ldr lr,[r7,#0x78]
str r0,[r6,#0x2c]
smull r0,r2,lr,r2
ldr r0,[r7,#0x7c]
str r2,[r6,#0x30]
smull r12,r2,r0,r12
ldr r0,[r7,#0x80]
str r2,[r6,#0x34]
smull r3,r2,r0,r3
ldr r0,[r7,#0x84]
str r2,[r6,#0x38]
smull r2,r1,r0,r1
str r1,[r6,#0x3c]
ldmfd sp!,{r4-r11,pc}
@------------------------------------------------------------------------------
@ Local coefficient tables addressed PC-relative via adr above.
constdata$1:
@ 9 pre-twiddle cosine terms (consumed by Loop_1's descending pointer r12
@ after the +0x24/+0x44 offset arithmetic at function entry).
cosTerms_dct18:
.word 0x0807d2b0
.word 0x08483ee0
.word 0x08d3b7d0
.word 0x09c42570
.word 0x0b504f30
.word 0x0df29440
.word 0x12edfb20
.word 0x1ee8dd40
.word 0x5bca2a00
@ 18 reciprocal-cosine terms (1/cos(phi)); consumed ascending by Loop_1.
cosTerms_1_ov_cos_phi:
.word 0x400f9c00
.word 0x408d6080
.word 0x418dcb80
.word 0x431b1a00
.word 0x4545ea00
.word 0x48270680
.word 0x4be25480
.word 0x50ab9480
.word 0x56ce4d80
.word 0x05ebb630
.word 0x06921a98
.word 0x0771d3a8
.word 0x08a9a830
.word 0x0a73d750
.word 0x0d4d5260
.word 0x127b1ca0
.word 0x1ea52b40
.word 0x5bb3cc80
|
ProjectEverest/frameworks_av | 6,280 | media/module/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_gcc.s | @ ------------------------------------------------------------------
@ Copyright (C) 1998-2009 PacketVideo
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ http://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
@ express or implied.
@ See the License for the specific language governing permissions
@ and limitations under the License.
@ -------------------------------------------------------------------
@
@
@ Filename: pvmp3_polyphase_filter_window.s
@
@------------------------------------------------------------------------------
@ REVISION HISTORY
@
@
@ Who: Date: MM/DD/YYYY
@ Description:
@
@------------------------------------------------------------------------------
.arm
.align 4
.text
.extern pqmfSynthWin
.hidden pqmfSynthWin
@------------------------------------------------------------------------------
@------------------------------------------------------------------------------
@ pvmp3_polyphase_filter_window — windowing stage of the polyphase synthesis
@ filterbank: MACs the synthesis buffer against pqmfSynthWin and writes
@ saturated 16-bit PCM samples.
@ In:    r0 = synthesis buffer (int32*; read at large word offsets, so it
@             must span the full window history)
@        r1 (saved at [sp,#8]) = output PCM buffer (int16*)
@        r2 (saved at [sp,#0xc]) = channel count; [sp] holds (count-1),
@             used as a shift to interleave output samples
@ Out:   32 int16 samples stored through the [sp,#8] pointer.
@ ABI:   AAPCS; r4-r11 preserved.  Window table located PC-relative via the
@        pqmfSynthWin-PolyPh_filter_coeff offset word (PIC-safe).
@ Saturation idiom used throughout:
@   mov rT,acc,asr #15 ; teq rT,acc,asr #31  -> Z clear iff acc overflows
@   eorne acc,0x7fff,acc,asr #31             -> clamp to +/-0x7fff
@------------------------------------------------------------------------------
.global pvmp3_polyphase_filter_window
pvmp3_polyphase_filter_window:
stmfd sp!,{r0-r2,r4-r11,lr}
sub sp,sp,#4
adr r2,PolyPh_filter_coeff
ldr r1,[r2]
add r1,r2 @ r1 = pqmfSynthWin (PIC)
ldr r2,[sp,#0xc] @ third argument (channel count)
sub r2,r2,#1
mov r10,#1 @ j = 1 (outer loop index)
str r2,[sp] @ [sp] = count-1, the interleave shift
@ Accumulators r9, r11::> Initialization
@ Outer loop: one pair of output samples (j and 32-j) per iteration.
Loop_j:
mov r9, #0x20 @ rounding bias for accumulator 1
mov r11, #0x20 @ rounding bias for accumulator 2
mov r4, #0x10
@ Inner loop: 16 window taps per accumulator per pass; each iteration
@ advances the synth index by 0x200 words and the window pointer by
@ 0x40 bytes.  Mixed smlal/smull+sub implements +/- tap signs.
Loop_i:
add r2,r4,r10
add r3,r0,r2,lsl #2
sub r2,r4,r10
ldr r5,[r3]
ldr lr,[r1]
add r12,r0,r2,lsl #2
ldr r6,[r12,#0x780]
smlal r2,r9,lr,r5
smlal r2,r11,lr,r6
ldr r2,[r1,#4]
ldr r7,[r12,#0x80]
smlal r5,r11,r2,r5
smull r6,r5,r2,r6
sub r9,r9,r5
ldr r5,[r1,#8]
ldr r8,[r3,#0x700]
add r4,r4,#0x200
smlal r6,r9,r5,r7
smull r6,r2,r5,r8
ldr r5,[r1,#0xc]
sub r11,r11,r2
smlal r8,r9,r5,r8
smlal r7,r11,r5,r7
ldr r5,[r3,#0x100]
ldr r2,[r1,#0x10]
ldr r6,[r12,#0x680]
smlal lr,r9,r2,r5
smlal lr,r11,r2,r6
ldr r2,[r1,#0x14]
ldr r7,[r12,#0x180]
smlal r5,r11,r2,r5
smull r6,r5,r2,r6
ldr r6,[r1,#0x18]
ldr r8,[r3,#0x600]
sub r9,r9,r5
smlal r5,r9,r6,r7
smull r2,r5,r6,r8
ldr r6,[r1,#0x1c]
sub r11,r11,r5
smlal r8,r9,r6,r8
ldr r2,[r1,#0x20]
ldr r5,[r3,#0x200]
smlal r7,r11,r6,r7
ldr r6,[r12,#0x580]
smlal lr,r9,r2,r5
smlal lr,r11,r2,r6
ldr r2,[r1,#0x24]
ldr r7,[r12,#0x280]
smlal r5,r11,r2,r5
smull r6,r5,r2,r6
ldr r6,[r1,#0x28]
ldr r8,[r3,#0x500]
sub r9,r9,r5
smlal r5,r9,r6,r7
smull r2,r5,r6,r8
ldr r6,[r1,#0x2c]
sub r11,r11,r5
smlal r8,r9,r6,r8
smlal r7,r11,r6,r7
ldr r5,[r3,#0x300]
ldr r8,[r1,#0x30]
ldr r6,[r12,#0x480]
smlal r7,r9,r8,r5
smlal r7,r11,r8,r6
ldr r8,[r1,#0x34]
ldr r12,[r12,#0x380]
smlal r5,r11,r8,r5
smull r6,r5,r8,r6
ldr r6,[r1,#0x38]
ldr r3,[r3,#0x400]
sub r9,r9,r5
smlal r7,r9,r6,r12
smull r8,r7,r6,r3
cmp r4,#0x210
sub r11,r11,r7
ldr r2,[r1,#0x3c]
add r1,r1,#0x40
smlal r3,r9,r2,r3
smlal r12,r11,r2,r12
blt Loop_i
@ Scale (>>6), saturate to int16, and store sample j interleaved by the
@ channel shift; then the mirrored sample (32-j)<<shift.
mov r3,r9, asr #6
mov r4,r3, asr #15
teq r4,r3, asr #31
ldr r12,LOW_16BITS
ldr r2,[sp]
eorne r3,r12,r3,asr #31
ldr r4,[sp,#8]
mov r2,r10,lsl r2
add r4,r4,r2,lsl #1
strh r3,[r4]
mov r3,r11,asr #6
mov r4,r3,asr #15
teq r4,r3,asr #31
eorne r3,r12,r3,asr #31
ldr r12,[sp,#0xc]
ldr r11,[sp,#8]
rsb r2,r2,r12,lsl #5
add r2,r11,r2,lsl #1
strh r3,[r2]
add r10,r10,#1
cmp r10,#0x10
blt Loop_j
@ Accumulators r4, r5 Initialization
@ Tail: samples 0 and 16 use a shorter 6-tap-per-iteration kernel.
mov r4,#0x20
mov r5,#0x20
mov r3,#0x10
PolyPh_filter_loop2:
add r2,r0,r3,lsl #2
ldr r12,[r2]
ldr r8,[r1]
ldr r6,[r2,#0x80]
smlal r12,r4,r8,r12
ldr r12,[r1,#4]
ldr r7,[r2,#0x40]
smlal r6,r4,r12,r6
ldr r12,[r1,#8]
ldr r6,[r2,#0x180]
smlal r7,r5,r12,r7
ldr r12,[r2,#0x100]
ldr r7,[r1,#0xc]
ldr r2,[r2,#0x140]
smlal r12,r4,r7,r12
ldr r12,[r1,#0x10]
add r3,r3,#0x80
smlal r6,r4,r12,r6
ldr r6,[r1,#0x14]
cmp r3,#0x210
smlal r2,r5,r6,r2
add r1,r1,#0x18
blt PolyPh_filter_loop2
@ Saturate/store sample 0 and sample 16<<shift.
mov r0,r4,asr #6
mov r2,r0,asr #15
teq r2,r0,asr #31
ldrne r12,LOW_16BITS
ldr r1,[sp,#8]
eorne r0,r12,r0,asr #31
strh r0,[r1,#0]
mov r0,r5,asr #6
mov r2,r0,asr #15
teq r2,r0,asr #31
ldrne r12,LOW_16BITS
ldr r2,[sp]
mov r1,#0x10
eorne r0,r12,r0,asr #31
ldr r12,[sp,#0xc]
mov r1,r1,lsl r2
add r1,r12,r1,lsl #1
strh r0,[r1]
add sp,sp,#0x10 @ pop the #4 pad plus the saved r0-r2
ldmfd sp!,{r4-r11,pc}
@ PIC offset to the shared synthesis window table.
PolyPh_filter_coeff:
.word pqmfSynthWin-PolyPh_filter_coeff
@ int16 saturation limit, loaded PC-relative.
LOW_16BITS:
.word 0x00007fff
|
ProjectEverest/frameworks_av | 2,769 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/Dot_p_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@Word32 Dot_product12( /* (o) Q31: normalized result (-1 < val <= 1) */
@ Word16 x[], /* (i) 12bits: x vector */
@ Word16 y[], /* (i) 12bits: y vector */
@ Word16 lg, /* (i) : vector length */
@ Word16 * exp /* (o) : exponent of result (0..+30) */
@)
@****************************************************************
@ x[] --- r0
@ y[] --- r1
@ lg --- r2
@ *exp --- r3
.section .text
@------------------------------------------------------------------------------
@ Dot_product12_asm — normalized dot product of two Word16 vectors.
@ In:    r0 = x[], r1 = y[], r2 = lg (vector length), r3 = exp (Word16*)
@ Out:   r0 = normalized sum in Q31; *exp = 30 - norm shift.
@ The loop consumes 8 samples per iteration (four 32-bit loads per vector,
@ SMLABB/SMLATT on each packed pair), so lg MUST be a nonzero multiple of 8.
@ 32-bit MACs without saturation — safe for the 12-bit inputs this routine
@ is specified for.  AAPCS; r4-r12 saved/restored.
@------------------------------------------------------------------------------
.global Dot_product12_asm
Dot_product12_asm:
STMFD r13!, {r4 - r12, r14}
MOV r4, #0 @ L_sum = 0
MOV r5, #0 @ i = 0
LOOP:
LDR r6, [r0], #4 @ packed x[i+1]|x[i]
LDR r7, [r1], #4 @ packed y[i+1]|y[i]
LDR r8, [r0], #4
SMLABB r4, r6, r7, r4 @ += x[i] * y[i]
LDR r9, [r1], #4
SMLATT r4, r6, r7, r4 @ += x[i+1] * y[i+1]
LDR r6, [r0], #4
SMLABB r4, r8, r9, r4
LDR r7, [r1], #4
SMLATT r4, r8, r9, r4
LDR r8, [r0], #4
SMLABB r4, r6, r7, r4
LDR r9, [r1], #4
SMLATT r4, r6, r7, r4
ADD r5, r5, #8
SMLABB r4, r8, r9, r4
CMP r5, r2
SMLATT r4, r8, r9, r4
BLT LOOP
@ Normalize: L_sum = (L_sum << 1) + 1 is always odd, so never zero and
@ CLZ on |L_sum| is well-defined.
MOV r12, r4, LSL #1
ADD r12, r12, #1 @ L_sum = (L_sum << 1) + 1
MOV r4, r12
CMP r12, #0
RSBLT r4, r12, #0 @ r4 = |L_sum|
CLZ r10, r4
SUB r10, r10, #1 @ sft = norm_l(L_sum)
MOV r0, r12, LSL r10 @ L_sum = L_sum << sft
RSB r11, r10, #30 @ *exp = 30 - sft
STRH r11, [r3]
Dot_product12_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 5,676 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/cor_h_vec_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@static void cor_h_vec_012(
@ Word16 h[], /* (i) scaled impulse response */
@ Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
@ Word16 track, /* (i) track to use */
@ Word16 sign[], /* (i) sign vector */
@ Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
@ Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
@ Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
@)
@r0 ---- h[]
@r1 ---- vec[]
@r2 ---- track
@r3 ---- sign[]
@r4 ---- rrixix[][NB_POS]
@r5 ---- cor_1[]
@r6 ---- cor_2[]
.section .text
@------------------------------------------------------------------------------
@ cor_h_vec_012_asm — correlation of the impulse response h[] with vec[]
@ for tracks 0/1/2 of the ACELP codebook search.
@ In:    r0 = h[], r1 = vec[], r2 = track (0..2), r3 = sign[]
@        [sp,#40] = rrixix[][NB_POS], [sp,#44] = cor_1[], [sp,#48] = cor_2[]
@        (stack offsets are relative to the 10-register STMFD below)
@ Out:   cor_1[] and cor_2[] filled with 16 results each (i = 0..31 in
@        pairs; two unrolled copies of the body per LOOPi iteration).
@ Each half computes L_sum1/L_sum2 over j = 62-pos down to 0 (BGE, so the
@ count is 63-pos), applies <<2 and +0x8000 rounding, multiplies by
@ sign[pos]/sign[pos+1] (Q15), and adds the rrixix autocorrelation term.
@ AAPCS; r4-r12 saved/restored.
@------------------------------------------------------------------------------
.global cor_h_vec_012_asm
cor_h_vec_012_asm:
STMFD r13!, {r4 - r12, r14}
LDR r4, [r13, #40] @load rrixix[][NB_POS]
ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
MOV r4, #0 @i=0
@r0 --- h[], r1 --- vec[], r2 --- pos
@r3 --- sign[], r4 --- i, r7 --- p0
LOOPi:
MOV r5, #0 @L_sum1 = 0
MOV r6, #0 @L_sum2 = 0
ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
MOV r10, r0 @p1 = h
RSB r11, r2, #62 @j=62-pos
LOOPj1:
LDRSH r12, [r10], #2
LDRSH r8, [r9], #2
LDRSH r14, [r9]
SUBS r11, r11, #1
MLA r5, r12, r8, r5
MLA r6, r12, r14, r6
BGE LOOPj1
@ One extra tap for L_sum1 using the last r14 from the loop above.
LDRSH r12, [r10], #2 @*p1++
MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
MLA r5, r12, r14, r5
MOV r14, #0x8000 @ rounding constant
MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
ADD r10, r6, r14
ADD r9, r5, r14
MOV r5, r9, ASR #16 @ round to Word16
MOV r6, r10, ASR #16
ADD r9, r3, r2, LSL #1 @address of sign[pos]
ADD r8, r7, #32
LDRSH r10, [r9], #2 @sign[pos]
LDRSH r11, [r9] @sign[pos + 1]
MUL r12, r5, r10
MUL r14, r6, r11
MOV r5, r12, ASR #15 @ Q15 multiply by sign
MOV r6, r14, ASR #15
LDR r9, [r13, #44] @ cor_1[]
LDR r12, [r13, #48] @ cor_2[]
LDRSH r10, [r7], #2 @*p0++
LDRSH r11, [r8] @*p3++
ADD r9, r9, r4, LSL #1
ADD r12, r12, r4, LSL #1
ADD r5, r5, r10
ADD r6, r6, r11
STRH r5, [r9]
STRH r6, [r12]
ADD r2, r2, #4 @ pos += STEP
@ Second unrolled copy of the body (i+1).
MOV r5, #0 @L_sum1 = 0
MOV r6, #0 @L_sum2 = 0
ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
MOV r10, r0 @p1 = h
RSB r11, r2, #62 @j=62-pos
ADD r4, r4, #1 @i++
LOOPj2:
LDRSH r12, [r10], #2
LDRSH r8, [r9], #2
LDRSH r14, [r9]
SUBS r11, r11, #1
MLA r5, r12, r8, r5
MLA r6, r12, r14, r6
BGE LOOPj2
LDRSH r12, [r10], #2 @*p1++
MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
MLA r5, r12, r14, r5
MOV r14, #0x8000
MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
ADD r10, r6, r14
ADD r9, r5, r14
MOV r5, r9, ASR #16
MOV r6, r10, ASR #16
ADD r9, r3, r2, LSL #1 @address of sign[pos]
ADD r8, r7, #32
LDRSH r10, [r9], #2 @sign[pos]
LDRSH r11, [r9] @sign[pos + 1]
MUL r12, r5, r10
MUL r14, r6, r11
MOV r5, r12, ASR #15
MOV r6, r14, ASR #15
LDR r9, [r13, #44]
LDR r12, [r13, #48]
LDRSH r10, [r7], #2 @*p0++
LDRSH r11, [r8] @*p3++
ADD r9, r9, r4, LSL #1
ADD r12, r12, r4, LSL #1
ADD r5, r5, r10
ADD r6, r6, r11
STRH r5, [r9]
STRH r6, [r12]
ADD r4, r4, #1 @i+1
ADD r2, r2, #4 @pos += STEP
CMP r4, #16
BLT LOOPi
the_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 6,726 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/convolve_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@*void Convolve (
@* Word16 x[], /* (i) : input vector */
@* Word16 h[], /* (i) : impulse response */
@* Word16 y[], /* (o) : output vector */
@* Word16 L /* (i) : vector size */
@*)
@ r0 --- x[]
@ r1 --- h[]
@ r2 --- y[]
@ r3 --- L
.section .text
@------------------------------------------------------------------------------
@ Convolve_asm — y[n] = round(2 * sum_{i=0..n} x[i]*h[n-i]) >> 16 for
@ n = 0..63 (convolution of x with impulse response h).
@ In:    r0 = x[], r1 = h[], r2 = y[], r3 = L (NOMINALLY — see note)
@ Out:   y[0..63] written as Word16.
@ NOTE(review): the L argument in r3 is immediately overwritten (MOV r3,#0)
@ and the loop bound is hard-coded to 64 (CMP r3,#64) — this routine only
@ works for L == L_SUBFR == 64.  All callers in this codec pass 64; confirm
@ before reusing elsewhere.
@ Each outer iteration produces four outputs (n, n+1, n+2, n+3); the four
@ preambles differ only in how many taps are consumed before the 4-at-a-time
@ inner loop, so that each inner loop runs a multiple-of-4 count.
@ Rounding: s = (acc << 1) + 0x8000; y[n] = s >> 16.  AAPCS.
@------------------------------------------------------------------------------
.global Convolve_asm
Convolve_asm:
STMFD r13!, {r4 - r12, r14}
MOV r3, #0 @ n
MOV r11, #0x8000 @ rounding constant
LOOP:
@ Output n (n ≡ 0 mod 4): 1 tap preamble, then 4-tap inner loop.
ADD r4, r1, r3, LSL #1 @ tmpH address
ADD r5, r3, #1 @ i = n + 1
MOV r6, r0 @ tmpX = x
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
SUB r5, r5, #1
MUL r8, r9, r10
LOOP1:
CMP r5, #0
BLE L1
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
LDRSH r12, [r6], #2 @ *tmpX++
LDRSH r14, [r4], #-2 @ *tmpH--
MLA r8, r9, r10, r8
MLA r8, r12, r14, r8
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
LDRSH r12, [r6], #2 @ *tmpX++
LDRSH r14, [r4], #-2 @ *tmpH--
MLA r8, r9, r10, r8
SUBS r5, r5, #4
MLA r8, r12, r14, r8
B LOOP1
L1:
ADD r5, r11, r8, LSL #1
MOV r5, r5, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r5, [r2], #2 @y[n]
@ Output n+1: 2-tap preamble.
ADD r4, r1, r3, LSL #1 @tmpH address
ADD r5, r3, #1
MOV r6, r0
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2
LDRSH r12, [r6], #2
LDRSH r14, [r4], #-2
MUL r8, r9, r10
SUB r5, r5, #2
MLA r8, r12, r14, r8
LOOP2:
CMP r5, #0
BLE L2
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
LDRSH r12, [r6], #2 @ *tmpX++
LDRSH r14, [r4], #-2 @ *tmpH--
MLA r8, r9, r10, r8
MLA r8, r12, r14, r8
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
LDRSH r12, [r6], #2 @ *tmpX++
LDRSH r14, [r4], #-2 @ *tmpH--
MLA r8, r9, r10, r8
SUBS r5, r5, #4
MLA r8, r12, r14, r8
B LOOP2
L2:
ADD r8, r11, r8, LSL #1
MOV r8, r8, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r8, [r2], #2 @y[n]
@ Output n+2: 3-tap preamble.
ADD r4, r1, r3, LSL #1
ADD r5, r3, #1
MOV r6, r0
LDRSH r9, [r6], #2
LDRSH r10, [r4], #-2
LDRSH r12, [r6], #2
LDRSH r14, [r4], #-2
MUL r8, r9, r10
LDRSH r9, [r6], #2
LDRSH r10, [r4], #-2
MLA r8, r12, r14, r8
SUB r5, r5, #3
MLA r8, r9, r10, r8
LOOP3:
CMP r5, #0
BLE L3
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
LDRSH r12, [r6], #2 @ *tmpX++
LDRSH r14, [r4], #-2 @ *tmpH--
MLA r8, r9, r10, r8
MLA r8, r12, r14, r8
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
LDRSH r12, [r6], #2 @ *tmpX++
LDRSH r14, [r4], #-2 @ *tmpH--
MLA r8, r9, r10, r8
SUBS r5, r5, #4
MLA r8, r12, r14, r8
B LOOP3
L3:
ADD r8, r11, r8, LSL #1
MOV r8, r8, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r8, [r2], #2 @y[n]
@ Output n+3: no preamble (n+4 taps is a multiple of 4).
ADD r5, r3, #1 @ i = n + 1
ADD r4, r1, r3, LSL #1 @ tmpH address
MOV r6, r0
MOV r8, #0
LOOP4:
CMP r5, #0
BLE L4
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
LDRSH r12, [r6], #2 @ *tmpX++
LDRSH r14, [r4], #-2 @ *tmpH--
MLA r8, r9, r10, r8
MLA r8, r12, r14, r8
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2 @ *tmpH--
LDRSH r12, [r6], #2 @ *tmpX++
LDRSH r14, [r4], #-2 @ *tmpH--
MLA r8, r9, r10, r8
SUBS r5, r5, #4
MLA r8, r12, r14, r8
B LOOP4
L4:
ADD r5, r11, r8, LSL #1
MOV r5, r5, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r5, [r2], #2 @y[n]
CMP r3, #64
BLT LOOP
Convolve_asm_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 11,702 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/Syn_filt_32_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Syn_filt_32(
@ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
@ Word16 m, /* (i) : order of LP filter */
@ Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
@ Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
@ Word16 sig_hi[], /* (o) /16 : synthesis high */
@ Word16 sig_lo[], /* (o) /16 : synthesis low */
@ Word16 lg /* (i) : size of filtering */
@)
@***************************************************************
@
@ a[] --- r0
@ m --- r1
@ exc[] --- r2
@ Qnew --- r3
@ sig_hi[] --- r4
@ sig_lo[] --- r5
@ lg --- r6
.section .text
@------------------------------------------------------------------------------
@ Syn_filt_32_asm — double-precision (hi/lo split) LP synthesis filter.
@ In:    r0 = a[] (Q12 coefficients, a[0..16]), r1 = m (IGNORED — the filter
@        order is hard-coded to 16 below), r2 = exc[], r3 = Qnew,
@        [sp,#40] = sig_hi[], [sp,#44] = sig_lo[], lg hard-coded to 64
@        (CMP r8,#64); the lg argument is never read.
@ Out:   sig_hi[0..63], sig_lo[0..63] written; sig_hi/sig_lo history at
@        negative indices is read, so callers must provide 16 samples of
@        history before each output pointer.
@ The 16 Aq coefficients are packed two-per-word and cached at
@ [r13,#-4 .. r13,#-32] so the loop can feed SMLABB/SMLABT directly.
@ NOTE(review): those stores are BELOW the stack pointer without adjusting
@ r13 — a red-zone-style trick that is not AAPCS-safe if anything
@ asynchronous (signal/IRQ on the same stack) runs between store and load.
@ Left as-is; flagging for awareness only.
@------------------------------------------------------------------------------
.global Syn_filt_32_asm
Syn_filt_32_asm:
STMFD r13!, {r4 - r12, r14}
LDR r4, [r13, #40] @ get sig_hi[] address
LDR r5, [r13, #44] @ get sig_lo[] address
LDRSH r6, [r0] @ load Aq[0]
ADD r7, r3, #4 @ 4 + Q_new
MOV r3, r6, ASR r7 @ a0 = Aq[0] >> (4 + Q_new)
LDR r14, =0xffff
@ Pack Aq[1..16] two per word (low half = odd index) and spill below sp.
LDRSH r6, [r0, #2] @ load Aq[1]
LDRSH r7, [r0, #4] @ load Aq[2]
LDRSH r8, [r0, #6] @ load Aq[3]
LDRSH r9, [r0, #8] @ load Aq[4]
AND r6, r6, r14
AND r8, r8, r14
ORR r10, r6, r7, LSL #16 @ Aq[2] -- Aq[1]
ORR r11, r8, r9, LSL #16 @ Aq[4] -- Aq[3]
STR r10, [r13, #-4]
STR r11, [r13, #-8]
LDRSH r6, [r0, #10] @ load Aq[5]
LDRSH r7, [r0, #12] @ load Aq[6]
LDRSH r8, [r0, #14] @ load Aq[7]
LDRSH r9, [r0, #16] @ load Aq[8]
AND r6, r6, r14
AND r8, r8, r14
ORR r10, r6, r7, LSL #16 @ Aq[6] -- Aq[5]
ORR r11, r8, r9, LSL #16 @ Aq[8] -- Aq[7]
STR r10, [r13, #-12]
STR r11, [r13, #-16]
LDRSH r6, [r0, #18] @ load Aq[9]
LDRSH r7, [r0, #20] @ load Aq[10]
LDRSH r8, [r0, #22] @ load Aq[11]
LDRSH r9, [r0, #24] @ load Aq[12]
AND r6, r6, r14
AND r8, r8, r14
ORR r10, r6, r7, LSL #16 @ Aq[10] -- Aq[9]
ORR r11, r8, r9, LSL #16 @ Aq[12] -- Aq[11]
STR r10, [r13, #-20]
STR r11, [r13, #-24]
LDRSH r6, [r0, #26] @ load Aq[13]
LDRSH r7, [r0, #28] @ load Aq[14]
LDRSH r8, [r0, #30] @ load Aq[15]
LDRSH r9, [r0, #32] @ load Aq[16]
AND r6, r6, r14
AND r8, r8, r14
ORR r10, r6, r7, LSL #16 @ Aq[14] -- Aq[13]
ORR r11, r8, r9, LSL #16 @ Aq[16] -- Aq[15]
STR r10, [r13, #-28]
STR r11, [r13, #-32]
MOV r8, #0 @ i = 0
@ Per-sample: accumulate the 16-tap low-word recursion, fold in
@ exc[i]*a0, then the 16-tap high-word recursion, and split the result
@ into sig_hi (top 16 bits) and sig_lo (remaining 12 bits).
LOOP:
LDRSH r6, [r5, #-2] @ load sig_lo[i-1]
LDRSH r7, [r5, #-4] @ load sig_lo[i-2]
LDR r11, [r13, #-4] @ Aq[2] -- Aq[1]
LDRSH r9, [r5, #-6] @ load sig_lo[i-3]
LDRSH r10, [r5, #-8] @ load sig_lo[i-4]
SMULBB r12, r6, r11 @ sig_lo[i-1] * Aq[1]
LDRSH r6, [r5, #-10] @ load sig_lo[i-5]
SMLABT r12, r7, r11, r12 @ sig_lo[i-2] * Aq[2]
LDR r11, [r13, #-8] @ Aq[4] -- Aq[3]
LDRSH r7, [r5, #-12] @ load sig_lo[i-6]
SMLABB r12, r9, r11, r12 @ sig_lo[i-3] * Aq[3]
LDRSH r9, [r5, #-14] @ load sig_lo[i-7]
SMLABT r12, r10, r11, r12 @ sig_lo[i-4] * Aq[4]
LDR r11, [r13, #-12] @ Aq[6] -- Aq[5]
LDRSH r10, [r5, #-16] @ load sig_lo[i-8]
SMLABB r12, r6, r11, r12 @ sig_lo[i-5] * Aq[5]
LDRSH r6, [r5, #-18] @ load sig_lo[i-9]
SMLABT r12, r7, r11, r12 @ sig_lo[i-6] * Aq[6]
LDR r11, [r13, #-16] @ Aq[8] -- Aq[7]
LDRSH r7, [r5, #-20] @ load sig_lo[i-10]
SMLABB r12, r9, r11, r12 @ sig_lo[i-7] * Aq[7]
LDRSH r9, [r5, #-22] @ load sig_lo[i-11]
SMLABT r12, r10, r11, r12 @ sig_lo[i-8] * Aq[8]
LDR r11, [r13, #-20] @ Aq[10] -- Aq[9]
LDRSH r10,[r5, #-24] @ load sig_lo[i-12]
SMLABB r12, r6, r11, r12 @ sig_lo[i-9] * Aq[9]
LDRSH r6, [r5, #-26] @ load sig_lo[i-13]
SMLABT r12, r7, r11, r12 @ sig_lo[i-10] * Aq[10]
LDR r11, [r13, #-24] @ Aq[12] -- Aq[11]
LDRSH r7, [r5, #-28] @ load sig_lo[i-14]
SMLABB r12, r9, r11, r12 @ sig_lo[i-11] * Aq[11]
LDRSH r9, [r5, #-30] @ load sig_lo[i-15]
SMLABT r12, r10, r11, r12 @ sig_lo[i-12] * Aq[12]
LDR r11, [r13, #-28] @ Aq[14] -- Aq[13]
LDRSH r10, [r5, #-32] @ load sig_lo[i-16]
SMLABB r12, r6, r11, r12 @ sig_lo[i-13] * Aq[13]
SMLABT r12, r7, r11, r12 @ sig_lo[i-14] * Aq[14]
LDR r11, [r13, #-32] @ Aq[16] -- Aq[15]
LDRSH r6, [r2],#2 @ load exc[i]
SMLABB r12, r9, r11, r12 @ sig_lo[i-15] * Aq[15]
SMLABT r12, r10, r11, r12 @ sig_lo[i-16] * Aq[16]
MUL r7, r6, r3 @ exc[i] * a0
RSB r14, r12, #0 @ L_tmp
MOV r14, r14, ASR #11 @ L_tmp >>= 11
ADD r14, r14, r7, LSL #1 @ L_tmp += (exc[i] * a0) << 1
LDRSH r6, [r4, #-2] @ load sig_hi[i-1]
LDRSH r7, [r4, #-4] @ load sig_hi[i-2]
LDR r11, [r13, #-4] @ Aq[2] -- Aq[1]
LDRSH r9, [r4, #-6] @ load sig_hi[i-3]
LDRSH r10, [r4, #-8] @ load sig_hi[i-4]
SMULBB r12, r6, r11 @ sig_hi[i-1] * Aq[1]
LDRSH r6, [r4, #-10] @ load sig_hi[i-5]
SMLABT r12, r7, r11, r12 @ sig_hi[i-2] * Aq[2]
LDR r11, [r13, #-8] @ Aq[4] -- Aq[3]
LDRSH r7, [r4, #-12] @ load sig_hi[i-6]
SMLABB r12, r9, r11, r12 @ sig_hi[i-3] * Aq[3]
LDRSH r9, [r4, #-14] @ load sig_hi[i-7]
SMLABT r12, r10, r11, r12 @ sig_hi[i-4] * Aq[4]
LDR r11, [r13, #-12] @ Aq[6] -- Aq[5]
LDRSH r10, [r4, #-16] @ load sig_hi[i-8]
SMLABB r12, r6, r11, r12 @ sig_hi[i-5] * Aq[5]
LDRSH r6, [r4, #-18] @ load sig_hi[i-9]
SMLABT r12, r7, r11, r12 @ sig_hi[i-6] * Aq[6]
LDR r11, [r13, #-16] @ Aq[8] -- Aq[7]
LDRSH r7, [r4, #-20] @ load sig_hi[i-10]
SMLABB r12, r9, r11, r12 @ sig_hi[i-7] * Aq[7]
LDRSH r9, [r4, #-22] @ load sig_hi[i-11]
SMLABT r12, r10, r11, r12 @ sig_hi[i-8] * Aq[8]
LDR r11, [r13, #-20] @ Aq[10] -- Aq[9]
LDRSH r10,[r4, #-24] @ load sig_hi[i-12]
SMLABB r12, r6, r11, r12 @ sig_hi[i-9] * Aq[9]
LDRSH r6, [r4, #-26] @ load sig_hi[i-13]
SMLABT r12, r7, r11, r12 @ sig_hi[i-10] * Aq[10]
LDR r11, [r13, #-24] @ Aq[12] -- Aq[11]
LDRSH r7, [r4, #-28] @ load sig_hi[i-14]
SMLABB r12, r9, r11, r12 @ sig_hi[i-11] * Aq[11]
LDRSH r9, [r4, #-30] @ load sig_hi[i-15]
SMLABT r12, r10, r11, r12 @ sig_hi[i-12] * Aq[12]
LDR r11, [r13, #-28] @ Aq[14] -- Aq[13]
LDRSH r10, [r4, #-32] @ load sig_hi[i-16]
SMLABB r12, r6, r11, r12 @ sig_hi[i-13] * Aq[13]
SMLABT r12, r7, r11, r12 @ sig_hi[i-14] * Aq[14]
LDR r11, [r13, #-32] @ Aq[16] -- Aq[15]
SMLABB r12, r9, r11, r12 @ sig_hi[i-15] * Aq[15]
SMLABT r12, r10, r11, r12 @ sig_hi[i-16] * Aq[16]
ADD r6, r12, r12 @ r12 << 1
SUB r14, r14, r6
MOV r14, r14, LSL #3 @ L_tmp <<=3
MOV r7, r14, ASR #16 @ L_tmp >> 16
MOV r14, r14, ASR #4 @ L_tmp >>=4
STRH r7, [r4], #2 @ sig_hi[i] = L_tmp >> 16
SUB r9, r14, r7, LSL #12 @ sig_lo[i] = L_tmp - (sig_hi[i] << 12)
ADD r8, r8, #1
STRH r9, [r5], #2
CMP r8, #64
BLT LOOP
Syn_filt_32_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 9,349 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/Filt_6k_7k_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@**********************************************************************/
@void Filt_6k_7k(
@ Word16 signal[], /* input: signal */
@ Word16 lg, /* input: length of input */
@ Word16 mem[] /* in/out: memory (size=30) */
@)
@******************************************************************
@ r0 --- signal[]
@ r1 --- lg
@ r2 --- mem[]
.section .text
@------------------------------------------------------------------------------
@ Filt_6k_7k_asm — 6-7 kHz band-pass FIR (31 taps) over one 16 kHz subframe.
@ In:    r0 = signal[] (in/out, 80 samples), r1 = lg (IGNORED — the sample
@        count is hard-coded to 80 below), r2 = mem[] (30-sample history)
@ Out:   signal[] filtered in place; mem[] updated with the last 30 inputs.
@ Working buffer: 240 bytes on the stack for
@ x[L_SUBFR16k + (L_FIR - 1)] = x[80 + 30] halfwords.
@ Pipeline: copy mem -> x[0..29]; copy signal>>2 -> x[30..109]; symmetric
@ FIR (tap pairs x[i+k]+x[i+30-k], coefficients read as packed halfword
@ pairs from fir_6k_7k via the PIC offset at Lable1); round with +0x4000
@ and >>15; finally copy x[80..109] back to mem.
@ AAPCS; r4-r12 saved/restored.  Calls voAWB_Copy (halfword memcpy).
@------------------------------------------------------------------------------
.global Filt_6k_7k_asm
.extern voAWB_Copy
.extern fir_6k_7k
.hidden fir_6k_7k
Filt_6k_7k_asm:
STMFD r13!, {r4 - r12, r14}
SUB r13, r13, #240 @ x[L_SUBFR16k + (L_FIR - 1)]
MOV r8, r0 @ copy signal[] address
MOV r4, r1 @ copy lg address
MOV r5, r2 @ copy mem[] address
MOV r1, r13
MOV r0, r2
MOV r2, #30 @ L_FIR - 1
BL voAWB_Copy @ memcpy(x, mem, (L_FIR - 1)<<1)
ADR r3, Lable1 @ get fir_7k address
LDR r10, [r3]
ADD r10, r3 @ r10 = fir_6k_7k (PIC)
MOV r14, #0
MOV r3, r8 @ change myMemCopy to Copy, due to Copy will change r3 content
ADD r6, r13, #60 @ get x[L_FIR - 1] address
MOV r7, r3 @ get signal[i]
@ Prescale: x[30+i] = signal[i] >> 2, 8 samples per iteration, 80 total.
LOOP1:
LDRSH r8, [r7], #2
LDRSH r9, [r7], #2
MOV r8, r8, ASR #2
MOV r9, r9, ASR #2
LDRSH r11, [r7], #2
LDRSH r12, [r7], #2
MOV r11, r11, ASR #2
MOV r12, r12, ASR #2
STRH r8, [r6], #2
STRH r9, [r6], #2
STRH r11, [r6], #2
STRH r12, [r6], #2
LDRSH r8, [r7], #2
LDRSH r9, [r7], #2
MOV r8, r8, ASR #2
MOV r9, r9, ASR #2
LDRSH r11, [r7], #2
LDRSH r12, [r7], #2
MOV r11, r11, ASR #2
MOV r12, r12, ASR #2
STRH r8, [r6], #2
STRH r9, [r6], #2
STRH r11, [r6], #2
STRH r12, [r6], #2
ADD r14, r14, #8
CMP r14, #80
BLT LOOP1
STR r5, [sp, #-4] @ PUSH r5 to stack
@ not use registers: r4, r10, r12, r14, r5
MOV r4, r13
MOV r5, #0 @ i = 0
@ Symmetric 31-tap FIR: coefficients fir_7k[2k],fir_7k[2k+1] arrive packed
@ in one 32-bit load; SMLABB uses the low half, SMLABT the high half.
LOOP2:
LDR r0, [r10]
LDRSH r1, [r4] @ load x[i]
LDRSH r2, [r4, #60] @ load x[i + 30]
LDRSH r6, [r4, #2] @ load x[i + 1]
LDRSH r7, [r4, #58] @ load x[i + 29]
ADD r1, r1, r2 @ x[i] + x[i + 30]
ADD r6, r6, r7 @ x[i + 1] + x[i + 29]
LDRSH r8, [r4, #4] @ load x[i + 2]
LDRSH r9, [r4, #56] @ load x[i + 28]
SMULBB r14, r1, r0 @ (x[i] + x[i + 30]) * fir_7k[0]
ADD r8, r8, r9 @ x[i + 2] + x[i + 28]
SMLABT r14, r6, r0, r14 @ (x[i + 1] + x[i + 29]) * fir_7k[1]
LDR r0, [r10, #4]
LDRSH r1, [r4, #6] @ load x[i+3]
LDRSH r2, [r4, #54] @ load x[i+27]
LDRSH r6, [r4, #8] @ load x[i+4]
LDRSH r7, [r4, #52] @ load x[i+26]
ADD r1, r1, r2 @ x[i+3] + x[i+27]
ADD r6, r6, r7 @ x[i+4] + x[i+26]
SMLABB r14, r8, r0, r14 @ (x[i + 2] + x[i + 28]) * fir_7k[2]
LDRSH r8, [r4, #10] @ load x[i+5]
LDRSH r9, [r4, #50] @ load x[i+25]
SMLABT r14, r1, r0, r14 @ (x[i+3] + x[i+27]) * fir_7k[3]
ADD r8, r8, r9 @ x[i+5] + x[i+25]
LDR r0, [r10, #8]
LDRSH r1, [r4, #12] @ x[i+6]
LDRSH r2, [r4, #48] @ x[i+24]
SMLABB r14, r6, r0, r14 @ (x[i+4] + x[i+26]) * fir_7k[4]
LDRSH r6, [r4, #14] @ x[i+7]
LDRSH r7, [r4, #46] @ x[i+23]
SMLABT r14, r8, r0, r14 @ (x[i+5] + x[i+25]) * fir_7k[5]
LDR r0, [r10, #12]
ADD r1, r1, r2 @ (x[i+6] + x[i+24])
ADD r6, r6, r7 @ (x[i+7] + x[i+23])
SMLABB r14, r1, r0, r14 @ (x[i+6] + x[i+24]) * fir_7k[6]
LDRSH r8, [r4, #16] @ x[i+8]
LDRSH r9, [r4, #44] @ x[i+22]
SMLABT r14, r6, r0, r14 @ (x[i+7] + x[i+23]) * fir_7k[7]
LDR r0, [r10, #16]
LDRSH r1, [r4, #18] @ x[i+9]
LDRSH r2, [r4, #42] @ x[i+21]
LDRSH r6, [r4, #20] @ x[i+10]
LDRSH r7, [r4, #40] @ x[i+20]
ADD r8, r8, r9 @ (x[i+8] + x[i+22])
ADD r1, r1, r2 @ (x[i+9] + x[i+21])
ADD r6, r6, r7 @ (x[i+10] + x[i+20])
SMLABB r14, r8, r0, r14 @ (x[i+8] + x[i+22]) * fir_7k[8]
LDRSH r8, [r4, #22] @ x[i+11]
LDRSH r9, [r4, #38] @ x[i+19]
SMLABT r14, r1, r0, r14 @ (x[i+9] + x[i+21]) * fir_7k[9]
LDR r0, [r10, #20]
LDRSH r1, [r4, #24] @ x[i+12]
LDRSH r2, [r4, #36] @ x[i+18]
SMLABB r14, r6, r0, r14 @ (x[i+10] + x[i+20]) * fir_7k[10]
LDRSH r6, [r4, #26] @ x[i+13]
ADD r8, r8, r9 @ (x[i+11] + x[i+19])
LDRSH r7, [r4, #34] @ x[i+17]
SMLABT r14, r8, r0, r14 @ (x[i+11] + x[i+19]) * fir_7k[11]
LDR r0, [r10, #24]
ADD r1, r1, r2 @ x[i+12] + x[i+18]
LDRSH r8, [r4, #28] @ x[i+14]
SMLABB r14, r1, r0, r14 @ (x[i+12] + x[i+18]) * fir_7k[12]
ADD r6, r6, r7 @ (x[i+13] + x[i+17])
LDRSH r9, [r4, #32] @ x[i+16]
SMLABT r14, r6, r0, r14 @ (x[i+13] + x[i+17]) * fir_7k[13]
LDR r0, [r10, #28]
ADD r8, r8, r9 @ (x[i+14] + x[i+16])
LDRSH r1, [r4, #30] @ x[i+15]
SMLABB r14, r8, r0, r14 @ (x[i+14] + x[i+16]) * fir_7k[14]
SMLABT r14, r1, r0, r14 @ x[i+15] * fir_7k[15]
ADD r5, r5, #1
ADD r14, r14, #0x4000 @ rounding before the Q15 shift
ADD r4, r4, #2
MOV r1, r14, ASR #15
CMP r5, #80
STRH r1, [r3], #2 @signal[i] = (L_tmp + 0x4000) >> 15
BLT LOOP2
@ Save the trailing 30 inputs as next call's history.
LDR r1, [sp, #-4] @mem address
ADD r0, r13, #160 @x + lg
MOV r2, #30
BL voAWB_Copy
Filt_6k_7k_end:
ADD r13, r13, #240
LDMFD r13!, {r4 - r12, r15}
@ PIC offset to the shared FIR coefficient table.
Lable1:
.word fir_6k_7k-Lable1
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 6,624 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/residu_asm_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Residu(
@ Word16 a[], /* (i) Q12 : prediction coefficients */
@ Word16 x[], /* (i) : speech (values x[-m..-1] are needed */
@ Word16 y[], /* (o) x2 : residual signal */
@ Word16 lg /* (i) : size of filtering */
@ )
@a[] --- r0
@x[] --- r1
@y[] --- r2
@lg --- r3
.section .text
.global Residu_opt
@-----------------------------------------------------------------------
@ void Residu(Word16 a[], Word16 x[], Word16 y[], Word16 lg)
@ LPC residual (inverse synthesis) filter, ARMv5E hand-optimised.
@ In:   r0 = a[]  Q12 prediction coefficients, 17 taps (a[0..16])
@       r1 = x[]  speech; x[-16..-1] before the pointer must be valid
@       r2 = y[]  output residual
@       r3 = lg   filter length; the loop count is lg >> 2, so lg is
@                 assumed to be a multiple of 4
@ Out:  y[0..lg-1] written
@ Stack: 5 words spill the packed a[6..15] pairs (reloaded inside the
@        loop because there are not enough registers to keep all taps).
@ Note: four residual samples are produced per iteration with four
@       interleaved accumulators r3/r4/r11/r12.
@-----------------------------------------------------------------------
Residu_opt:
STMFD r13!, {r4 - r12, r14}
@ Pack the coefficients two per register: a[2k] in the high halfword,
@ a[2k+1] in the low halfword, so SMLATx/SMLABx can select either tap
@ without extra loads.
LDRH r5, [r0], #2
LDRH r6, [r0], #2
ORR r5, r6, r5, LSL #16 @r5 --- a0, a1
LDRH r6, [r0], #2
LDRH r7, [r0], #2
ORR r6, r7, r6, LSL #16 @r6 --- a2, a3
LDRH r7, [r0], #2
LDRH r8, [r0], #2
ORR r7, r8, r7, LSL #16 @r7 --- a4, a5
LDRH r8, [r0], #2
LDRH r9, [r0], #2
ORR r8, r9, r8, LSL #16 @r8 --- a6, a7
LDRH r9, [r0], #2
LDRH r10, [r0], #2
ORR r9, r10, r9, LSL #16 @r9 --- a8, a9
LDRH r10, [r0], #2
LDRH r11, [r0], #2
ORR r10, r11, r10, LSL #16 @r10 --- a10, a11
LDRH r11, [r0], #2
LDRH r12, [r0], #2
ORR r11, r12, r11, LSL #16 @r11 --- a12, a13
LDRH r12, [r0], #2
LDRH r4, [r0], #2
ORR r12, r4, r12, LSL #16 @r12 --- a14, a15
@ a[6..15] pairs go to the stack; they are refetched as needed in the
@ loop while r8-r12 are reused as accumulators/temporaries.
STMFD r13!, {r8 - r12} @store r8-r12
LDRH r4, [r0], #2 @load a16
MOV r14, r3, ASR #2 @one loop get 4 outputs
ADD r1, r1, #4
MOV r0, r2
@ r14 packs the remaining iteration count in its high halfword and the
@ a16 coefficient in its low halfword (registers are that scarce).
ORR r14, r4, r14, LSL #16 @r14 --- loopnum, a16
residu_loop:
LDR r10, [r1], #-4 @r10 --- x[3], x[2]
LDR r2, [r1], #-4 @r2 --- x[1], x[0]
SMULTB r3, r5, r2 @i1(0) --- r3 = x[0] * a0
SMULTT r4, r5, r2 @i2(0) --- r4 = x[1] * a0
SMULTB r11, r5, r10 @i3(0) --- r11 = x[2] * a0
SMULTT r12, r5, r10 @i4(0) --- r12 = x[3] * a0
SMLABB r4, r5, r2, r4 @i2(1) --- r4 += x[0] * a1
SMLABT r11, r5, r2, r11 @i3(1) --- r11 += x[1] * a0
SMLABB r12, r5, r10, r12 @i4(1) --- r12 += x[2] * a1
SMLATB r11, r6, r2, r11 @i3(2) --- r11 += x[0] * a2
SMLATT r12, r6, r2, r12 @i4(2) --- r12 += x[1] * a2
SMLABB r12, r6, r2, r12 @i4(3) --- r12 += x[0] * a3
LDR r2, [r1], #-4 @r2 ---- x[-1], x[-2]
SMLABT r3, r5, r2, r3 @i1(1) --- r3 += x[-1] * a1
SMLATT r4, r6, r2, r4 @i2(2) --- r4 += x[-1] * a2
SMLABT r11, r6, r2, r11 @i3(3) --- r11 += x[-1] * a3
SMLATT r12, r7, r2, r12 @i4(4) --- r12 += x[-1] * a4
SMLATB r3, r6, r2, r3 @i1(2) --- r3 += x[-2] * a2
SMLABB r4, r6, r2, r4 @ i2 (3)
SMLATB r11,r7, r2, r11 @ i3 (4)
SMLABB r12,r7, r2, r12 @ i4 (5)
LDR r2,[r1],#-4
SMLABT r3, r6, r2, r3 @ i1 (3)
SMLATT r4, r7, r2, r4 @ i2 (4)
SMLABT r11,r7, r2, r11 @ i3 (5)
SMLATT r12,r8, r2, r12 @ i4 (6)
SMLATB r3, r7, r2, r3 @ i1 (4)
SMLABB r4, r7, r2, r4 @ i2 (5)
SMLATB r11,r8, r2, r11 @ i3 (6)
SMLABB r12,r8, r2, r12 @ i4 (7)
LDR r2,[r1],#-4
SMLABT r3, r7, r2, r3 @ i1 (5)
SMLATT r4, r8, r2, r4 @ i2 (6)
SMLABT r11,r8, r2, r11 @ i3 (7)
SMLATT r12,r9, r2, r12 @ i4 (8)
SMLATB r3, r8, r2, r3 @ i1 (6)
SMLABB r4, r8, r2, r4 @ i2 (7)
SMLATB r11,r9, r2, r11 @ i3 (8)
SMLABB r12,r9, r2, r12 @ i4 (9)
@ refetch spilled coefficient pairs from the stack as the window of
@ taps advances; r10/r8/r9 are recycled to hold them
LDR r10, [r13, #8] @ [ a10 | a11]
LDR r2,[r1],#-4
SMLABT r3, r8, r2, r3 @ i1 (7)
SMLATT r4, r9, r2, r4 @ i2 (8)
SMLABT r11,r9, r2, r11 @ i3 (9)
SMLATT r12,r10, r2, r12 @ i4 (10)
SMLATB r3, r9, r2, r3 @ i1 (8)
SMLABB r4, r9, r2, r4 @ i2 (9)
SMLATB r11,r10, r2, r11 @ i3 (10)
SMLABB r12,r10, r2, r12 @ i4 (11)
LDR r8, [r13, #12] @ [ a12 | a13 ]
LDR r2,[r1],#-4
SMLABT r3, r9, r2, r3 @ i1 (9)
SMLATT r4, r10, r2, r4 @ i2 (10)
SMLABT r11,r10, r2, r11 @ i3 (11)
SMLATT r12,r8, r2, r12 @ i4 (12)
SMLATB r3, r10, r2, r3 @ i1 (10)
SMLABB r4, r10, r2, r4 @ i2 (11)
SMLATB r11,r8, r2, r11 @ i3 (12)
SMLABB r12,r8, r2, r12 @ i4 (13)
LDR r9, [r13, #16] @ [ a14 | a15 ]
LDR r2,[r1],#-4
SMLABT r3, r10, r2, r3 @ i1 (11)
SMLATT r4, r8, r2, r4 @ i2 (12)
SMLABT r11,r8, r2, r11 @ i3 (13)
SMLATT r12,r9, r2, r12 @ i4 (14)
SMLATB r3, r8, r2, r3 @ i1 (12)
SMLABB r4, r8, r2, r4 @ i2 (13)
SMLATB r11,r9, r2, r11 @ i3 (14)
SMLABB r12,r9, r2, r12 @ i4 (15)
LDR r2,[r1],#-4
SMLABT r3, r8, r2, r3 @ i1 (13)
SMLATT r4, r9, r2, r4 @ i2 (14)
SMLABT r11,r9, r2, r11 @ i3 (15)
SMLABT r12,r14, r2, r12 @ i4 (16)  -- a16 lives in the low half of r14
SMLATB r3, r9, r2, r3 @ i1 (14)
SMLABB r4, r9, r2, r4 @ i2 (15)
SMLABB r11,r14, r2, r11 @ i3 (16)
LDR r8, [r13] @ [ a6 | a7 ]
@ #44 rewinds the x pointer past the 13 words consumed above and then
@ advances it by the 4 samples produced this iteration
LDR r2,[r1],#44 @ Change
SMLABT r3, r9, r2, r3
SMLABB r3, r14, r2, r3
SMLABT r4, r14, r2, r4
LDR r9, [r13, #4] @ [ a8 | a9 ]
@ Four saturating doublings = saturated << 4 (Q12 -> Q16 scaling)
QADD r3,r3,r3
QADD r4,r4,r4
QADD r11,r11,r11
QADD r12,r12,r12
QADD r3,r3,r3
QADD r4,r4,r4
QADD r11,r11,r11
QADD r12,r12,r12
QADD r3,r3,r3
QADD r4,r4,r4
QADD r11,r11,r11
QADD r12,r12,r12
QADD r3,r3,r3
QADD r4,r4,r4
QADD r11,r11,r11
QADD r12,r12,r12
MOV r2,#32768
@ QDADD acc,(0x8000),acc : final saturated doubling plus rounding bias
QDADD r3,r2,r3
QDADD r4,r2,r4
QDADD r11,r2,r11
QDADD r12,r2,r12
MOV r3,r3,asr #16
MOV r4,r4,asr #16
MOV r11,r11,asr #16
MOV r12,r12,asr #16
STRH r3,[r0],#2
STRH r4,[r0],#2
STRH r11,[r0],#2
STRH r12,[r0],#2
@ loop counter is the high halfword of r14; decrement it in place
MOV r2,r14,asr #16
SUB r14, r14, #0x10000
SUBS r2,r2,#1
BNE residu_loop
end:
LDMFD r13!, {r8 -r12} @drop the 5-word coefficient spill area
LDMFD r13!, {r4 -r12,pc}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 2,254 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/scale_sig_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Scale_sig(
@ Word16 x[], /* (i/o) : signal to scale */
@ Word16 lg, /* (i) : size of x[] */
@ Word16 exp /* (i) : exponent: x = round(x << exp) */
@ )
@
@r0 --- x[]
@r1 --- lg
@r2 --- exp
.section .text
.global Scale_sig_opt
@-----------------------------------------------------------------------
@ void Scale_sig(Word16 x[], Word16 lg, Word16 exp)
@ Scales the signal in place with rounding: x[i] = round(x[i] << exp),
@ saturating to 16 bits. The buffer is processed from x[lg-1] down.
@ In:   r0 = x[]   signal to scale (read/write)
@       r1 = lg    number of samples
@       r2 = exp   exponent; positive = left shift, <= 0 = right shift
@ Out:  x[] updated in place
@ Fix:  the original used `BL The_end`, which pointlessly writes the
@       link register; the epilogue reloads pc from the stacked r14, so
@       a plain `B` is equivalent and does not clobber r14.
@-----------------------------------------------------------------------
Scale_sig_opt:
STMFD r13!, {r4 - r12, r14}
SUB r3, r1, #1 @i = lg - 1
CMP r2, #0 @Compare exp and 0
RSB r7, r2, #0 @exp = -exp (right-shift amount for the exp <= 0 path)
ADD r10, r2, #16 @16 + exp (left-shift amount for the exp > 0 path)
ADD r4, r0, r3, LSL #1 @x[i] address (iterate backwards)
MOV r8, #0x7fffffff @MAX_32, used to build the saturated value
MOV r9, #0x8000 @rounding constant
BLE LOOP2
@ exp > 0: shift left with explicit overflow detection and saturation
LOOP1:
LDRSH r5, [r4] @load x[i]
MOV r12, r5, LSL r10 @L_tmp = x[i] << (16 + exp)
TEQ r5, r12, ASR r10 @shifting back recovers x[i] iff no overflow
EORNE r12, r8, r5, ASR #31 @overflow: saturate to +/-MAX_32 by sign
SUBS r3, r3, #1
QADD r11, r12, r9 @add rounding bias (saturating)
MOV r12, r11, ASR #16
STRH r12, [r4], #-2
BGE LOOP1
B The_end @plain branch; BL would clobber r14 for no benefit
@ exp <= 0: arithmetic right shift, then round
LOOP2:
LDRSH r5, [r4] @load x[i]
MOV r6, r5, LSL #16 @L_tmp = x[i] << 16
MOV r5, r6, ASR r7 @L_tmp >>= (-exp)
QADD r11, r5, r9 @add rounding bias (saturating)
MOV r12, r11, ASR #16
SUBS r3, r3, #1
STRH r12, [r4], #-2
BGE LOOP2
The_end:
LDMFD r13!, {r4 - r12, r15} @return: pc <- stacked r14
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 7,933 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/Norm_Corr_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@static void Norm_Corr (Word16 exc[], /* (i) : excitation buffer */
@ Word16 xn[], /* (i) : target vector */
@ Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
@ Word16 L_subfr, /* (i) : sub-frame length */
@ Word16 t_min, /* (i) : minimum value of pitch lag. */
@ Word16 t_max, /* (i) : maximum value of pitch lag. */
@ Word16 corr_norm[]) /* (o) Q15 : normalized correlation */
@
@ r0 --- exc[]
@ r1 --- xn[]
@ r2 --- h[]
@ r3 --- L_subfr
@ r4 --- t_min
@ r5 --- t_max
@ r6 --- corr_norm[]
.section .text
.global Norm_corr_asm
.extern Convolve_asm
.extern Isqrt_n
@******************************
@ constant
@******************************
.equ EXC , 0
.equ XN , 4
.equ H , 8
.equ L_SUBFR , 12
.equ voSTACK , 172
.equ T_MIN , 212
.equ T_MAX , 216
.equ CORR_NORM , 220
@-----------------------------------------------------------------------
@ Norm_Corr: normalized correlation of target xn[] with the filtered
@ past excitation, for every pitch lag t in [t_min, t_max].
@ In:   r0 = exc[], r1 = xn[], r2 = h[], r3 = L_subfr
@       stack: t_min / t_max / corr_norm[] at the .equ offsets above
@       (T_MIN/T_MAX/CORR_NORM are offsets from the lowered sp, i.e.
@       they reach into the caller-pushed argument area)
@ Out:  corr_norm[t] (Q15) for each lag t
@ Calls: Convolve_asm (excf = exc[-t_min] * h), Isqrt_n (1/sqrt energy)
@ Frame: voSTACK bytes; excf[L_SUBFR] lives at sp+20
@ Note: the inner loops are hard-wired for L_subfr == 64.
@-----------------------------------------------------------------------
Norm_corr_asm:
STMFD r13!, {r4 - r12, r14}
SUB r13, r13, #voSTACK
ADD r8, r13, #20 @get the excf[L_SUBFR]
LDR r4, [r13, #T_MIN] @get t_min
RSB r11, r4, #0 @k = -t_min
ADD r5, r0, r11, LSL #1 @get the &exc[k]
@transfer Convolve function: excf[] = conv(exc[-t_min], h)
STMFD sp!, {r0 - r3}
MOV r0, r5
MOV r1, r2
MOV r2, r8 @r2 --- excf[]
BL Convolve_asm
LDMFD sp!, {r0 - r3}
@ r8 --- excf[]
@ First compute the target energy: L_tmp = sum(xn[i]^2), i = 0..63,
@ 8 samples per iteration (each LDR holds two packed Word16s).
MOV r14, r1 @copy xn[] address
MOV r5, #64
MOV r6, #0 @L_tmp = 0
MOV r7, #1
LOOP1:
LDR r9, [r14], #4
LDR r10, [r14], #4
LDR r11, [r14], #4
LDR r12, [r14], #4
SMLABB r6, r9, r9, r6 @L_tmp += (xn[i] * xn[i])
SMLATT r6, r9, r9, r6 @L_tmp += (xn[i+1] * xn[i+1])
SMLABB r6, r10, r10, r6
SMLATT r6, r10, r10, r6
SMLABB r6, r11, r11, r6
SMLATT r6, r11, r11, r6
SMLABB r6, r12, r12, r6
SMLATT r6, r12, r12, r6
SUBS r5, r5, #8
BNE LOOP1
ADD r9, r7, r6, LSL #1 @L_tmp = (L_tmp << 1) + 1
CLZ r7, r9
SUB r6, r7, #1 @exp = norm_l(L_tmp)
RSB r7, r6, #32 @exp = 32 - exp
MOV r6, r7, ASR #1
RSB r7, r6, #0 @scale = -(exp >> 1)
@loop for every possible period
@for(t = t_min@ t <= t_max@ t++)
@r7 --- scale r4 --- t_min r8 --- excf[]
LOOPFOR:
@ Per lag: L_tmp  = sum(xn[i] * excf[i])   (cross-correlation)
@          L_tmp1 = sum(excf[i] * excf[i]) (filtered-excitation energy)
MOV r5, #0 @L_tmp = 0
MOV r6, #0 @L_tmp1 = 0
MOV r9, #64
MOV r12, r1 @copy of xn[]
ADD r14, r13, #20 @copy of excf[]
MOV r8, #0x8000
LOOPi:
LDR r11, [r14], #4 @load excf[i], excf[i+1]
LDR r10, [r12], #4 @load xn[i], xn[i+1]
SMLABB r6, r11, r11, r6 @L_tmp1 += excf[i] * excf[i]
SMLATT r6, r11, r11, r6 @L_tmp1 += excf[i+1] * excf[i+1]
SMLABB r5, r10, r11, r5 @L_tmp += xn[i] * excf[i]
SMLATT r5, r10, r11, r5 @L_tmp += xn[i+1] * excf[i+1]
LDR r11, [r14], #4 @load excf[i+2], excf[i+3]
LDR r10, [r12], #4 @load xn[i+2], xn[i+3]
SMLABB r6, r11, r11, r6
SMLATT r6, r11, r11, r6
SMLABB r5, r10, r11, r5
SMLATT r5, r10, r11, r5
SUBS r9, r9, #4
BNE LOOPi
@r5 --- L_tmp, r6 --- L_tmp1
MOV r10, #1
ADD r5, r10, r5, LSL #1 @L_tmp = (L_tmp << 1) + 1
ADD r6, r10, r6, LSL #1 @L_tmp1 = (L_tmp1 << 1) + 1
@ norm_l on a possibly-negative L_tmp: count leading bits of |L_tmp|
CLZ r10, r5
CMP r5, #0
RSBLT r11, r5, #0
CLZLT r10, r11
SUB r10, r10, #1 @exp = norm_l(L_tmp)
MOV r5, r5, LSL r10 @L_tmp = (L_tmp << exp)
RSB r10, r10, #30 @exp_corr = 30 - exp
MOV r11, r5, ASR #16 @corr = extract_h(L_tmp)
CLZ r5, r6
SUB r5, r5, #1
MOV r6, r6, LSL r5 @L_tmp = (L_tmp1 << exp)
RSB r5, r5, #30 @exp_norm = 30 - exp
@r10 --- exp_corr, r11 --- corr
@r6 --- L_tmp, r5 --- exp_norm
@Isqrt_n(&L_tmp, &exp_norm) -- in/out parameters live on the stack
MOV r14, r0
MOV r12, r1
STMFD sp!, {r0 - r4, r7 - r12, r14}
ADD r1, sp, #4
ADD r0, sp, #0
STR r6, [sp]
STRH r5, [sp, #4]
BL Isqrt_n
LDR r6, [sp]
LDRSH r5, [sp, #4]
LDMFD sp!, {r0 - r4, r7 - r12, r14}
MOV r0, r14
MOV r1, r12
MOV r6, r6, ASR #16 @norm = extract_h(L_tmp)
MUL r12, r6, r11
ADD r12, r12, r12 @L_tmp = vo_L_mult(corr, norm)
ADD r6, r10, r5
ADD r6, r6, r7 @exp_corr + exp_norm + scale
CMP r6, #0
RSBLT r6, r6, #0
MOVLT r12, r12, ASR r6
MOVGT r12, r12, LSL r6 @L_tmp = L_shl(L_tmp, exp_corr + exp_norm + scale)
ADD r12, r12, r8 @r8 still holds 0x8000: rounding bias
MOV r12, r12, ASR #16 @vo_round(L_tmp)
LDR r5, [r13, #CORR_NORM] @ get corr_norm address
LDR r6, [r13, #T_MAX] @ get t_max
ADD r10, r5, r4, LSL #1 @ get corr_norm[t] address
STRH r12, [r10] @ corr_norm[t] = vo_round(L_tmp)
CMP r4, r6
BEQ Norm_corr_asm_end
@ Update excf[] incrementally for the next lag instead of re-running
@ the convolution: excf[i] = (exc[-t]*h[i] >> 15) + excf[i-1],
@ working from i = 63 down to 1, then excf[0] = exc[-t]*h[0] >> 15.
ADD r4, r4, #1 @ t_min ++
RSB r5, r4, #0 @ k
MOV r6, #63 @ i = 63
MOV r8, r0 @ exc[]
MOV r9, r2 @ h[]
ADD r10, r13, #20 @ excf[]
ADD r8, r8, r5, LSL #1 @ exc[k] address
ADD r9, r9, r6, LSL #1 @ h[i] address
ADD r10, r10, r6, LSL #1 @ excf[i] address
LDRSH r11, [r8] @ tmp = exc[k]
LOOPK:
LDRSH r8, [r9], #-2 @ load h[i]
LDRSH r12, [r10, #-2] @ load excf[i - 1]
MUL r14, r11, r8
MOV r8, r14, ASR #15
ADD r14, r8, r12
STRH r14, [r10], #-2
SUBS r6, r6, #1
BGT LOOPK
LDRSH r8, [r9] @ load h[0]
MUL r14, r11, r8
LDR r6, [r13, #T_MAX] @ get t_max
MOV r8, r14, ASR #15
STRH r8, [r10]
CMP r4, r6
BLE LOOPFOR
Norm_corr_asm_end:
ADD r13, r13, #voSTACK
LDMFD r13!, {r4 - r12, r15}
.end
|
ProjectEverest/frameworks_av | 19,096 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/pred_lt4_1_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Pred_lt4(
@ Word16 exc[], /* in/out: excitation buffer */
@ Word16 T0, /* input : integer pitch lag */
@ Word16 frac, /* input : fraction of lag */
@ Word16 L_subfr /* input : subframe size */
@ )
@******************************
@ ARM Register
@******************************
@ r0 --- exc[]
@ r1 --- T0
@ r2 --- frac
@ r3 --- L_subfr
.section .text
.global pred_lt4_asm
.extern inter4_2
.hidden inter4_2
@-----------------------------------------------------------------------
@ void Pred_lt4(Word16 exc[], Word16 T0, Word16 frac, Word16 L_subfr)
@ Long-term (adaptive-codebook) prediction: rebuilds exc[0..L_subfr-1]
@ by 1/4-resolution interpolation of the past excitation with the
@ 32-tap FIR table inter4_2[frac][...].
@ In:   r0 = exc[] (read behind the pointer, written forward)
@       r1 = T0    integer pitch lag
@       r2 = frac  fractional lag, -3..3 (quarters)
@       r3 = L_subfr (this version is hard-wired for 64 outputs:
@            21 iterations x 3 samples + a final 2-sample tail)
@ Out:  exc[0..63] written (Q-rounded, saturated)
@ Clobbers: r0-r12, r14 (callee-saved restored by the epilogue)
@ Note: the filter base address is loaded PC-relative via the
@       `Table: .word inter4_2-Table` literal, so the code is PIC.
@-----------------------------------------------------------------------
pred_lt4_asm:
STMFD r13!, {r4 - r12, r14}
RSB r4, r1, #0 @-T0
RSB r2, r2, #0 @frac = -frac
ADD r5, r0, r4, LSL #1 @x = exc - T0
CMP r2, #0
ADDLT r2, r2, #4 @frac += UP_SAMP
SUBLT r5, r5, #2 @x--
SUB r5, r5, #30 @x -= 15
RSB r4, r2, #3 @k = 3 - frac
ADR r8, Table
NOP @space for fixed up relative address of ADR
LDR r6, [r8]
ADD r6, r8 @r6 = &inter4_2 (PIC: offset + &Table)
MOV r8, r4, LSL #6 @k * 64 bytes = k * 32 Word16 taps
@MOV r7, #0 @j = 0
ADD r8, r6, r8 @ptr2 = &(inter4_2[k][0])
MOV r1, r5
MOV r5, #0x8000 @rounding constant kept live in r5
MOV r14, #21 @21 iterations of 3 outputs each
@ used register
@r0 --- exc[] r1 --- x r7 --- j r8 --- ptr2 r5 --- 0x8000
@ Main loop: computes three overlapping 32-tap dot products at once
@ (accumulators r10/r11/r12 are offset by one sample each); each LDR
@ from ptr fetches a packed pair of taps h[2k], h[2k+1].
THREE_LOOP:
@MOV r1, r5 @ptr1 = x
MOV r2, r8 @ptr = ptr2
LDR r3, [r2], #4 @h[0], h[1]
LDRSH r4, [r1], #2 @x[0]
LDRSH r6, [r1], #2 @x[1]
LDRSH r9, [r1], #2 @x[2]
SMULBB r10, r4, r3 @x[0] * h[0]
SMULBB r11, r6, r3 @x[1] * h[0]
SMULBB r12, r9, r3 @x[2] * h[0]
LDRSH r4, [r1], #2 @x[3]
SMLABT r10, r6, r3, r10 @x[1] * h[1]
SMLABT r11, r9, r3, r11 @x[2] * h[1]
SMLABT r12, r4, r3, r12 @x[3] * h[1]
LDR r3, [r2], #4 @h[2], h[3]
LDRSH r6, [r1], #2 @x[4]
SMLABB r10, r9, r3, r10 @x[2] * h[2]
SMLABB r11, r4, r3, r11 @x[3] * h[2]
SMLABB r12, r6, r3, r12 @x[4] * h[2]
LDRSH r9, [r1], #2 @x[5]
SMLABT r10, r4, r3, r10 @x[3] * h[3]
SMLABT r11, r6, r3, r11 @x[4] * h[3]
SMLABT r12, r9, r3, r12 @x[5] * h[3]
LDR r3, [r2], #4 @h[4], h[5]
LDRSH r4, [r1], #2 @x[6]
SMLABB r10, r6, r3, r10 @x[4] * h[4]
SMLABB r11, r9, r3, r11 @x[5] * h[4]
SMLABB r12, r4, r3, r12 @x[6] * h[4]
LDRSH r6, [r1], #2 @x[7]
SMLABT r10, r9, r3, r10 @x[5] * h[5]
SMLABT r11, r4, r3, r11 @x[6] * h[5]
SMLABT r12, r6, r3, r12 @x[7] * h[5]
LDR r3, [r2], #4 @h[6], h[7]
LDRSH r9, [r1], #2 @x[8]
SMLABB r10, r4, r3, r10 @x[6] * h[6]
SMLABB r11, r6, r3, r11 @x[7] * h[6]
SMLABB r12, r9, r3, r12 @x[8] * h[6]
LDRSH r4, [r1], #2 @x[9]
SMLABT r10, r6, r3, r10 @x[7] * h[7]
SMLABT r11, r9, r3, r11 @x[8] * h[7]
SMLABT r12, r4, r3, r12 @x[9] * h[7]
LDR r3, [r2], #4 @h[8], h[9]
LDRSH r6, [r1], #2 @x[10]
SMLABB r10, r9, r3, r10 @x[8] * h[8]
SMLABB r11, r4, r3, r11 @x[9] * h[8]
SMLABB r12, r6, r3, r12 @x[10] * h[8]
LDRSH r9, [r1], #2 @x[11]
SMLABT r10, r4, r3, r10 @x[9] * h[9]
SMLABT r11, r6, r3, r11 @x[10] * h[9]
SMLABT r12, r9, r3, r12 @x[11] * h[9]
LDR r3, [r2], #4 @h[10], h[11]
LDRSH r4, [r1], #2 @x[12]
SMLABB r10, r6, r3, r10 @x[10] * h[10]
SMLABB r11, r9, r3, r11 @x[11] * h[10]
SMLABB r12, r4, r3, r12 @x[12] * h[10]
LDRSH r6, [r1], #2 @x[13]
SMLABT r10, r9, r3, r10 @x[11] * h[11]
SMLABT r11, r4, r3, r11 @x[12] * h[11]
SMLABT r12, r6, r3, r12 @x[13] * h[11]
LDR r3, [r2], #4 @h[12], h[13]
LDRSH r9, [r1], #2 @x[14]
SMLABB r10, r4, r3, r10 @x[12] * h[12]
SMLABB r11, r6, r3, r11 @x[13] * h[12]
SMLABB r12, r9, r3, r12 @x[14] * h[12]
LDRSH r4, [r1], #2 @x[15]
SMLABT r10, r6, r3, r10 @x[13] * h[13]
SMLABT r11, r9, r3, r11 @x[14] * h[13]
SMLABT r12, r4, r3, r12 @x[15] * h[13]
LDR r3, [r2], #4 @h[14], h[15]
LDRSH r6, [r1], #2 @x[16]
SMLABB r10, r9, r3, r10 @x[14] * h[14]
SMLABB r11, r4, r3, r11 @x[15] * h[14]
SMLABB r12, r6, r3, r12 @x[16] * h[14]
LDRSH r9, [r1], #2 @x[17]
SMLABT r10, r4, r3, r10 @x[15] * h[15]
SMLABT r11, r6, r3, r11 @x[16] * h[15]
SMLABT r12, r9, r3, r12 @x[17] * h[15]
LDR r3, [r2], #4 @h[16], h[17]
LDRSH r4, [r1], #2 @x[18]
SMLABB r10, r6, r3, r10 @x[16] * h[16]
SMLABB r11, r9, r3, r11 @x[17] * h[16]
SMLABB r12, r4, r3, r12 @x[18] * h[16]
LDRSH r6, [r1], #2 @x[19]
SMLABT r10, r9, r3, r10 @x[17] * h[17]
SMLABT r11, r4, r3, r11 @x[18] * h[17]
SMLABT r12, r6, r3, r12 @x[19] * h[17]
LDR r3, [r2], #4 @h[18], h[19]
LDRSH r9, [r1], #2 @x[20]
SMLABB r10, r4, r3, r10 @x[18] * h[18]
SMLABB r11, r6, r3, r11 @x[19] * h[18]
SMLABB r12, r9, r3, r12 @x[20] * h[18]
LDRSH r4, [r1], #2 @x[21]
SMLABT r10, r6, r3, r10 @x[19] * h[19]
SMLABT r11, r9, r3, r11 @x[20] * h[19]
SMLABT r12, r4, r3, r12 @x[21] * h[19]
LDR r3, [r2], #4 @h[20], h[21]
LDRSH r6, [r1], #2 @x[22]
SMLABB r10, r9, r3, r10 @x[20] * h[20]
SMLABB r11, r4, r3, r11 @x[21] * h[20]
SMLABB r12, r6, r3, r12 @x[22] * h[20]
LDRSH r9, [r1], #2 @x[23]
SMLABT r10, r4, r3, r10 @x[21] * h[21]
SMLABT r11, r6, r3, r11 @x[22] * h[21]
SMLABT r12, r9, r3, r12 @x[23] * h[21]
LDR r3, [r2], #4 @h[22], h[23]
LDRSH r4, [r1], #2 @x[24]
SMLABB r10, r6, r3, r10 @x[22] * h[22]
SMLABB r11, r9, r3, r11 @x[23] * h[22]
SMLABB r12, r4, r3, r12 @x[24] * h[22]
LDRSH r6, [r1], #2 @x[25]
SMLABT r10, r9, r3, r10 @x[23] * h[23]
SMLABT r11, r4, r3, r11 @x[24] * h[23]
SMLABT r12, r6, r3, r12 @x[25] * h[23]
LDR r3, [r2], #4 @h[24], h[25]
LDRSH r9, [r1], #2 @x[26]
SMLABB r10, r4, r3, r10 @x[24] * h[24]
SMLABB r11, r6, r3, r11 @x[25] * h[24]
SMLABB r12, r9, r3, r12 @x[26] * h[24]
LDRSH r4, [r1], #2 @x[27]
SMLABT r10, r6, r3, r10 @x[25] * h[25]
SMLABT r11, r9, r3, r11 @x[26] * h[25]
SMLABT r12, r4, r3, r12 @x[27] * h[25]
LDR r3, [r2], #4 @h[26], h[27]
LDRSH r6, [r1], #2 @x[28]
SMLABB r10, r9, r3, r10 @x[26] * h[26]
SMLABB r11, r4, r3, r11 @x[27] * h[26]
SMLABB r12, r6, r3, r12 @x[28] * h[26]
LDRSH r9, [r1], #2 @x[29]
SMLABT r10, r4, r3, r10 @x[27] * h[27]
SMLABT r11, r6, r3, r11 @x[28] * h[27]
SMLABT r12, r9, r3, r12 @x[29] * h[27]
LDR r3, [r2], #4 @h[28], h[29]
LDRSH r4, [r1], #2 @x[30]
SMLABB r10, r6, r3, r10 @x[28] * h[28]
SMLABB r11, r9, r3, r11 @x[29] * h[28]
SMLABB r12, r4, r3, r12 @x[30] * h[28]
LDRSH r6, [r1], #2 @x[31]
SMLABT r10, r9, r3, r10 @x[29] * h[29]
SMLABT r11, r4, r3, r11 @x[30] * h[29]
SMLABT r12, r6, r3, r12 @x[31] * h[29]
LDR r3, [r2], #4 @h[30], h[31]
LDRSH r9, [r1], #2 @x[32]
SMLABB r10, r4, r3, r10 @x[30] * h[30]
SMLABB r11, r6, r3, r11 @x[31] * h[30]
SMLABB r12, r9, r3, r12 @x[32] * h[30]
@ the #-60 writeback rewinds x by 30 samples minus the 3 just produced
LDRSH r4, [r1], #-60 @x[33]
SMLABT r10, r6, r3, r10 @x[31] * h[31]
SMLABT r11, r9, r3, r11 @x[32] * h[31]
SMLABT r12, r4, r3, r12 @x[33] * h[31]
@SSAT r10, #32, r10, LSL #2
@SSAT r11, #32, r11, LSL #2
@SSAT r12, #32, r12, LSL #2
@ scale: non-saturating << 1, saturating << 1, then round with 0x8000
MOV r10, r10, LSL #1
MOV r11, r11, LSL #1
MOV r12, r12, LSL #1
QADD r10, r10, r10
QADD r11, r11, r11
QADD r12, r12, r12
QADD r10, r10, r5
QADD r11, r11, r5
QADD r12, r12, r5
SUBS r14, r14, #1
MOV r10, r10, ASR #16
MOV r11, r11, ASR #16
MOV r12, r12, ASR #16
STRH r10, [r0], #2
STRH r11, [r0], #2
STRH r12, [r0], #2
BNE THREE_LOOP
@ Tail: the final two outputs (21*3 + 2 = 64 samples total),
@ two interleaved 32-tap dot products in r10/r11.
MOV r2, r8 @ptr = ptr2
Last2LOOP:
LDR r3, [r2], #4 @h[0], h[1]
LDRSH r4, [r1], #2 @x[0]
LDRSH r6, [r1], #2 @x[1]
LDRSH r9, [r1], #2 @x[2]
SMULBB r10, r4, r3 @x[0] * h[0]
SMULBB r11, r6, r3 @x[1] * h[0]
SMLABT r10, r6, r3, r10 @x[1] * h[1]
SMLABT r11, r9, r3, r11 @x[2] * h[1]
LDR r3, [r2], #4 @h[2], h[3]
LDRSH r4, [r1], #2 @x[3]
LDRSH r6, [r1], #2 @x[4]
SMLABB r10, r9, r3, r10 @x[2] * h[2]
SMLABB r11, r4, r3, r11 @x[3] * h[2]
SMLABT r10, r4, r3, r10 @x[3] * h[3]
SMLABT r11, r6, r3, r11 @x[4] * h[3]
LDR r3, [r2], #4 @h[4], h[5]
LDRSH r9, [r1], #2 @x[5]
LDRSH r4, [r1], #2 @x[6]
SMLABB r10, r6, r3, r10 @x[4] * h[4]
SMLABB r11, r9, r3, r11 @x[5] * h[4]
SMLABT r10, r9, r3, r10 @x[5] * h[5]
SMLABT r11, r4, r3, r11 @x[6] * h[5]
LDR r3, [r2], #4 @h[6], h[7]
LDRSH r6, [r1], #2 @x[7]
LDRSH r9, [r1], #2 @x[8]
SMLABB r10, r4, r3, r10 @x[6] * h[6]
SMLABB r11, r6, r3, r11 @x[7] * h[6]
SMLABT r10, r6, r3, r10 @x[7] * h[7]
SMLABT r11, r9, r3, r11 @x[8] * h[7]
LDR r3, [r2], #4 @h[8], h[9]
LDRSH r4, [r1], #2 @x[9]
LDRSH r6, [r1], #2 @x[10]
SMLABB r10, r9, r3, r10 @x[8] * h[8]
SMLABB r11, r4, r3, r11 @x[9] * h[8]
SMLABT r10, r4, r3, r10 @x[9] * h[9]
SMLABT r11, r6, r3, r11 @x[10] * h[9]
LDR r3, [r2], #4 @h[10], h[11]
LDRSH r9, [r1], #2 @x[11]
LDRSH r4, [r1], #2 @x[12]
SMLABB r10, r6, r3, r10 @x[10] * h[10]
SMLABB r11, r9, r3, r11 @x[11] * h[10]
SMLABT r10, r9, r3, r10 @x[11] * h[11]
SMLABT r11, r4, r3, r11 @x[12] * h[11]
LDR r3, [r2], #4 @h[12], h[13]
LDRSH r6, [r1], #2 @x[13]
LDRSH r9, [r1], #2 @x[14]
SMLABB r10, r4, r3, r10 @x[12] * h[12]
SMLABB r11, r6, r3, r11 @x[13] * h[12]
SMLABT r10, r6, r3, r10 @x[13] * h[13]
SMLABT r11, r9, r3, r11 @x[14] * h[13]
LDR r3, [r2], #4 @h[14], h[15]
LDRSH r4, [r1], #2 @x[15]
LDRSH r6, [r1], #2 @x[16]
SMLABB r10, r9, r3, r10 @x[14] * h[14]
SMLABB r11, r4, r3, r11 @x[15] * h[14]
SMLABT r10, r4, r3, r10 @x[15] * h[15]
SMLABT r11, r6, r3, r11 @x[16] * h[15]
LDR r3, [r2], #4 @h[16], h[17]
LDRSH r9, [r1], #2 @x[17]
LDRSH r4, [r1], #2 @x[18]
SMLABB r10, r6, r3, r10 @x[16] * h[16]
SMLABB r11, r9, r3, r11 @x[17] * h[16]
SMLABT r10, r9, r3, r10 @x[17] * h[17]
SMLABT r11, r4, r3, r11 @x[18] * h[17]
LDR r3, [r2], #4 @h[18], h[19]
LDRSH r6, [r1], #2 @x[19]
LDRSH r9, [r1], #2 @x[20]
SMLABB r10, r4, r3, r10 @x[18] * h[18]
SMLABB r11, r6, r3, r11 @x[19] * h[18]
SMLABT r10, r6, r3, r10 @x[19] * h[19]
SMLABT r11, r9, r3, r11 @x[20] * h[19]
LDR r3, [r2], #4 @h[20], h[21]
LDRSH r4, [r1], #2 @x[21]
LDRSH r6, [r1], #2 @x[22]
SMLABB r10, r9, r3, r10 @x[20] * h[20]
SMLABB r11, r4, r3, r11 @x[21] * h[20]
SMLABT r10, r4, r3, r10 @x[21] * h[21]
SMLABT r11, r6, r3, r11 @x[22] * h[21]
LDR r3, [r2], #4 @h[22], h[23]
LDRSH r9, [r1], #2 @x[23]
LDRSH r4, [r1], #2 @x[24]
SMLABB r10, r6, r3, r10 @x[22] * h[22]
SMLABB r11, r9, r3, r11 @x[23] * h[22]
SMLABT r10, r9, r3, r10 @x[23] * h[23]
SMLABT r11, r4, r3, r11 @x[24] * h[23]
LDR r3, [r2], #4 @h[24], h[25]
LDRSH r6, [r1], #2 @x[25]
LDRSH r9, [r1], #2 @x[26]
SMLABB r10, r4, r3, r10 @x[24] * h[24]
SMLABB r11, r6, r3, r11 @x[25] * h[24]
SMLABT r10, r6, r3, r10 @x[25] * h[25]
SMLABT r11, r9, r3, r11 @x[26] * h[25]
LDR r3, [r2], #4 @h[26], h[27]
LDRSH r4, [r1], #2 @x[27]
LDRSH r6, [r1], #2 @x[28]
SMLABB r10, r9, r3, r10 @x[26] * h[26]
SMLABB r11, r4, r3, r11 @x[27] * h[26]
SMLABT r10, r4, r3, r10 @x[27] * h[27]
SMLABT r11, r6, r3, r11 @x[28] * h[27]
LDR r3, [r2], #4 @h[28], h[29]
LDRSH r9, [r1], #2 @x[29]
LDRSH r4, [r1], #2 @x[30]
SMLABB r10, r6, r3, r10 @x[28] * h[28]
SMLABB r11, r9, r3, r11 @x[29] * h[28]
SMLABT r10, r9, r3, r10 @x[29] * h[29]
SMLABT r11, r4, r3, r11 @x[30] * h[29]
LDR r3, [r2], #4 @h[30], h[31]
LDRSH r6, [r1], #2 @x[31]
LDRSH r9, [r1], #2 @x[32]
SMLABB r10, r4, r3, r10 @x[30] * h[30]
SMLABB r11, r6, r3, r11 @x[31] * h[30]
SMLABT r10, r6, r3, r10 @x[31] * h[31]
SMLABT r11, r9, r3, r11 @x[32] * h[31]
@SSAT r10, #32, r10, LSL #2
@SSAT r11, #32, r11, LSL #2
@ same scale-and-round sequence as in THREE_LOOP
MOV r10, r10, LSL #1
MOV r11, r11, LSL #1
QADD r10, r10, r10
QADD r11, r11, r11
QADD r10, r10, r5
QADD r11, r11, r5
MOV r10, r10, ASR #16
MOV r11, r11, ASR #16
STRH r10, [r0], #2
STRH r11, [r0], #2
pred_lt4_end:
LDMFD r13!, {r4 - r12, r15}
Table:
.word inter4_2-Table
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 9,880 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/syn_filt_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@**********************************************************************/
@void Syn_filt(
@ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
@ Word16 x[], /* (i) : input signal */
@ Word16 y[], /* (o) : output signal */
@ Word16 mem[], /* (i/o) : memory associated with this filtering. */
@)
@***********************************************************************
@ a[] --- r0
@ x[] --- r1
@ y[] --- r2
@ mem[] --- r3
@ m --- 16 lg --- 80 update --- 1
.section .text
.global Syn_filt_asm
.extern voAWB_Copy
Syn_filt_asm:
STMFD r13!, {r4 - r12, r14}
SUB r13, r13, #700 @ y_buf[L_FRAME16k + M16k]
MOV r4, r3 @ copy mem[] address
MOV r5, r13 @ copy yy = y_buf address
@ for(i = 0@ i < m@ i++)
@{
@ *yy++ = mem[i]@
@}
LDRH r6, [r4], #2
LDRH r7, [r4], #2
LDRH r8, [r4], #2
LDRH r9, [r4], #2
LDRH r10, [r4], #2
LDRH r11, [r4], #2
LDRH r12, [r4], #2
LDRH r14, [r4], #2
STRH r6, [r5], #2
STRH r7, [r5], #2
STRH r8, [r5], #2
STRH r9, [r5], #2
STRH r10, [r5], #2
STRH r11, [r5], #2
STRH r12, [r5], #2
STRH r14, [r5], #2
LDRH r6, [r4], #2
LDRH r7, [r4], #2
LDRH r8, [r4], #2
LDRH r9, [r4], #2
LDRH r10, [r4], #2
LDRH r11, [r4], #2
LDRH r12, [r4], #2
LDRH r14, [r4], #2
STRH r6, [r5], #2
STRH r7, [r5], #2
STRH r8, [r5], #2
STRH r9, [r5], #2
STRH r10, [r5], #2
STRH r11, [r5], #2
STRH r12, [r5], #2
STRH r14, [r5], #2
LDRSH r5, [r0] @ load a[0]
MOV r8, #0 @ i = 0
MOV r5, r5, ASR #1 @ a0 = a[0] >> 1
@MOV r4, r13
@ load all a[]
LDR r14, =0xffff
LDRSH r6, [r0, #2] @ load a[1]
LDRSH r7, [r0, #4] @ load a[2]
LDRSH r9, [r0, #6] @ load a[3]
LDRSH r11,[r0, #8] @ load a[4]
AND r6, r6, r14
AND r9, r9, r14
ORR r10, r6, r7, LSL #16 @ -a[2] -- -a[1]
ORR r12, r9, r11, LSL #16 @ -a[4] -- -a[3]
STR r10, [r13, #-4]
STR r12, [r13, #-8]
LDRSH r6, [r0, #10] @ load a[5]
LDRSH r7, [r0, #12] @ load a[6]
LDRSH r9, [r0, #14] @ load a[7]
LDRSH r11,[r0, #16] @ load a[8]
AND r6, r6, r14
AND r9, r9, r14
ORR r10, r6, r7, LSL #16 @ -a[6] -- -a[5]
ORR r12, r9, r11, LSL #16 @ -a[8] -- -a[7]
STR r10, [r13, #-12]
STR r12, [r13, #-16]
LDRSH r6, [r0, #18] @ load a[9]
LDRSH r7, [r0, #20] @ load a[10]
LDRSH r9, [r0, #22] @ load a[11]
LDRSH r11,[r0, #24] @ load a[12]
AND r6, r6, r14
AND r9, r9, r14
ORR r10, r6, r7, LSL #16 @ -a[10] -- -a[9]
ORR r12, r9, r11, LSL #16 @ -a[12] -- -a[11]
STR r10, [r13, #-20]
STR r12, [r13, #-24]
LDRSH r6, [r0, #26] @ load a[13]
LDRSH r7, [r0, #28] @ load a[14]
LDRSH r9, [r0, #30] @ load a[15]
LDRSH r11,[r0, #32] @ load a[16]
AND r6, r6, r14
AND r9, r9, r14
ORR r10, r6, r7, LSL #16 @ -a[14] -- -a[13]
ORR r12, r9, r11, LSL #16 @ -a[16] -- -a[15]
STR r10, [r13, #-28]
STR r12, [r13, #-32]
ADD r4, r13, #32
LOOP:
LDRSH r6, [r1], #2 @ load x[i]
ADD r10, r4, r8, LSL #1 @ temp_p = yy + i
MUL r0, r5, r6 @ L_tmp = x[i] * a0
@ for(j = 1@ j <= m, j+=8)
LDR r7, [r13, #-4] @ -a[2] -a[1]
LDRSH r9, [r10, #-2] @ *(temp_p - 1)
LDRSH r12, [r10, #-4] @ *(temp_p - 2)
SMULBB r14, r9, r7 @ -a[1] * (*(temp_p -1))
LDRSH r6, [r10, #-6] @ *(temp_p - 3)
SMLABT r14, r12, r7, r14 @ -a[2] * (*(temp_p - 2))
LDR r7, [r13, #-8] @ -a[4] -a[3]
LDRSH r11, [r10, #-8] @ *(temp_p - 4)
SMLABB r14, r6, r7, r14 @ -a[3] * (*(temp_p -3))
LDRSH r9, [r10, #-10] @ *(temp_p - 5)
SMLABT r14, r11, r7, r14 @ -a[4] * (*(temp_p -4))
LDR r7, [r13, #-12] @ -a[6] -a[5]
LDRSH r12, [r10, #-12] @ *(temp_p - 6)
SMLABB r14, r9, r7, r14 @ -a[5] * (*(temp_p -5))
LDRSH r6, [r10, #-14] @ *(temp_p - 7)
SMLABT r14, r12, r7, r14 @ -a[6] * (*(temp_p - 6))
LDR r7, [r13, #-16] @ -a[8] -a[7]
LDRSH r11, [r10, #-16] @ *(temp_p - 8)
SMLABB r14, r6, r7, r14 @ -a[7] * (*(temp_p -7))
LDRSH r9, [r10, #-18] @ *(temp_p - 9)
SMLABT r14, r11, r7, r14 @ -a[8] * (*(temp_p -8))
LDR r7, [r13, #-20] @ -a[10] -a[9]
LDRSH r12, [r10, #-20] @ *(temp_p - 10)
SMLABB r14, r9, r7, r14 @ -a[9] * (*(temp_p -9))
LDRSH r6, [r10, #-22] @ *(temp_p - 11)
SMLABT r14, r12, r7, r14 @ -a[10] * (*(temp_p - 10))
LDR r7, [r13, #-24] @ -a[12] -a[11]
LDRSH r11, [r10, #-24] @ *(temp_p - 12)
SMLABB r14, r6, r7, r14 @ -a[11] * (*(temp_p -11))
LDRSH r9, [r10, #-26] @ *(temp_p - 13)
SMLABT r14, r11, r7, r14 @ -a[12] * (*(temp_p -12))
LDR r7, [r13, #-28] @ -a[14] -a[13]
LDRSH r12, [r10, #-28] @ *(temp_p - 14)
SMLABB r14, r9, r7, r14 @ -a[13] * (*(temp_p -13))
LDRSH r6, [r10, #-30] @ *(temp_p - 15)
SMLABT r14, r12, r7, r14 @ -a[14] * (*(temp_p - 14))
LDR r7, [r13, #-32] @ -a[16] -a[15]
LDRSH r11, [r10, #-32] @ *(temp_p - 16)
SMLABB r14, r6, r7, r14 @ -a[15] * (*(temp_p -15))
SMLABT r14, r11, r7, r14 @ -a[16] * (*(temp_p -16))
RSB r14, r14, r0
MOV r7, r14, LSL #4 @ L_tmp <<=4
ADD r8, r8, #1
ADD r14, r7, #0x8000
MOV r7, r14, ASR #16 @ (L_tmp + 0x8000) >> 16
CMP r8, #80
STRH r7, [r10] @ yy[i]
STRH r7, [r2], #2 @ y[i]
BLT LOOP
@ update mem[]
ADD r5, r13, #160 @ yy[64] address
MOV r1, r3
MOV r0, r5
MOV r2, #16
BL voAWB_Copy
Syn_filt_asm_end:
ADD r13, r13, #700
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 3,610 | media/module/codecs/amrwb/enc/src/asm/ARMV5E/Deemph_32_opt.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@void Deemph_32(
@ Word16 x_hi[], /* (i) : input signal (bit31..16) */
@ Word16 x_lo[], /* (i) : input signal (bit15..4) */
@ Word16 y[], /* (o) : output signal (x16) */
@ Word16 mu, /* (i) Q15 : deemphasis factor */
@ Word16 L, /* (i) : vector size */
@ Word16 * mem /* (i/o) : memory (y[-1]) */
@ )
@x_hi RN R0
@x_lo RN R1
@y[] RN R2
@*mem RN R3
.section .text
.global Deemph_32_asm
@-----------------------------------------------------------------------
@ void Deemph_32(Word16 x_hi[], Word16 x_lo[], Word16 y[],
@                Word16 mu, Word16 L, Word16 *mem)
@ De-emphasis IIR filter: y[i] = round(((x_hi[i]<<16 | x_lo[i]<<4) << 3)
@                                      + y[i-1] * fac), y[-1] = *mem.
@ In:   r0 = x_hi[], r1 = x_lo[], r2 = y[], r3 = mem
@ Out:  y[] written; *mem updated with the last output
@ Note: mu and L are NOT taken from the arguments in this version --
@       mu is hard-coded to 22282 (0.68 in Q15) and the loop is
@       hard-wired for L = 64 (loop counter starts at 2, steps by 2).
@ The loop is software-pipelined two samples deep: the y[i] computed at
@ the bottom of one iteration is stored at the top of the next.
@-----------------------------------------------------------------------
Deemph_32_asm:
STMFD r13!, {r4 - r12, r14}
MOV r4, #2 @i=0
LDRSH r6, [r0], #2 @load x_hi[0]
LDRSH r7, [r1], #2 @load x_lo[0]
LDR r5, =22282 @r5---mu (0.68 in Q15, hard-coded)
MOV r11, #0x8000 @rounding constant
@y[0]
MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
MOV r8, r5, ASR #1 @fac = mu >> 1
LDR r5, [r3] @y[-1] = *mem
ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
MOV r10, r12, LSL #3 @L_tmp <<= 3
MUL r9, r5, r8
LDRSH r6, [r0], #2 @load x_hi[1]
QDADD r10, r10, r9 @L_tmp = L_mac(L_tmp, *mem, fac) (saturating)
LDRSH r7, [r1], #2 @load x_lo[1]
MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
QADD r10, r12, r11 @add rounding bias, saturating
MOV r14, r10, ASR #16 @y[0] = round(L_tmp)
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
STRH r14, [r2], #2 @update y[0]
MOV r10, r12, LSL #3
MUL r9, r14, r8 @feedback: y[0] * fac
QDADD r10, r10, r9
MOV r12, r10, LSL #1
QADD r10, r12, r11
MOV r14, r10, ASR #16 @y[1] = round(L_tmp)
@ Steady state: two samples per iteration; r14 always carries the
@ previous output into the next recurrence step.
LOOP:
LDRSH r6, [r0], #2 @load x_hi[]
LDRSH r7, [r1], #2
STRH r14, [r2], #2 @store y[i-1] computed last round
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
MUL r9, r14, r8
MOV r10, r12, LSL #3
QDADD r10, r10, r9
LDRSH r6, [r0], #2 @load x_hi[]
MOV r12, r10, LSL #1
QADD r10, r12, r11
LDRSH r7, [r1], #2
MOV r14, r10, ASR #16
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
STRH r14, [r2], #2
MUL r9, r14, r8
MOV r10, r12, LSL #3
QDADD r10, r10, r9
ADD r4, r4, #2
MOV r12, r10, LSL #1
QADD r10, r12, r11
CMP r4, #64
MOV r14, r10, ASR #16
BLT LOOP
@ NOTE(review): STR writes 32 bits through a Word16* mem pointer; the
@ halfword store below writes the same value to y[]. Looks like the
@ wide store relies on mem having room for a word -- confirm against
@ the caller before changing.
STR r14, [r3] @*mem = last output
STRH r14, [r2] @y[63]
LDMFD r13!, {r4 - r12, r15}
@ENDP
.end
|
ProjectEverest/frameworks_av | 6,000 | media/module/codecs/amrwb/enc/src/asm/ARMV7/cor_h_vec_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@static void cor_h_vec_012(
@ Word16 h[], /* (i) scaled impulse response */
@ Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
@ Word16 track, /* (i) track to use */
@ Word16 sign[], /* (i) sign vector */
@ Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
@ Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
@ Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
@)
@r0 ---- h[]
@r1 ---- vec[]
@r2 ---- track
@r3 ---- sign[]
@r4 ---- rrixix[][NB_POS]
@r5 ---- cor_1[]
@r6 ---- cor_2[]
.section .text
.global cor_h_vec_012_asm

@----------------------------------------------------------------------
@ cor_h_vec_012_asm -- correlation of the impulse response h[] with a
@ vector, producing cor_1[] / cor_2[] (see C prototype above).
@ Register interface:
@   r0 = h[], r1 = vec[], r2 = track, r3 = sign[]
@ Stack arguments (offsets are relative to sp after the 10-register
@ STMFD below, i.e. 5th/6th/7th C arguments):
@   [sp+40] = rrixix[][NB_POS], [sp+44] = cor_1[], [sp+48] = cor_2[]
@ The outer loop is unrolled x2: each pass handles positions pos and
@ pos+4 and writes one element into each output per half, 16 elements
@ total (CMP r4, #16). Clobbers r4-r12, r14.
@----------------------------------------------------------------------
cor_h_vec_012_asm:
STMFD r13!, {r4 - r12, r14}
LDR r4, [r13, #40] @load rrixix[][NB_POS]
ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track] (row stride 32 bytes)
MOV r4, #0 @i=0
@r0 --- h[], r1 --- vec[], r2 --- pos
@r3 --- sign[], r4 --- i, r7 --- p0
LOOPi:
@ First position of the unrolled pair: accumulate
@ L_sum1 += h[j]*vec[pos+j], L_sum2 += h[j]*vec[pos+j+1]
MOV r5, #0 @L_sum1 = 0
MOV r6, #0 @L_sum2 = 0
ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
MOV r10, r0 @p1 = h
RSB r11, r2, #62 @j=62-pos
LOOPj1:
LDRSH r12, [r10], #2
LDRSH r8, [r9], #2
LDRSH r14, [r9]
SUBS r11, r11, #1
MLA r5, r12, r8, r5
MLA r6, r12, r14, r6
BGE LOOPj1
@ Tail: one extra product for L_sum1, then scale, round and take
@ the high halves of both sums.
LDRSH r12, [r10], #2 @*p1++
MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
MLA r5, r12, r14, r5
MOV r14, #0x8000 @ rounding constant
MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
ADD r10, r6, r14
ADD r9, r5, r14
MOV r5, r9, ASR #16
MOV r6, r10, ASR #16
ADD r9, r3, r2, LSL #1 @address of sign[pos]
ADD r8, r7, #32 @ p3 = p0 + 16 halfwords (next rrixix row)
LDRSH r10, [r9], #2 @sign[pos]
LDRSH r11, [r9] @sign[pos + 1]
MUL r12, r5, r10
MUL r14, r6, r11
MOV r5, r12, ASR #15 @ apply Q15 sign
MOV r6, r14, ASR #15
LDR r9, [r13, #44] @ cor_1[]
LDR r12, [r13, #48] @ cor_2[]
LDRSH r10, [r7], #2 @*p0++
LDRSH r11, [r8] @*p3++
ADD r9, r9, r4, LSL #1
ADD r12, r12, r4, LSL #1
ADD r5, r5, r10
ADD r6, r6, r11
STRH r5, [r9] @ cor_1[i]
STRH r6, [r12] @ cor_2[i]
ADD r2, r2, #4 @ pos += STEP
@ Second position of the unrolled pair (same computation at pos+4).
MOV r5, #0 @L_sum1 = 0
MOV r6, #0 @L_sum2 = 0
ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
MOV r10, r0 @p1 = h
RSB r11, r2, #62 @j=62-pos
ADD r4, r4, #1 @i++
LOOPj2:
LDRSH r12, [r10], #2
LDRSH r8, [r9], #2
LDRSH r14, [r9]
SUBS r11, r11, #1
MLA r5, r12, r8, r5
MLA r6, r12, r14, r6
BGE LOOPj2
LDRSH r12, [r10], #2 @*p1++
MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
MLA r5, r12, r14, r5
MOV r14, #0x8000
MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
ADD r10, r6, r14
ADD r9, r5, r14
MOV r5, r9, ASR #16
MOV r6, r10, ASR #16
ADD r9, r3, r2, LSL #1 @address of sign[pos]
ADD r8, r7, #32
LDRSH r10, [r9], #2 @sign[pos]
LDRSH r11, [r9] @sign[pos + 1]
MUL r12, r5, r10
MUL r14, r6, r11
MOV r5, r12, ASR #15
MOV r6, r14, ASR #15
LDR r9, [r13, #44]
LDR r12, [r13, #48]
LDRSH r10, [r7], #2 @*p0++
LDRSH r11, [r8] @*p3++
ADD r9, r9, r4, LSL #1
ADD r12, r12, r4, LSL #1
ADD r5, r5, r10
ADD r6, r6, r11
STRH r5, [r9]
STRH r6, [r12]
ADD r4, r4, #1 @i+1
ADD r2, r2, #4 @pos += STEP
CMP r4, #16 @ 16 output elements per array
BLT LOOPi
the_end:
LDMFD r13!, {r4 - r12, r15}
.end
|
ProjectEverest/frameworks_av | 4,943 | media/module/codecs/amrwb/enc/src/asm/ARMV7/scale_sig_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@**********************************************************************/
@void Scale_sig(
@ Word16 x[], /* (i/o) : signal to scale */
@ Word16 lg, /* (i) : size of x[] */
@ Word16 exp /* (i) : exponent: x = round(x << exp) */
@)
@***********************************************************************
@ x[] --- r0
@ lg --- r1
@ exp --- r2
.section .text
.global Scale_sig_opt

@----------------------------------------------------------------------
@ Scale_sig_opt -- scale and round a Word16 signal: x[i] = round(x[i] << exp).
@ Register interface: r0 = x[] (in/out), r1 = lg, r2 = exp.
@ Per sample: widen to 32 bits (VSHLL #16), shift by exp (VSHL by a
@ register holds signed shift counts, so a negative exp shifts right),
@ then VADDHN with 0x8000 adds the rounding bias and narrows back to
@ the high 16 bits.
@ Size dispatch: r4 = number of 64-sample LOOP passes.
@   lg == 64  -> r4 = 1;  lg == 128 -> r4 = 2;  lg == 256 -> r4 = 4;
@   lg == 80  -> one 16-sample LOOP1 pass, then r4 = 1 pass of LOOP.
@ NOTE(review): any other lg value falls through into LOOP1 with r4 = 4
@ (16 + 256 samples processed) -- callers are assumed to pass only the
@ sizes listed above; confirm at call sites.
@ Clobbers r4-r5 and NEON registers; r4-r12, r14 saved on the stack.
@----------------------------------------------------------------------
Scale_sig_opt:
STMFD r13!, {r4 - r12, r14}
MOV r4, #4 @ default pass count (lg == 256)
VMOV.S32 Q15, #0x8000 @ rounding constant for VADDHN
VDUP.S32 Q14, r2 @ per-lane shift amount = exp
MOV r5, r0 @ copy x[] address
CMP r1, #64
MOVEQ r4, #1
BEQ LOOP
CMP r1, #128
MOVEQ r4, #2
BEQ LOOP
CMP r1, #256
BEQ LOOP
CMP r1, #80
MOVEQ r4, #1
BEQ LOOP1
LOOP1:
@ 16-sample pre-pass used for lg == 80 (80 = 16 + 64).
VLD1.S16 {Q0, Q1}, [r5]! @load 16 Word16 x[]
VSHLL.S16 Q10, D0, #16
VSHLL.S16 Q11, D1, #16
VSHLL.S16 Q12, D2, #16
VSHLL.S16 Q13, D3, #16
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VSHL.S32 Q12, Q12, Q14
VSHL.S32 Q13, Q13, Q14
VADDHN.S32 D16, Q10, Q15
VADDHN.S32 D17, Q11, Q15
VADDHN.S32 D18, Q12, Q15
VADDHN.S32 D19, Q13, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
LOOP:
@ Main pass: 64 samples per iteration (4 groups of 16).
VLD1.S16 {Q0, Q1}, [r5]! @load 16 Word16 x[]
VLD1.S16 {Q2, Q3}, [r5]! @load 16 Word16 x[]
VLD1.S16 {Q4, Q5}, [r5]! @load 16 Word16 x[]
VLD1.S16 {Q6, Q7}, [r5]! @load 16 Word16 x[]
VSHLL.S16 Q8, D0, #16
VSHLL.S16 Q9, D1, #16
VSHLL.S16 Q10, D2, #16
VSHLL.S16 Q11, D3, #16
VSHL.S32 Q8, Q8, Q14
VSHL.S32 Q9, Q9, Q14
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VADDHN.S32 D16, Q8, Q15
VADDHN.S32 D17, Q9, Q15
VADDHN.S32 D18, Q10, Q15
VADDHN.S32 D19, Q11, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
VSHLL.S16 Q12, D4, #16
VSHLL.S16 Q13, D5, #16
VSHLL.S16 Q10, D6, #16
VSHLL.S16 Q11, D7, #16
VSHL.S32 Q12, Q12, Q14
VSHL.S32 Q13, Q13, Q14
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VADDHN.S32 D16, Q12, Q15
VADDHN.S32 D17, Q13, Q15
VADDHN.S32 D18, Q10, Q15
VADDHN.S32 D19, Q11, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
VSHLL.S16 Q10, D8, #16
VSHLL.S16 Q11, D9, #16
VSHLL.S16 Q12, D10, #16
VSHLL.S16 Q13, D11, #16
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VSHL.S32 Q12, Q12, Q14
VSHL.S32 Q13, Q13, Q14
VADDHN.S32 D16, Q10, Q15
VADDHN.S32 D17, Q11, Q15
VADDHN.S32 D18, Q12, Q15
VADDHN.S32 D19, Q13, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
VSHLL.S16 Q10, D12, #16
VSHLL.S16 Q11, D13, #16
VSHLL.S16 Q12, D14, #16
VSHLL.S16 Q13, D15, #16
VSHL.S32 Q10, Q10, Q14
VSHL.S32 Q11, Q11, Q14
VSHL.S32 Q12, Q12, Q14
VSHL.S32 Q13, Q13, Q14
VADDHN.S32 D16, Q10, Q15
VADDHN.S32 D17, Q11, Q15
VADDHN.S32 D18, Q12, Q15
VADDHN.S32 D19, Q13, Q15
VST1.S16 {Q8, Q9}, [r0]! @store 16 Word16 x[]
SUBS r4, r4, #1
BGT LOOP
Scale_sig_asm_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 4,836 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Syn_filt_32_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@**********************************************************************/
@void Syn_filt_32(
@ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
@ Word16 m, /* (i) : order of LP filter */
@ Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
@ Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
@ Word16 sig_hi[], /* (o) /16 : synthesis high */
@ Word16 sig_lo[], /* (o) /16 : synthesis low */
@ Word16 lg /* (i) : size of filtering */
@)
@***********************************************************************
@ a[] --- r0
@ m --- r1
@ exc[] --- r2
@ Qnew --- r3
@ sig_hi[] --- r4
@ sig_lo[] --- r5
@ lg --- r6
.section .text
.global Syn_filt_32_asm

@----------------------------------------------------------------------
@ Syn_filt_32_asm -- 16th-order synthesis filter in double precision
@ (sig_hi holds the upper 16 bits of the filter state, sig_lo the lower
@ part). See the C prototype above.
@ Register interface: r0 = a[], r1 = m, r2 = exc[], r3 = Qnew.
@ Stack arguments (offsets relative to sp after the 10-register push,
@ i.e. 5th/6th C arguments): [sp+40] = sig_hi[], [sp+44] = sig_lo[].
@ NOTE: the order is fixed at 16 (exactly a[1]..a[16] are loaded) and
@ the loop count is hard-coded to 64 (CMP r8, #64); the m and lg
@ arguments are not otherwise read.
@ Coefficients are loaded reversed (VREV64) so the NEON multiply-
@ accumulate walks the 16-sample history held in Q2/Q3 (sig_hi) and
@ Q4/Q5 (sig_lo); each iteration shifts the history by one sample via
@ VEXT and inserts the new output at the end.
@ Clobbers r4-r12, r14 and NEON registers.
@----------------------------------------------------------------------
Syn_filt_32_asm:
STMFD r13!, {r4 - r12, r14}
LDR r4, [r13, #40] @ get sig_hi[] address
LDR r5, [r13, #44] @ get sig_lo[] address
LDRSH r6, [r0], #2 @ load Aq[0]
ADD r7, r3, #4 @ 4 + Q_new
MOV r3, r6, ASR r7 @ a0 = Aq[0] >> (4 + Q_new)
SUB r10, r4, #32 @ sig_hi[-16] address
SUB r11, r5, #32 @ sig_lo[-16] address
VLD1.S16 {D0, D1, D2, D3}, [r0]! @a[1] ~ a[16]
MOV r8, #0 @ i = 0
VLD1.S16 {D4, D5, D6, D7}, [r10]! @ sig_hi[-16] ~ sig_hi[-1]
VREV64.16 D0, D0 @ reverse coefficients for the sliding dot product
VREV64.16 D1, D1
VLD1.S16 {D8, D9, D10, D11}, [r11]! @ sig_lo[-16] ~ sig_lo[-1]
VREV64.16 D2, D2
VREV64.16 D3, D3
VDUP.S32 Q15, r8 @ Q15 = 0, used to negate the low-part sum
SYN_LOOP:
LDRSH r6, [r2], #2 @exc[i]
@L_tmp = L_msu(L_tmp, sig_lo[i - j], a[j])@
VMULL.S16 Q10, D8, D3
VEXT.8 D8, D8, D9, #2 @ slide low-part history by one sample
VMLAL.S16 Q10, D9, D2
VMLAL.S16 Q10, D10, D1
VMLAL.S16 Q10, D11, D0
VEXT.8 D9, D9, D10, #2
VEXT.8 D10, D10, D11, #2
VPADD.S32 D28, D20, D21 @ horizontal sum of the low-part products
MUL r12, r6, r3 @exc[i] * a0
VPADD.S32 D29, D28, D28
VDUP.S32 Q10, D29[0] @result1
VMULL.S16 Q11, D4, D3
VMLAL.S16 Q11, D5, D2
VSUB.S32 Q10, Q15, Q10 @ negate: the sum is subtracted (L_msu)
@L_tmp = L_msu(L_tmp, sig_hi[i - j], a[j])@
VMLAL.S16 Q11, D6, D1
VEXT.8 D4, D4, D5, #2 @ slide high-part history by one sample
VMLAL.S16 Q11, D7, D0
VEXT.8 D5, D5, D6, #2
VEXT.8 D6, D6, D7, #2
VPADD.S32 D28, D22, D23 @ horizontal sum of the high-part products
VPADD.S32 D29, D28, D28
MOV r14, r12, LSL #1 @exc[i] * a0 << 1
VDUP.S32 Q11, D29[0] @result2
VSHR.S32 Q10, Q10, #11 @result1 >>= 11
VSHL.S32 Q11, Q11, #1 @result2 <<= 1
VDUP.S32 Q12, r14
VADD.S32 Q12, Q12, Q10 @L_tmp = L_tmp - (result1 >>= 11) - (result2 <<= 1)
VSUB.S32 Q12, Q12, Q11
VSHL.S32 Q12, Q12, #3 @L_tmp <<= 3
VSHRN.S32 D20, Q12, #16 @sig_hi[i] = L_tmp >> 16@
VMOV.S16 r10, D20[0]
VSHR.S32 Q12, Q12, #4 @L_tmp >>= 4
VEXT.8 D7, D7, D20, #2 @ append new sig_hi[i] to the history
STRH r10, [r4], #2 @store sig_hi[i]
VMOV.S32 r11, D24[0] @r11 --- L_tmp >>= 4
ADD r8, r8, #1
SUB r12, r11, r10, LSL #12 @ sig_lo[i] = (L_tmp>>4) - (sig_hi[i]<<12)
@MOV r11, r12, ASR #16 @sig_lo[i]
VDUP.S16 D21, r12
VEXT.8 D11, D11, D21, #2 @ append new sig_lo[i] to the history
STRH r12, [r5], #2 @store sig_lo[i]
CMP r8, #64 @ loop count hard-coded to 64
BLT SYN_LOOP
Syn_filt_32_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 3,600 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Deemph_32_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Deemph_32(
@ Word16 x_hi[], /* (i) : input signal (bit31..16) */
@ Word16 x_lo[], /* (i) : input signal (bit15..4) */
@ Word16 y[], /* (o) : output signal (x16) */
@ Word16 mu, /* (i) Q15 : deemphasis factor */
@ Word16 L, /* (i) : vector size */
@ Word16 * mem /* (i/o) : memory (y[-1]) */
@ )
@x_hi RN R0
@x_lo RN R1
@y[] RN R2
@*mem RN R3
.section .text
.global Deemph_32_asm

@----------------------------------------------------------------------
@ Deemph_32_asm -- de-emphasis filter on a 16+16-bit split signal.
@ C prototype (see header above): Deemph_32(x_hi, x_lo, y, mu, L, mem)
@ Register interface actually used by this implementation:
@   r0 = x_hi[], r1 = x_lo[], r2 = y[], r3 = mem (pointer to y[-1])
@ NOTE: mu is hard-coded to 22282 (LDR r5, =22282) and the frame length
@ to 64 (CMP r4, #64); the mu/L arguments of the C prototype are not
@ read. Recurrence per sample (fac = mu >> 1):
@   L_tmp = ((x_hi[i] << 16) + (x_lo[i] << 4)) << 3
@   L_tmp = L_mac(L_tmp, y[i-1], fac)        (saturating, via QDADD)
@   y[i]  = round(L_tmp << 1)                (QADD with 0x8000, >> 16)
@ Clobbers r4-r12, r14 (saved/restored via STMFD/LDMFD).
@----------------------------------------------------------------------
Deemph_32_asm:
STMFD r13!, {r4 - r12, r14}
MOV r4, #2 @ i = 2 (y[0] and y[1] are produced before LOOP)
LDRSH r6, [r0], #2 @load x_hi[0]
LDRSH r7, [r1], #2 @load x_lo[0]
LDR r5, =22282 @r5---mu (hard-coded Q15 de-emphasis factor)
MOV r11, #0x8000 @ rounding constant
@y[0]
MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
MOV r8, r5, ASR #1 @fac = mu >> 1
LDR r5, [r3] @ r5 = *mem = y[-1]
ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
MOV r10, r12, LSL #3 @L_tmp <<= 3
MUL r9, r5, r8
LDRSH r6, [r0], #2 @load x_hi[1]
QDADD r10, r10, r9 @ saturating L_tmp += 2 * (*mem * fac)
LDRSH r7, [r1], #2 @load x_lo[1]
MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
QADD r10, r12, r11 @ saturating round offset
MOV r14, r10, ASR #16 @y[0] = round(L_tmp)
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
STRH r14, [r2], #2 @update y[0]
MOV r10, r12, LSL #3
MUL r9, r14, r8 @ feed back y[0] into y[1]
QDADD r10, r10, r9
MOV r12, r10, LSL #1
QADD r10, r12, r11
MOV r14, r10, ASR #16 @y[1] = round(L_tmp)
@ Main loop: two output samples per pass (unrolled x2); the freshly
@ computed sample is kept in r14 and stored at the top of the next
@ half-iteration so the recurrence y[i] depends on y[i-1].
LOOP:
LDRSH r6, [r0], #2 @load x_hi[]
LDRSH r7, [r1], #2
STRH r14, [r2], #2 @ store previous sample
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
MUL r9, r14, r8
MOV r10, r12, LSL #3
QDADD r10, r10, r9
LDRSH r6, [r0], #2 @load x_hi[]
MOV r12, r10, LSL #1
QADD r10, r12, r11
LDRSH r7, [r1], #2
MOV r14, r10, ASR #16
MOV r10, r6, LSL #16
ADD r12, r10, r7, LSL #4
STRH r14, [r2], #2
MUL r9, r14, r8
MOV r10, r12, LSL #3
QDADD r10, r10, r9
ADD r4, r4, #2 @ i += 2
MOV r12, r10, LSL #1
QADD r10, r12, r11
CMP r4, #64 @ frame length hard-coded to 64
MOV r14, r10, ASR #16
BLT LOOP
STR r14, [r3] @ *mem = y[63]; NOTE(review): 32-bit store -- confirm mem storage is word-sized
STRH r14, [r2] @ store final y[63]
LDMFD r13!, {r4 - r12, r15}
@ENDP
.end
|
ProjectEverest/frameworks_av | 5,738 | media/module/codecs/amrwb/enc/src/asm/ARMV7/convolve_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@*void Convolve (
@* Word16 x[], /* (i) : input vector */
@* Word16 h[], /* (i) : impulse response */
@* Word16 y[], /* (o) : output vector */
@* Word16 L /* (i) : vector size */
@*)
@
@ r0 --- x[]
@ r1 --- h[]
@ r2 --- y[]
@ r3 --- L
.section .text
.global Convolve_asm

@----------------------------------------------------------------------
@ Convolve_asm -- convolution y[n] = round(2 * sum_{i=0..n} x[i]*h[n-i])
@ (rounding by +0x8000 then >>16, i.e. extract_h of the Q31 sum).
@ Register interface: r0 = x[], r1 = h[], r2 = y[], r3 = L.
@ NOTE: r3 is overwritten with 0 at entry and the length is hard-coded
@ to 64 (CMP r3, #64); the L argument is not used.
@ The outer loop produces 4 outputs per pass. Because the sum for
@ output n has n+1 terms, each of the four phases first accumulates
@ 0/1/2/3 leading products with scalar MUL/MLA so the remaining count
@ is a multiple of 4, then finishes with a 4-wide NEON MLAL loop that
@ reads h[] backwards (VREV64.16).
@ Clobbers r4-r12, r14 and Q0/Q10.
@----------------------------------------------------------------------
Convolve_asm:
STMFD r13!, {r4 - r12, r14}
MOV r3, #0 @ n = 0 (input L is ignored)
MOV r11, #0x8000 @ rounding constant
LOOP:
@ Phase 0: n % 4 == 0 -- one scalar product, then 4-wide NEON.
@MOV r8, #0 @ s = 0
ADD r4, r1, r3, LSL #1 @ tmpH address
ADD r5, r3, #1 @ i = n + 1
MOV r6, r0
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4] @ *tmpH--
SUB r5, r5, #1
VMOV.S32 Q10, #0
MUL r8, r9, r10
LOOP1:
CMP r5, #0
BLE L1
SUB r4, r4, #8
MOV r9, r4
VLD1.S16 D0, [r6]!
VLD1.S16 D1, [r9]!
VREV64.16 D1, D1 @ reverse h[] chunk to align with forward x[]
SUBS r5, r5, #4
VMLAL.S16 Q10, D0, D1
B LOOP1
L1:
VADD.S32 D20, D20, D21
VPADD.S32 D20, D20, D20 @ horizontal sum of the NEON accumulator
VMOV.S32 r5, D20[0]
ADD r5, r5, r8
ADD r5, r11, r5, LSL #1 @ (s << 1) + 0x8000
MOV r5, r5, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r5, [r2], #2 @y[n]
@ Phase 1: n % 4 == 1 -- two scalar products, then 4-wide NEON.
@MOV r8, #0
ADD r4, r1, r3, LSL #1 @tmpH address
ADD r5, r3, #1
MOV r6, r0
LDRSH r9, [r6], #2 @ *tmpX++
LDRSH r10, [r4], #-2
LDRSH r12, [r6], #2
LDRSH r14, [r4]
MUL r8, r9, r10
SUB r5, r5, #2
MLA r8, r12, r14, r8
VMOV.S32 Q10, #0
LOOP2:
CMP r5, #0
BLE L2
SUB r4, r4, #8
MOV r9, r4
VLD1.S16 D0, [r6]!
VLD1.S16 D1, [r9]!
SUBS r5, r5, #4
VREV64.16 D1, D1
VMLAL.S16 Q10, D0, D1
B LOOP2
L2:
VADD.S32 D20, D20, D21
VPADD.S32 D20, D20, D20
VMOV.S32 r5, D20[0]
ADD r8, r8, r5
ADD r8, r11, r8, LSL #1
MOV r8, r8, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r8, [r2], #2 @y[n]
@ Phase 2: n % 4 == 2 -- three scalar products, then 4-wide NEON.
@MOV r8, #0
ADD r4, r1, r3, LSL #1
ADD r5, r3, #1
MOV r6, r0
LDRSH r9, [r6], #2
LDRSH r10, [r4], #-2
LDRSH r12, [r6], #2
LDRSH r14, [r4], #-2
MUL r8, r9, r10
LDRSH r9, [r6], #2
LDRSH r10, [r4]
MLA r8, r12, r14, r8
SUB r5, r5, #3
MLA r8, r9, r10, r8
VMOV.S32 Q10, #0
LOOP3:
CMP r5, #0
BLE L3
SUB r4, r4, #8
MOV r9, r4
VLD1.S16 D0, [r6]!
VLD1.S16 D1, [r9]!
VREV64.16 D1, D1
SUBS r5, r5, #4
VMLAL.S16 Q10, D0, D1
B LOOP3
L3:
VADD.S32 D20, D20, D21
VPADD.S32 D20, D20, D20
VMOV.S32 r5, D20[0]
ADD r8, r8, r5
ADD r8, r11, r8, LSL #1
MOV r8, r8, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r8, [r2], #2 @y[n]
@ Phase 3: n % 4 == 3 -- count n+1 is already a multiple of 4,
@ so the whole sum runs in the NEON loop.
ADD r5, r3, #1 @ i = n + 1
ADD r4, r1, r5, LSL #1 @ tmpH address
MOV r6, r0
VMOV.S32 Q10, #0
LOOP4:
CMP r5, #0
BLE L4
SUB r4, r4, #8
MOV r9, r4
VLD1.S16 D0, [r6]!
VLD1.S16 D1, [r9]!
VREV64.16 D1, D1
SUBS r5, r5, #4
VMLAL.S16 Q10, D0, D1
B LOOP4
L4:
VADD.S32 D20, D20, D21
VPADD.S32 D20, D20, D20
VMOV.S32 r5, D20[0]
ADD r5, r11, r5, LSL #1
MOV r5, r5, LSR #16 @extract_h(s)
ADD r3, r3, #1
STRH r5, [r2], #2 @y[n]
CMP r3, #64 @ length hard-coded to 64
BLT LOOP
Convolve_asm_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 4,376 | media/module/codecs/amrwb/enc/src/asm/ARMV7/residu_asm_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Residu (
@ Word16 a[], /* (i) : prediction coefficients */
@ Word16 x[], /* (i) : speech signal */
@ Word16 y[], /* (o) : residual signal */
@ Word16 lg /* (i) : size of filtering */
@)
@a[] RN r0
@x[] RN r1
@y[] RN r2
@lg RN r3
.section .text
.global Residu_opt

@----------------------------------------------------------------------
@ Residu_opt -- LPC residual: y[i] = round((2 * sum_{j=0..16} a[j]*x[i-j]) << 4)
@ for a 16th-order predictor (17 coefficients a[0..16]).
@ Register interface: r0 = a[], r1 = x[], r2 = y[], r3 = lg.
@ Four samples are computed per iteration, walking i from lg-4 down to
@ 0, so lg is assumed to be a positive multiple of 4.
@ VQDMULL/VQDMLAL accumulate doubled saturating products; VQSHL #4 and
@ VQADD 0x8000 apply the final scaling and rounding, VSHRN takes the
@ high halves.
@ NOTE(review): the VLD1.S16 {D4} after the coefficient block reads 4
@ halfwords starting at a[16] but only D4[0] (= a[16]) is used -- a
@ 6-byte read past the 17-element a[] array; confirm the buffer has
@ padding.
@ Clobbers r7-r10 and NEON registers; r4-r12, r14 saved on the stack.
@----------------------------------------------------------------------
Residu_opt:
STMFD r13!, {r4 - r12, r14}
SUB r7, r3, #4 @i = lg - 4
VLD1.S16 {D0, D1, D2, D3}, [r0]! @get all a[]
VLD1.S16 {D4}, [r0]! @ a[16] in D4[0] (lanes 1-3 unused)
VMOV.S32 Q8, #0x8000 @ rounding constant
LOOP1:
ADD r9, r1, r7, LSL #1 @copy the address
ADD r10, r2, r7, LSL #1
MOV r8, r9
VLD1.S16 D5, [r8]! @get x[i], x[i+1], x[i+2], x[i+3]
VQDMULL.S16 Q10, D5, D0[0] @finish the first L_mult
SUB r8, r9, #2 @get the x[i-1] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D0[1]
SUB r8, r9, #4 @load the x[i-2] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D0[2]
SUB r8, r9, #6 @load the x[i-3] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D0[3]
SUB r8, r9, #8 @load the x[i-4] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D1[0]
SUB r8, r9, #10 @load the x[i-5] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D1[1]
SUB r8, r9, #12 @load the x[i-6] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D1[2]
SUB r8, r9, #14 @load the x[i-7] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D1[3]
SUB r8, r9, #16 @load the x[i-8] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D2[0]
SUB r8, r9, #18 @load the x[i-9] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D2[1]
SUB r8, r9, #20 @load the x[i-10] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D2[2]
SUB r8, r9, #22 @load the x[i-11] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D2[3]
SUB r8, r9, #24 @load the x[i-12] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D3[0]
SUB r8, r9, #26 @load the x[i-13] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D3[1]
SUB r8, r9, #28 @load the x[i-14] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D3[2]
SUB r8, r9, #30 @load the x[i-15] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D3[3]
SUB r8, r9, #32 @load the x[i-16] address
VLD1.S16 D5, [r8]!
VQDMLAL.S16 Q10, D5, D4[0]
SUB r7, r7, #4 @i-=4
VQSHL.S32 Q10, Q10, #4 @ saturating << 4
VQADD.S32 Q10, Q10, Q8 @ + 0x8000 (round)
VSHRN.S32 D5, Q10, #16 @ narrow to the rounded high halves
VST1.S16 D5, [r10]! @ store y[i..i+3]
CMP r7, #0
BGE LOOP1
Residu_asm_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 3,667 | media/module/codecs/amrwb/enc/src/asm/ARMV7/pred_lt4_1_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Pred_lt4(
@ Word16 exc[], /* in/out: excitation buffer */
@ Word16 T0, /* input : integer pitch lag */
@ Word16 frac, /* input : fraction of lag */
@ Word16 L_subfr /* input : subframe size */
@)
@***********************************************************************
@ r0 --- exc[]
@ r1 --- T0
@ r2 --- frac
@ r3 --- L_subfr
.section .text
.global pred_lt4_asm
.extern inter4_2
.hidden inter4_2

@----------------------------------------------------------------------
@ pred_lt4_asm -- long-term prediction with 1/4-sample resolution:
@ each exc[j] is rebuilt by filtering the past excitation with one row
@ of the 32-tap interpolation table inter4_2 (row selected by the
@ fractional lag; row stride = 64 bytes = 32 Word16).
@ Register interface: r0 = exc[], r1 = T0, r2 = frac, r3 = L_subfr.
@ The source pointer starts at exc - T0 - 15 (L_INTERPOL2 - 1 = 15
@ halfwords = 30 bytes), adjusted one further sample back when frac
@ was negative. The 32-sample window lives in Q4-Q7 and is slid one
@ sample per output with VEXT, pulling the next sample in via D24.
@ VQDMULL/VQDMLAL double each product; the final QADD r1, r12, r12
@ doubles again, giving the (L_sum << 2) noted below, then +0x8000 and
@ >>16 round to Word16.
@ The inter4_2 address is formed PC-relatively from the offset word at
@ Lable1 (position-independent code).
@ Clobbers r1, r2, r4-r6, r8, r11, r12 and NEON registers; r4-r12, r14
@ saved on the stack.
@----------------------------------------------------------------------
pred_lt4_asm:
STMFD r13!, {r4 - r12, r14}
SUB r4, r0, r1, LSL #1 @ x = exc - T0
RSB r2, r2, #0 @ frac = - frac
SUB r4, r4, #30 @ x -= L_INTERPOL2 - 1
CMP r2, #0
ADDLT r2, r2, #4 @ frac += UP_SAMP
SUBLT r4, r4, #2 @ x--
ADR r8, Lable1
LDR r11, [r8]
ADD r11, r8 @ r11 = &inter4_2 (PC-relative)
RSB r2, r2, #3 @ k = UP_SAMP - 1 - frac
MOV r8, #0 @ j = 0
ADD r11, r11, r2, LSL #6 @ get inter4_2[k][]
VLD1.S16 {Q0, Q1}, [r11]! @ 32 filter taps into Q0-Q3
VLD1.S16 {Q2, Q3}, [r11]!
MOV r6, #0x8000 @ rounding constant
VLD1.S16 {Q4, Q5}, [r4]! @load 16 x[]
VLD1.S16 {Q6, Q7}, [r4]! @load 16 x[]
LOOP:
VQDMULL.S16 Q15, D8, D0
VQDMLAL.S16 Q15, D9, D1
VQDMLAL.S16 Q15, D10, D2
VQDMLAL.S16 Q15, D11, D3
VQDMLAL.S16 Q15, D12, D4
VQDMLAL.S16 Q15, D13, D5
VQDMLAL.S16 Q15, D14, D6
VQDMLAL.S16 Q15, D15, D7
LDRSH r12, [r4], #2 @ next input sample for the sliding window
VEXT.S16 D8, D8, D9, #1
VEXT.S16 D9, D9, D10, #1
VEXT.S16 D10, D10, D11, #1
VEXT.S16 D11, D11, D12, #1
VDUP.S16 D24, r12
VEXT.S16 D12, D12, D13, #1
VEXT.S16 D13, D13, D14, #1
VQADD.S32 D30, D30, D31 @ horizontal reduction of the 8 partial sums
MOV r11, #0x8000
VPADD.S32 D30, D30, D30
ADD r8, r8, #1
VMOV.S32 r12, D30[0]
VEXT.S16 D14, D14, D15, #1
QADD r1, r12, r12 @ L_sum = (L_sum << 2)
VEXT.S16 D15, D15, D24, #1 @ window now advanced by one sample
QADD r5, r1, r6
MOV r1, r5, ASR #16
CMP r8, r3 @ j < L_subfr ?
STRH r1, [r0], #2 @ exc[j] = (L_sum + 0x8000) >> 16
BLT LOOP
pred_lt4_end:
LDMFD r13!, {r4 - r12, r15}
Lable1:
.word inter4_2-Lable1
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 9,556 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Norm_Corr_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@static void Norm_Corr (Word16 exc[], /* (i) : excitation buffer */
@ Word16 xn[], /* (i) : target vector */
@ Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
@ Word16 L_subfr, /* (i) : sub-frame length */
@ Word16 t_min, /* (i) : minimum value of pitch lag. */
@ Word16 t_max, /* (i) : maximum value of pitch lag. */
@ Word16 corr_norm[]) /* (o) Q15 : normalized correlation */
@
@ r0 --- exc[]
@ r1 --- xn[]
@ r2 --- h[]
@ r3 --- L_subfr
@ r4 --- t_min
@ r5 --- t_max
@ r6 --- corr_norm[]
.section .text
.global Norm_corr_asm
.extern Convolve_asm
.extern Isqrt_n

@----------------------------------------------------------------------
@ Norm_corr_asm -- normalized correlation over the pitch-lag range
@ [t_min, t_max] (see C prototype above).
@ Register interface: r0 = exc[], r1 = xn[], r2 = h[], r3 = L_subfr.
@ Stack layout: a 172-byte local frame (voSTACK) is carved below the
@ 10-register save area; excf[] lives at sp+20. T_MIN/T_MAX/CORR_NORM
@ (212/216/220) address the 5th/6th/7th C arguments, i.e.
@ voSTACK(172) + 40 bytes of saved registers + caller offsets 0/4/8.
@ Algorithm: excf[] = Convolve(&exc[-t_min], h); then for each lag t,
@ compute corr = xn.excf and norm = 1/sqrt(excf.excf) via Isqrt_n,
@ scale, round, and store corr_norm[t]. For the next lag, excf[] is
@ updated incrementally in LOOPK: excf[i] = excf[i-1] + exc[-t]*h[i]
@ (products >> 15) instead of re-running the convolution.
@ The subframe length is fixed at 64 samples (the dot products unroll
@ 4 x 16 samples; LOOPK counts 63 taps plus a tail).
@ Clobbers r4-r12, r14 and NEON registers.
@----------------------------------------------------------------------
@******************************
@ constant
@******************************
.equ EXC , 0
.equ XN , 4
.equ H , 8
.equ L_SUBFR , 12
.equ voSTACK , 172
.equ T_MIN , 212
.equ T_MAX , 216
.equ CORR_NORM , 220
Norm_corr_asm:
STMFD r13!, {r4 - r12, r14}
SUB r13, r13, #voSTACK
ADD r8, r13, #20 @get the excf[L_SUBFR]
LDR r4, [r13, #T_MIN] @get t_min
RSB r11, r4, #0 @k = -t_min
ADD r5, r0, r11, LSL #1 @get the &exc[k]
@transfer Convolve function
STMFD sp!, {r0 - r3} @ preserve args across the call
MOV r0, r5
MOV r1, r2
MOV r2, r8 @r2 --- excf[]
BL Convolve_asm
LDMFD sp!, {r0 - r3}
@ r8 --- excf[]
@ Energy of the target vector xn[]: 64-sample dot product with
@ itself, used to derive the global scale factor below.
MOV r14, r1 @copy xn[] address
MOV r7, #1
VLD1.S16 {Q0, Q1}, [r14]!
VLD1.S16 {Q2, Q3}, [r14]!
VLD1.S16 {Q4, Q5}, [r14]!
VLD1.S16 {Q6, Q7}, [r14]!
VMULL.S16 Q10, D0, D0
VMLAL.S16 Q10, D1, D1
VMLAL.S16 Q10, D2, D2
VMLAL.S16 Q10, D3, D3
VMLAL.S16 Q10, D4, D4
VMLAL.S16 Q10, D5, D5
VMLAL.S16 Q10, D6, D6
VMLAL.S16 Q10, D7, D7
VMLAL.S16 Q10, D8, D8
VMLAL.S16 Q10, D9, D9
VMLAL.S16 Q10, D10, D10
VMLAL.S16 Q10, D11, D11
VMLAL.S16 Q10, D12, D12
VMLAL.S16 Q10, D13, D13
VMLAL.S16 Q10, D14, D14
VMLAL.S16 Q10, D15, D15
VQADD.S32 D20, D20, D21
VMOV.S32 r9, D20[0]
VMOV.S32 r10, D20[1]
QADD r6, r9, r10
QADD r6, r6, r6
QADD r9, r6, r7 @L_tmp = (L_tmp << 1) + 1;
CLZ r7, r9
SUB r6, r7, #1 @exp = norm_l(L_tmp)
RSB r7, r6, #32 @exp = 32 - exp
MOV r6, r7, ASR #1
RSB r7, r6, #0 @scale = -(exp >> 1)
@loop for every possible period
@for(t = t_min@ t <= t_max@ t++)
@r7 --- scale r4 --- t_min r8 --- excf[]
LOOPFOR:
@ Per-lag dot products over 64 samples, two accumulators in
@ parallel: Q10 = excf.excf (energy), Q11 = xn.excf (correlation).
ADD r14, r13, #20 @copy of excf[]
MOV r12, r1 @copy of xn[]
MOV r8, #0x8000 @ rounding constant for the final vo_round
VLD1.S16 {Q0, Q1}, [r14]! @ load 16 excf[]
VLD1.S16 {Q2, Q3}, [r14]! @ load 16 excf[]
VLD1.S16 {Q4, Q5}, [r12]! @ load 16 x[]
VLD1.S16 {Q6, Q7}, [r12]! @ load 16 x[]
VMULL.S16 Q10, D0, D0 @L_tmp1 += excf[] * excf[]
VMULL.S16 Q11, D0, D8 @L_tmp += x[] * excf[]
VMLAL.S16 Q10, D1, D1
VMLAL.S16 Q11, D1, D9
VMLAL.S16 Q10, D2, D2
VMLAL.S16 Q11, D2, D10
VMLAL.S16 Q10, D3, D3
VMLAL.S16 Q11, D3, D11
VMLAL.S16 Q10, D4, D4
VMLAL.S16 Q11, D4, D12
VMLAL.S16 Q10, D5, D5
VMLAL.S16 Q11, D5, D13
VMLAL.S16 Q10, D6, D6
VMLAL.S16 Q11, D6, D14
VMLAL.S16 Q10, D7, D7
VMLAL.S16 Q11, D7, D15
VLD1.S16 {Q0, Q1}, [r14]! @ load 16 excf[]
VLD1.S16 {Q2, Q3}, [r14]! @ load 16 excf[]
VLD1.S16 {Q4, Q5}, [r12]! @ load 16 x[]
VLD1.S16 {Q6, Q7}, [r12]! @ load 16 x[]
VMLAL.S16 Q10, D0, D0
VMLAL.S16 Q11, D0, D8
VMLAL.S16 Q10, D1, D1
VMLAL.S16 Q11, D1, D9
VMLAL.S16 Q10, D2, D2
VMLAL.S16 Q11, D2, D10
VMLAL.S16 Q10, D3, D3
VMLAL.S16 Q11, D3, D11
VMLAL.S16 Q10, D4, D4
VMLAL.S16 Q11, D4, D12
VMLAL.S16 Q10, D5, D5
VMLAL.S16 Q11, D5, D13
VMLAL.S16 Q10, D6, D6
VMLAL.S16 Q11, D6, D14
VMLAL.S16 Q10, D7, D7
VMLAL.S16 Q11, D7, D15
VQADD.S32 D20, D20, D21
VQADD.S32 D22, D22, D23
VPADD.S32 D20, D20, D20 @D20[0] --- L_tmp1 << 1
VPADD.S32 D22, D22, D22 @D22[0] --- L_tmp << 1
VMOV.S32 r6, D20[0]
VMOV.S32 r5, D22[0]
@r5 --- L_tmp, r6 --- L_tmp1
MOV r10, #1
ADD r5, r10, r5, LSL #1 @L_tmp = (L_tmp << 1) + 1
ADD r6, r10, r6, LSL #1 @L_tmp1 = (L_tmp1 << 1) + 1
CLZ r10, r5
CMP r5, #0
RSBLT r11, r5, #0 @ norm_l of a negative value: count on -L_tmp
CLZLT r10, r11
SUB r10, r10, #1 @exp = norm_l(L_tmp)
MOV r5, r5, LSL r10 @L_tmp = (L_tmp << exp)
RSB r10, r10, #30 @exp_corr = 30 - exp
MOV r11, r5, ASR #16 @corr = extract_h(L_tmp)
CLZ r5, r6
SUB r5, r5, #1
MOV r6, r6, LSL r5 @L_tmp = (L_tmp1 << exp)
RSB r5, r5, #30 @exp_norm = 30 - exp
@r10 --- exp_corr, r11 --- corr
@r6 --- L_tmp, r5 --- exp_norm
@Isqrt_n(&L_tmp, &exp_norm)
@ Isqrt_n takes pointers; its in/out operands are spilled to the
@ stack around the call and reloaded afterwards.
MOV r14, r0
MOV r12, r1
STMFD sp!, {r0 - r4, r7 - r12, r14}
ADD r1, sp, #4
ADD r0, sp, #0
STR r6, [sp]
STRH r5, [sp, #4]
BL Isqrt_n
LDR r6, [sp]
LDRSH r5, [sp, #4]
LDMFD sp!, {r0 - r4, r7 - r12, r14}
MOV r0, r14
MOV r1, r12
MOV r6, r6, ASR #16 @norm = extract_h(L_tmp)
MUL r12, r6, r11
ADD r12, r12, r12 @L_tmp = vo_L_mult(corr, norm)
ADD r6, r10, r5
ADD r6, r6, r7 @exp_corr + exp_norm + scale
CMP r6, #0
RSBLT r6, r6, #0
MOVLT r12, r12, ASR r6
MOVGT r12, r12, LSL r6 @L_tmp = L_shl(L_tmp, exp_corr + exp_norm + scale)
ADD r12, r12, r8
MOV r12, r12, ASR #16 @vo_round(L_tmp)
LDR r5, [r13, #CORR_NORM] @ get corr_norm address
LDR r6, [r13, #T_MAX] @ get t_max
ADD r10, r5, r4, LSL #1 @ get corr_norm[t] address
STRH r12, [r10] @ corr_norm[t] = vo_round(L_tmp)
CMP r4, r6
BEQ Norm_corr_asm_end
@ Incremental excf[] update for lag t+1:
@ excf[i] = excf[i-1] + (exc[-t-1] * h[i]) >> 15, walking i backwards.
ADD r4, r4, #1 @ t_min ++
RSB r5, r4, #0 @ k
MOV r6, #63 @ i = 63
MOV r8, r0 @ exc[]
MOV r9, r2 @ h[]
ADD r10, r13, #20 @ excf[]
ADD r8, r8, r5, LSL #1 @ exc[k] address
ADD r9, r9, r6, LSL #1 @ h[i] address
ADD r10, r10, r6, LSL #1 @ excf[i] address
LDRSH r11, [r8] @ tmp = exc[k]
LOOPK:
LDRSH r8, [r9], #-2 @ load h[i]
LDRSH r12, [r10, #-2] @ load excf[i - 1]
MUL r14, r11, r8
MOV r8, r14, ASR #15
ADD r14, r8, r12
STRH r14, [r10], #-2
SUBS r6, r6, #1
BGT LOOPK
@ Tail: excf[0] = (exc[k] * h[0]) >> 15 (no excf[-1] term).
LDRSH r8, [r9] @ load h[0]
MUL r14, r11, r8
LDR r6, [r13, #T_MAX] @ get t_max
MOV r8, r14, ASR #15
STRH r8, [r10]
CMP r4, r6
BLE LOOPFOR
Norm_corr_asm_end:
ADD r13, r13, #voSTACK
LDMFD r13!, {r4 - r12, r15}
.end
|
ProjectEverest/frameworks_av | 7,778 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Filt_6k_7k_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@**********************************************************************/
@void Filt_6k_7k(
@ Word16 signal[], /* input: signal */
@ Word16 lg, /* input: length of input */
@ Word16 mem[] /* in/out: memory (size=30) */
@)
@***********************************************************************
@ r0 --- signal[]
@ r1 --- lg
@ r2 --- mem[]
.section .text
.global Filt_6k_7k_asm
.extern fir_6k_7k
.hidden fir_6k_7k

@----------------------------------------------------------------------
@ Filt_6k_7k_asm -- band-pass FIR filter (fir_6k_7k taps) on an
@ 80-sample block with a 30-sample delay line.
@ Register interface: r0 = signal[] (in/out), r1 = lg, r2 = mem[].
@ NOTE: the block length is hard-coded to 80 (CMP r5, #80 and the
@ x + 160-byte mem update); the lg argument is not read.
@ Layout: a 240-byte stack buffer x[] holds mem[0..29] followed by
@ signal[i] >> 2 for 80 samples. The main loop computes 8 outputs per
@ pass: 32 coefficients are applied via VMLAL with the input window in
@ Q4-Q8, sliding one sample per tap pair with VEXT; the 32nd
@ coefficient slot is forced to zero (VMOV.S16 D7[3], r5) so only the
@ 31 real taps contribute. VQRSHRN #15 rounds back to Q15 Word16.
@ On exit, the last 30 processed inputs (x + 160) are copied back to
@ mem[]. The fir_6k_7k address is formed PC-relatively from Lable1.
@ NOTE(review): the mem copy loads 32 halfwords (2 x 32-byte VLD1) but
@ stores only 30 -- a 4-byte read past mem[30]; confirm the caller's
@ buffer has padding.
@ Clobbers r0-r12, r14 (all saved/restored) and NEON registers.
@----------------------------------------------------------------------
Filt_6k_7k_asm:
STMFD r13!, {r0 - r12, r14}
SUB r13, r13, #240 @ x[L_SUBFR16k + (L_FIR - 1)]
MOV r8, r0 @ copy signal[] address
MOV r5, r2 @ copy mem[] address
@ Copy mem[0..29] to the head of the stack buffer x[].
MOV r0, r2
MOV r1, r13
VLD1.S16 {D0, D1, D2, D3}, [r0]!
VLD1.S16 {D4, D5, D6, D7}, [r0]!
VST1.S16 {D0, D1, D2, D3}, [r1]!
VST1.S16 {D4, D5, D6}, [r1]!
VST1.S16 D7[0], [r1]!
VST1.S16 D7[1], [r1]!
ADR r3, Lable1 @ get fir_7k address
LDR r10, [r3]
ADD r10, r3 @ r10 = &fir_6k_7k (PC-relative)
MOV r3, r8 @ change myMemCopy to Copy, due to Copy will change r3 content
ADD r6, r13, #60 @ get x[L_FIR - 1] address
MOV r7, r3 @ get signal[i]
@for (i = lg - 1@ i >= 0@ i--)
@{
@ x[i + L_FIR - 1] = signal[i] >> 2@
@}
VLD1.S16 {Q0, Q1}, [r7]! @ signal[0] ~ signal[15]
VLD1.S16 {Q2, Q3}, [r7]! @ signal[16] ~ signal[31]
VLD1.S16 {Q4, Q5}, [r7]! @ signal[32] ~ signal[47]
VLD1.S16 {Q6, Q7}, [r7]! @ signal[48] ~ signal[63]
VLD1.S16 {Q8, Q9}, [r7]! @ signal[64] ~ signal[79]
VSHR.S16 Q10, Q0, #2
VSHR.S16 Q11, Q1, #2
VSHR.S16 Q12, Q2, #2
VSHR.S16 Q13, Q3, #2
VST1.S16 {Q10, Q11}, [r6]!
VSHR.S16 Q0, Q4, #2
VSHR.S16 Q1, Q5, #2
VSHR.S16 Q10, Q6, #2
VSHR.S16 Q11, Q7, #2
VSHR.S16 Q2, Q8, #2
VSHR.S16 Q3, Q9, #2
VST1.S16 {Q12, Q13}, [r6]!
VST1.S16 {Q0, Q1}, [r6]!
VST1.S16 {Q10, Q11}, [r6]!
VST1.S16 {Q2, Q3}, [r6]!
MOV r12, r5 @ stash mem[] address for the epilogue
@STR r5, [sp, #-4] @ PUSH r5 to stack
@ not use registers: r4, r10, r12, r14, r5
MOV r4, r13
MOV r5, #0 @ i = 0
@ r4 --- x[i], r10 ---- fir_6k_7k
VLD1.S16 {Q0, Q1}, [r10]! @fir_6k_7k[0] ~ fir_6k_7k[15]
VLD1.S16 {Q2, Q3}, [r10]! @fir_6k_7k[16] ~ fir_6k_7k[31]
VMOV.S16 D7[3], r5 @set fir_6k_7K = 0 (32nd, unused tap)
VLD1.S16 {Q4, Q5}, [r4]! @x[0] ~ x[15]
VLD1.S16 {Q6, Q7}, [r4]! @x[16] ~ X[31]
VLD1.S16 {Q8}, [r4]!
VMOV.S16 Q15, #0 @ zero source for sliding in past the window end
LOOP_6K7K:
@ Eight outputs per pass. Q9/Q10 and Q11/Q12 accumulate two
@ interleaved 4-sample output groups; after each group of four
@ taps the window Q4-Q8 slides one sample (VEXT ... #2).
VMULL.S16 Q9,D8,D0[0]
VMULL.S16 Q10,D9,D1[0]
VMULL.S16 Q11,D9,D0[0]
VMULL.S16 Q12,D10,D1[0]
VEXT.8 Q4,Q4,Q5,#2
VMLAL.S16 Q9,D10,D2[0]
VMLAL.S16 Q10,D11,D3[0]
VMLAL.S16 Q11,D11,D2[0]
VMLAL.S16 Q12,D12,D3[0]
VEXT.8 Q5,Q5,Q6,#2
VMLAL.S16 Q9,D12,D4[0]
VMLAL.S16 Q10,D13,D5[0]
VMLAL.S16 Q11,D13,D4[0]
VMLAL.S16 Q12,D14,D5[0]
VEXT.8 Q6,Q6,Q7,#2
VMLAL.S16 Q9,D14,D6[0]
VMLAL.S16 Q10,D15,D7[0]
VMLAL.S16 Q11,D15,D6[0]
VMLAL.S16 Q12,D16,D7[0]
VEXT.8 Q7,Q7,Q8,#2
VMLAL.S16 Q9,D8,D0[1]
VMLAL.S16 Q10,D9,D1[1]
VEXT.8 Q8,Q8,Q15,#2
VMLAL.S16 Q11,D9,D0[1]
VMLAL.S16 Q12,D10,D1[1]
VEXT.8 Q4,Q4,Q5,#2
VMLAL.S16 Q9,D10,D2[1]
VMLAL.S16 Q10,D11,D3[1]
VMLAL.S16 Q11,D11,D2[1]
VMLAL.S16 Q12,D12,D3[1]
VEXT.8 Q5,Q5,Q6,#2
VMLAL.S16 Q9,D12,D4[1]
VMLAL.S16 Q10,D13,D5[1]
VMLAL.S16 Q11,D13,D4[1]
VMLAL.S16 Q12,D14,D5[1]
VEXT.8 Q6,Q6,Q7,#2
VMLAL.S16 Q9,D14,D6[1]
VMLAL.S16 Q10,D15,D7[1]
VMLAL.S16 Q11,D15,D6[1]
VMLAL.S16 Q12,D16,D7[1]
VEXT.8 Q7,Q7,Q8,#2
VMLAL.S16 Q9,D8,D0[2]
VMLAL.S16 Q10,D9,D1[2]
VEXT.8 Q8,Q8,Q15,#2
VMLAL.S16 Q11,D9,D0[2]
VMLAL.S16 Q12,D10,D1[2]
VEXT.8 Q4,Q4,Q5,#2
VMLAL.S16 Q9,D10,D2[2]
VMLAL.S16 Q10,D11,D3[2]
VMLAL.S16 Q11,D11,D2[2]
VMLAL.S16 Q12,D12,D3[2]
VEXT.8 Q5,Q5,Q6,#2
VMLAL.S16 Q9,D12,D4[2]
VMLAL.S16 Q10,D13,D5[2]
VMLAL.S16 Q11,D13,D4[2]
VMLAL.S16 Q12,D14,D5[2]
VEXT.8 Q6,Q6,Q7,#2
VMLAL.S16 Q9,D14,D6[2]
VMLAL.S16 Q10,D15,D7[2]
VMLAL.S16 Q11,D15,D6[2]
VMLAL.S16 Q12,D16,D7[2]
VEXT.8 Q7,Q7,Q8,#2
VMLAL.S16 Q9,D8,D0[3]
VMLAL.S16 Q10,D9,D1[3]
VEXT.8 Q8,Q8,Q15,#2
VMLAL.S16 Q11,D9,D0[3]
VMLAL.S16 Q12,D10,D1[3]
VEXT.8 Q4,Q4,Q5,#2
VMLAL.S16 Q9,D10,D2[3]
VMLAL.S16 Q10,D11,D3[3]
VMLAL.S16 Q11,D11,D2[3]
VMLAL.S16 Q12,D12,D3[3]
VEXT.8 Q5,Q5,Q6,#2
VMLAL.S16 Q9,D12,D4[3]
VMLAL.S16 Q10,D13,D5[3]
VMLAL.S16 Q11,D13,D4[3]
VMLAL.S16 Q12,D14,D5[3]
VEXT.8 Q6,Q6,Q7,#2
VMLAL.S16 Q9,D14,D6[3]
VMLAL.S16 Q10,D15,D7[3]
VMLAL.S16 Q11,D15,D6[3]
VMLAL.S16 Q12,D16,D7[3]
VEXT.8 Q7,Q7,Q8,#2
@ Realign the window registers for the next 8-output pass and
@ round/narrow the two accumulated output groups.
VMOV.S16 D8,D9
VEXT.8 Q8,Q8,Q15,#2
VMOV.S16 D9,D10
VADD.S32 Q9,Q9,Q10
VMOV.S16 D10,D11
VMOV.S16 D11,D12
VADD.S32 Q11,Q11,Q12
VMOV.S16 D12,D13
VQRSHRN.S32 D28,Q9,#15 @ rounding saturating narrow to Q15
VMOV.S16 D13,D14
VMOV.S16 D14,D15
VQRSHRN.S32 D29,Q11,#15
VMOV.S16 D15,D16
VLD1.S16 {Q8},[r4]! @ next 8 input samples
ADD r5, r5, #8
CMP r5, #80 @ block length hard-coded to 80
VST1.S16 {D28,D29},[r3]! @ write 8 output samples to signal[]
BLT LOOP_6K7K
@ Save the last L_FIR-1 = 30 inputs back into mem[] for next call.
ADD r0, r13, #160 @x + lg
MOV r1, r12
@LDR r1, [sp, #-4] @mem address
VLD1.S16 {D0, D1, D2, D3}, [r0]!
VLD1.S16 {D4, D5, D6, D7}, [r0]!
VST1.S16 {D0, D1, D2, D3}, [r1]!
VST1.S16 {D4, D5, D6}, [r1]!
VST1.S16 D7[0], [r1]!
VST1.S16 D7[1], [r1]!
Filt_6k_7k_end:
ADD r13, r13, #240
LDMFD r13!, {r0 - r12, r15}
Lable1:
.word fir_6k_7k-Lable1
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 3,845 | media/module/codecs/amrwb/enc/src/asm/ARMV7/syn_filt_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@void Syn_filt(
@ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
@ Word16 x[], /* (i) : input signal */
@ Word16 y[], /* (o) : output signal */
@ Word16 mem[], /* (i/o) : memory associated with this filtering. */
@)
@***********************************************************************
@ a[] --- r0
@ x[] --- r1
@ y[] --- r2
@ mem[] --- r3
@ m --- 16 lg --- 80 update --- 1
.section .text
.global Syn_filt_asm
@ Syn_filt_asm: 16th-order LPC synthesis filter for one 80-sample subframe.
@ r0 = a[] (Q12 coefficients, a[0] first), r1 = x[] (input), r2 = y[] (output),
@ r3 = mem[] (16 Word16 of filter history, rewritten with the last 16 outputs
@ on exit).  Each output is x[i]*a0 minus the NEON dot product of a[1..16]
@ with the previous 16 outputs, narrowed with rounding/saturation by
@ VQRSHRN #12 (undoes the Q12 coefficient scaling).
Syn_filt_asm:
STMFD r13!, {r4 - r12, r14}
@ NOTE(review): 700 is not a multiple of 8, so sp is only 4-byte aligned in
@ this frame; the VLD1/VST1 below carry no alignment qualifiers so this
@ works, but confirm no AAPCS 8-byte-alignment requirement is violated.
SUB r13, r13, #700 @ y_buf[L_FRAME16k + M16k]
MOV r4, r3 @ copy mem[] address
MOV r5, r13 @ copy yy = y_buf address
@ for(i = 0@ i < m@ i++)
@{
@ *yy++ = mem[i]@
@}
VLD1.S16 {D0, D1, D2, D3}, [r4]! @load 16 mems
VST1.S16 {D0, D1, D2, D3}, [r5]! @store 16 mem[] to *yy
LDRSH r5, [r0], #2 @ load a[0]
MOV r8, #0 @ i = 0
MOV r5, r5, ASR #1 @ a0 = a[0] >> 1
@ NOTE(review): D8[0] is written here but never read again in this
@ function; presumably leftover from an earlier version — confirm.
VMOV.S16 D8[0], r5
@ load all a[]
VLD1.S16 {D0, D1, D2, D3}, [r0]! @ load a[1] ~ a[16]
@ Reverse the halfwords in each 64-bit lane so the coefficient order in
@ D0-D3 lines up with the oldest-to-newest history held in D4-D7 below.
VREV64.16 D0, D0
VREV64.16 D1, D1
VREV64.16 D2, D2
VREV64.16 D3, D3
MOV r8, #0 @ loop times
MOV r10, r13 @ temp = y_buf
ADD r4, r13, #32 @ yy[i] address (skip the 16 history halfwords)
VLD1.S16 {D4, D5, D6, D7}, [r10]! @ first 16 temp_p
SYN_LOOP:
LDRSH r6, [r1], #2 @ load x[i]
MUL r12, r6, r5 @ L_tmp = x[i] * a0
ADD r10, r4, r8, LSL #1 @ y[i], yy[i] address
VDUP.S32 Q10, r12
@ Q5 = 4 partial sums of a[j] * y[i-j] over the 16-tap history.
VMULL.S16 Q5, D3, D4
VMLAL.S16 Q5, D2, D5
VMLAL.S16 Q5, D1, D6
VMLAL.S16 Q5, D0, D7
@ Slide the history window forward one halfword (D7's slot is refilled
@ with the new output below via VEXT D7, D20).
VEXT.8 D4, D4, D5, #2
VEXT.8 D5, D5, D6, #2
VEXT.8 D6, D6, D7, #2
VPADD.S32 D12, D10, D11 @ reduce the 4 partial sums pairwise...
ADD r8, r8, #1
VPADD.S32 D10, D12, D12 @ ...to a single scalar in D10[0]
VDUP.S32 Q7, D10[0]
VSUB.S32 Q9, Q10, Q7 @ L_tmp = x[i]*a0 - sum(a[j]*y[i-j])
VQRSHRN.S32 D20, Q9, #12 @ round/saturate back to Word16
VMOV.S16 r9, D20[0]
VEXT.8 D7, D7, D20, #2 @ append new y[i] to the history window
CMP r8, #80
STRH r9, [r10] @ yy[i]
STRH r9, [r2], #2 @ y[i]
BLT SYN_LOOP
@ update mem[]
ADD r5, r13, #160 @ yy[64] address (last 16 outputs in y_buf)
VLD1.S16 {D0, D1, D2, D3}, [r5]!
VST1.S16 {D0, D1, D2, D3}, [r3]!
Syn_filt_asm_end:
ADD r13, r13, #700
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
.end
|
ProjectEverest/frameworks_av | 4,847 | media/module/codecs/amrwb/enc/src/asm/ARMV7/Dot_p_neon.s | @/*
@ ** Copyright 2003-2010, VisualOn, Inc.
@ **
@ ** Licensed under the Apache License, Version 2.0 (the "License");
@ ** you may not use this file except in compliance with the License.
@ ** You may obtain a copy of the License at
@ **
@ ** http://www.apache.org/licenses/LICENSE-2.0
@ **
@ ** Unless required by applicable law or agreed to in writing, software
@ ** distributed under the License is distributed on an "AS IS" BASIS,
@ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ ** See the License for the specific language governing permissions and
@ ** limitations under the License.
@ */
@
@**********************************************************************/
@Word32 Dot_product12( /* (o) Q31: normalized result (1 < val <= -1) */
@ Word16 x[], /* (i) 12bits: x vector */
@ Word16 y[], /* (i) 12bits: y vector */
@ Word16 lg, /* (i) : vector length */
@ Word16 * exp /* (o) : exponent of result (0..+30) */
@)
@************************************************************************
@ x[] --- r0
@ y[] --- r1
@ lg --- r2
@ *exp --- r3
.section .text
.global Dot_product12_asm
@ Word32 Dot_product12(Word16 x[], Word16 y[], Word16 lg, Word16 *exp)
@ r0 = x[], r1 = y[], r2 = lg (this routine handles lg == 64 or lg == 80),
@ r3 = exp (Word16 out).  Returns in r0 the dot product of x[] and y[],
@ doubled, +1, then left-shifted into Q31 normalisation; *exp receives
@ 30 - shift.  When x and y alias (r0 == r1) the LOOP_EQ energy path is
@ taken so each 16-sample chunk is loaded only once and squared.
Dot_product12_asm:
STMFD r13!, {r4 - r12, r14}
CMP r0, r1
BEQ LOOP_EQ @ x == y: compute sum of squares instead
VLD1.S16 {Q0, Q1}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q2, Q3}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q4, Q5}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q6, Q7}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q8, Q9}, [r1]! @load 16 Word16 y[]
VLD1.S16 {Q10, Q11}, [r1]! @load 16 Word16 y[]
VLD1.S16 {Q12, Q13}, [r1]! @load 16 Word16 y[]
@ Accumulate x[i]*y[i] for the first 64 samples into the 4 lanes of Q15.
VMULL.S16 Q15, D16, D0
VMLAL.S16 Q15, D17, D1
VMLAL.S16 Q15, D18, D2
VMLAL.S16 Q15, D19, D3
VLD1.S16 {Q0, Q1}, [r1]! @load 16 Word16 y[]
VMLAL.S16 Q15, D20, D4
VMLAL.S16 Q15, D21, D5
VMLAL.S16 Q15, D22, D6
VMLAL.S16 Q15, D23, D7
VMLAL.S16 Q15, D24, D8
VMLAL.S16 Q15, D25, D9
VMLAL.S16 Q15, D26, D10
VMLAL.S16 Q15, D27, D11
VMLAL.S16 Q15, D0, D12
VMLAL.S16 Q15, D1, D13
VMLAL.S16 Q15, D2, D14
VMLAL.S16 Q15, D3, D15
CMP r2, #64
BEQ Lable1
@ lg == 80: fold in the remaining 16 samples.
VLD1.S16 {Q0, Q1}, [r0]! @load 16 Word16 x[]
VLD1.S16 {Q2, Q3}, [r1]!
VMLAL.S16 Q15, D4, D0
VMLAL.S16 Q15, D5, D1
VMLAL.S16 Q15, D6, D2
VMLAL.S16 Q15, D7, D3
@ Fix: plain branch instead of the original BL, which pointlessly wrote
@ the link register (harmless only because r14 is never read again and
@ the return pc is restored from the stack at Dot_product12_end).
B Lable1
LOOP_EQ:
@ x == y: load each chunk once and accumulate x[i]^2 into Q15.
VLD1.S16 {Q0, Q1}, [r0]!
VLD1.S16 {Q2, Q3}, [r0]!
VLD1.S16 {Q4, Q5}, [r0]!
VLD1.S16 {Q6, Q7}, [r0]!
VMULL.S16 Q15, D0, D0
VMLAL.S16 Q15, D1, D1
VMLAL.S16 Q15, D2, D2
VMLAL.S16 Q15, D3, D3
VMLAL.S16 Q15, D4, D4
VMLAL.S16 Q15, D5, D5
VMLAL.S16 Q15, D6, D6
VMLAL.S16 Q15, D7, D7
VMLAL.S16 Q15, D8, D8
VMLAL.S16 Q15, D9, D9
VMLAL.S16 Q15, D10, D10
VMLAL.S16 Q15, D11, D11
VMLAL.S16 Q15, D12, D12
VMLAL.S16 Q15, D13, D13
VMLAL.S16 Q15, D14, D14
VMLAL.S16 Q15, D15, D15
CMP r2, #64
BEQ Lable1
VLD1.S16 {Q0, Q1}, [r0]!
VMLAL.S16 Q15, D0, D0
VMLAL.S16 Q15, D1, D1
VMLAL.S16 Q15, D2, D2
VMLAL.S16 Q15, D3, D3
Lable1:
@ Reduce the 4 accumulator lanes to one scalar, then normalise.
VQADD.S32 D30, D30, D31
VPADD.S32 D30, D30, D30
VMOV.S32 r12, D30[0]
ADD r12, r12, r12
ADD r12, r12, #1 @ L_sum = (L_sum << 1) + 1
MOV r4, r12
CMP r12, #0
RSBLT r4, r12, #0 @ r4 = |L_sum| for the norm computation
CLZ r10, r4
SUB r10, r10, #1 @ sft = norm_l(L_sum)
MOV r0, r12, LSL r10 @ L_sum = L_sum << sft (Q31 result in r0)
RSB r11, r10, #30 @ *exp = 30 - sft
STRH r11, [r3]
Dot_product12_end:
LDMFD r13!, {r4 - r12, r15}
.end
|
Project-Mist-OS/packages_modules_Virtualization | 4,679 | vmbase/exceptions.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Saves the volatile registers onto the stack. This currently takes 14
 * instructions, so it can be used in exception handlers with 18 instructions
 * left.
 *
 * On return, x0 and x1 are initialised to elr_el1 and spsr_el1 respectively,
 * which can be used as the first and second arguments of a subsequent call.
 * (The code below reads the EL1 registers; an earlier revision of this
 * comment said elr_el2/spsr_el2.)
 */
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
stp x0, x1, [sp, #-(8 * 24)]!
stp x2, x3, [sp, #8 * 2]
stp x4, x5, [sp, #8 * 4]
stp x6, x7, [sp, #8 * 6]
stp x8, x9, [sp, #8 * 8]
stp x10, x11, [sp, #8 * 10]
stp x12, x13, [sp, #8 * 12]
stp x14, x15, [sp, #8 * 14]
stp x16, x17, [sp, #8 * 16]
str x18, [sp, #8 * 18]
stp x29, x30, [sp, #8 * 20]
/*
 * Save elr_el1 & spsr_el1. This is so that we can take a nested exception
 * and still be able to unwind.
 */
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm
/**
 * Restores the volatile registers from the stack. This currently takes 14
 * instructions, so it can be used in exception handlers while still leaving 18
 * instructions left; if paired with save_volatile_to_stack, there are 4
 * instructions to spare.
 */
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
ldp x2, x3, [sp, #8 * 2]
ldp x4, x5, [sp, #8 * 4]
ldp x6, x7, [sp, #8 * 6]
ldp x8, x9, [sp, #8 * 8]
ldp x10, x11, [sp, #8 * 10]
ldp x12, x13, [sp, #8 * 12]
ldp x14, x15, [sp, #8 * 14]
ldp x16, x17, [sp, #8 * 16]
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]
/* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
/* Restore x0 & x1, and release stack space. */
ldp x0, x1, [sp], #8 * 24
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
 * the work, then switching back to SP0 before returning.
 *
 * Switching to SPx and calling the Rust handler takes 16 instructions. To
 * restore and return we need an additional 16 instructions, so we can implement
 * the whole handler within the allotted 32 instructions (one 0x80-byte
 * vector slot).
 */
.macro current_exception_sp0 handler:req
msr spsel, #1
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
msr spsel, #0
eret
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SPx. It saves volatile registers, calls the Rust handler, restores volatile
 * registers, then returns.
 *
 * This also works for exceptions taken from EL0, if we don't care about
 * non-volatile registers.
 *
 * Saving state and jumping to the Rust handler takes 15 instructions, and
 * restoring and returning also takes 15 instructions, so we can fit the whole
 * handler in 30 instructions, under the limit of 32.
 */
.macro current_exception_spx handler:req
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
eret
.endm
/*
 * EL1 exception vector table: 16 slots of 0x80 bytes (hence the .balign
 * directives), in the architecturally mandated order — four exception
 * types (sync, IRQ, FIQ, SError) for each of: current EL with SP_EL0,
 * current EL with SP_ELx, lower EL using AArch64, lower EL using AArch32.
 * Installed into VBAR_EL1 by the entry code; each slot expands to at most
 * 32 instructions (see the handler macros above).
 */
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
vector_table_el1:
/* Current EL with SP_EL0. */
sync_cur_sp0:
current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
current_exception_sp0 serr_current
.balign 0x80
/* Current EL with SP_ELx. */
sync_cur_spx:
current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
current_exception_spx serr_current
.balign 0x80
/* Lower EL, AArch64. */
sync_lower_64:
current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
current_exception_spx serr_lower
.balign 0x80
/* Lower EL, AArch32. */
sync_lower_32:
current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
current_exception_spx serr_lower
|
Project-Mist-OS/packages_modules_Virtualization | 1,788 | vmbase/exceptions_panic.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/**
 * The following table is intended to trap any fault resulting from the very
 * first memory accesses. They assume that PSCI v0.2 is available and provides
 * the PSCI_SYSTEM_RESET call in an attempt to gracefully exit but otherwise
 * results in the core busy-looping.
 *
 * Standard 16-slot layout (0x80 bytes per slot); every slot expands the
 * reset_or_hang macro from common.h.  Installed into VBAR_EL1 at the very
 * start of `entry`, before the MMU is on.
 */
.section .text.vector_table_panic, "ax"
.global vector_table_panic
.balign 0x800
vector_table_panic:
/* Current EL with SP_EL0. */
sync_cur_sp0_panic:
reset_or_hang
.balign 0x80
irq_cur_sp0_panic:
reset_or_hang
.balign 0x80
fiq_cur_sp0_panic:
reset_or_hang
.balign 0x80
serr_cur_sp0_panic:
reset_or_hang
.balign 0x80
/* Current EL with SP_ELx. */
sync_cur_spx_panic:
reset_or_hang
.balign 0x80
irq_cur_spx_panic:
reset_or_hang
.balign 0x80
fiq_cur_spx_panic:
reset_or_hang
.balign 0x80
serr_cur_spx_panic:
reset_or_hang
.balign 0x80
/* Lower EL, AArch64. */
sync_lower_64_panic:
reset_or_hang
.balign 0x80
irq_lower_64_panic:
reset_or_hang
.balign 0x80
fiq_lower_64_panic:
reset_or_hang
.balign 0x80
serr_lower_64_panic:
reset_or_hang
.balign 0x80
/* Lower EL, AArch32. */
sync_lower_32_panic:
reset_or_hang
.balign 0x80
irq_lower_32_panic:
reset_or_hang
.balign 0x80
fiq_lower_32_panic:
reset_or_hang
.balign 0x80
serr_lower_32_panic:
reset_or_hang
|
Project-Mist-OS/packages_modules_Virtualization | 5,154 | vmbase/entry.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/* MAIR_EL1 attribute encodings: slot 0 = device nGnRE, slot 1 = normal WBWA. */
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner sharable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
 * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
 * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/* Privileged Access Never is unchanged on taking an exception to EL1. */
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* All writable memory regions are treated as XN. */
.set .L_SCTLR_EL1_WXN, 0x1 << 19
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
/**
 * This is a generic entry point for an image. It carries out the operations required to prepare the
 * loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
 * prepares the stack, enables floating point, and sets up the exception vector. It preserves x0-x3
 * for the Rust entry point, as these may contain boot parameters.
 *
 * Register use before rust_entry: x28-x30 are scratch; x0-x3 are never
 * touched so the boot arguments reach Rust intact.
 */
.section .init.entry, "ax"
.global entry
entry:
/* Load and apply the memory management configuration, ready to enable MMU and caches. */
/* Install the panic vectors first so any early fault resets/hangs cleanly. */
adr x30, vector_table_panic
msr vbar_el1, x30
/*
 * Our load address is set by the host so validate it before proceeding.
 * adr gives the actual PC-relative address, mov_i the link-time one;
 * a mismatch means the image was loaded at the wrong address.
 */
adr x30, entry
mov_i x29, entry
cmp x29, x30
b.eq 1f
reset_or_hang
1:
adrp x30, idmap
msr ttbr0_el1, x30
mov_i x30, .Lmairval
msr mair_el1, x30
mov_i x30, .Ltcrval
/* Copy the supported PA range into TCR_EL1.IPS (bits [34:32] + RES0 bit 35). */
mrs x29, id_aa64mmfr0_el1
bfi x30, x29, #32, #4
msr tcr_el1, x30
mov_i x30, .Lsctlrval
/*
 * Ensure everything before this point has completed, then invalidate any potentially stale
 * local TLB entries before they start being used.
 */
isb
tlbi vmalle1
ic iallu
dsb nsh
isb
/*
 * Configure sctlr_el1 to enable MMU and cache and don't proceed until this has completed.
 */
msr sctlr_el1, x30
isb
/* Disable trapping floating point access in EL1 (CPACR_EL1.FPEN = 0b11). */
mrs x30, cpacr_el1
orr x30, x30, #(0x3 << 20)
msr cpacr_el1, x30
isb
/* Zero out the bss section, 16 bytes per iteration. */
/* NOTE(review): assumes bss_end - bss_begin is a multiple of 16 (linker script) — confirm. */
adr_l x29, bss_begin
adr_l x30, bss_end
0: cmp x29, x30
b.hs 1f
stp xzr, xzr, [x29], #16
b 0b
1: /* Copy the data section. */
/* 32 bytes per iteration via q registers; assumes 32-byte-multiple size — confirm in linker script. */
adr_l x28, data_begin
adr_l x29, data_end
adr_l x30, data_lma
2: cmp x28, x29
b.ge 3f
ldp q0, q1, [x30], #32
stp q0, q1, [x28], #32
b 2b
3: /* Prepare the exception handler stack (SP_EL1). */
adr_l x30, init_eh_stack_pointer
msr spsel, #1
mov sp, x30
/* Prepare the main thread stack (SP_EL0). */
adr_l x30, init_stack_pointer
msr spsel, #0
mov sp, x30
/* Set up exception vector (replace the panic table with the real handlers). */
adr x30, vector_table_el1
msr vbar_el1, x30
/*
 * Set up Bionic-compatible thread-local storage.
 *
 * Note that TPIDR_EL0 can't be configured from rust_entry because the
 * compiler will dereference it during function entry to access
 * __stack_chk_guard and Rust doesn't support LLVM's
 * __attribute__((no_stack_protector)).
 */
adr_l x30, __bionic_tls
msr tpidr_el0, x30
/* Call into Rust code. */
bl rust_entry
/* Loop forever waiting for interrupts, in case rust_entry ever returns. */
4: wfi
b 4b
|
Project-Mist-OS/packages_modules_Virtualization | 2,259 | rialto/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Initial TTBR0 idmap activated before first memory write.
// Remains active until a new page table is created by early Rust.
//
/*
 * Static identity-mapped page table loaded into TTBR0_EL1 by the entry
 * code (before the MMU is enabled).  Level 1 has 512 x 1 GiB entries
 * (padded to a page by .balign); the level-2 table maps 2 MiB blocks
 * within the third GiB, where the image is loaded at .ORIGIN_ADDR.
 */
.set .SZ_1K, 1024
.set .SZ_4K, 4 * .SZ_1K
.set .SZ_1M, 1024 * .SZ_1K
.set .SZ_2M, 2 * .SZ_1M
.set .SZ_1G, 1024 * .SZ_1M
.set .PAGE_SIZE, .SZ_4K
.set .ORIGIN_ADDR, 2 * .SZ_1G
.set .DTB_ADDR, .ORIGIN_ADDR + (0 * .SZ_2M)
.set .TEXT_ADDR, .ORIGIN_ADDR + (1 * .SZ_2M)
.set .DATA_ADDR, .ORIGIN_ADDR + (2 * .SZ_2M)
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
.set .L_TT_AF, 0x1 << 10 // Access flag
.set .L_TT_NG, 0x1 << 11 // Not global
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.balign .PAGE_SIZE
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.balign .PAGE_SIZE, 0 // unmapped
/* level 2 */
0: .quad .L_BLOCK_RO | .DTB_ADDR // DT provided by VMM
.quad .L_BLOCK_MEM_XIP | .TEXT_ADDR // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | .DATA_ADDR // 2 MiB of writable DRAM
.balign .PAGE_SIZE, 0 // unmapped
|
Project-Mist-OS/packages_modules_Virtualization | 1,745 | pvmfw/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Static identity-mapped page table loaded into TTBR0_EL1 by the entry
 * code (before the MMU is enabled).  Level 1 has 512 x 1 GiB entries;
 * the level-2 table maps the top two 2 MiB blocks of the second GiB,
 * where the pVM firmware is loaded (0x7fc00000 / 0x7fe00000).
 */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GB of device mappings
.quad .L_TT_TYPE_TABLE + 0f // Unmapped device memory, and pVM firmware
.fill 510, 8, 0x0 // 510 GB of remaining VA space
/* level 2 */
0: .fill 510, 8, 0x0
.quad .L_BLOCK_MEM_XIP | 0x7fc00000 // pVM firmware image
.quad .L_BLOCK_MEM | 0x7fe00000 // Writable memory for stack, heap &c.
|
Project-Mist-OS/packages_modules_Virtualization | 1,813 | vmbase/example/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Static identity-mapped page table loaded into TTBR0_EL1 by the entry
 * code (before the MMU is enabled).  Level 1 has 512 x 1 GiB entries;
 * the level-2 table maps the first three 2 MiB blocks of the third GiB
 * (DT at 0x80000000, image at 0x80200000, writable RAM at 0x80400000).
 */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.fill 509, 8, 0x0 // 509 GiB of remaining VA space
/* level 2 */
0: .quad .L_BLOCK_MEM | 0x80000000 // DT provided by VMM
.quad .L_BLOCK_MEM_XIP | 0x80200000 // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | 0x80400000 // 2 MiB of writable DRAM
.fill 509, 8, 0x0
|
ProjectInfinity-X/packages_modules_Virtualization | 4,679 | libs/libvmbase/exceptions.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Saves the volatile registers onto the stack. This currently takes 14
 * instructions, so it can be used in exception handlers with 18 instructions
 * left.
 *
 * On return, x0 and x1 are initialised to elr_el1 and spsr_el1 respectively,
 * which can be used as the first and second arguments of a subsequent call.
 * (The code below reads the EL1 registers; an earlier revision of this
 * comment said elr_el2/spsr_el2.)
 */
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
stp x0, x1, [sp, #-(8 * 24)]!
stp x2, x3, [sp, #8 * 2]
stp x4, x5, [sp, #8 * 4]
stp x6, x7, [sp, #8 * 6]
stp x8, x9, [sp, #8 * 8]
stp x10, x11, [sp, #8 * 10]
stp x12, x13, [sp, #8 * 12]
stp x14, x15, [sp, #8 * 14]
stp x16, x17, [sp, #8 * 16]
str x18, [sp, #8 * 18]
stp x29, x30, [sp, #8 * 20]
/*
 * Save elr_el1 & spsr_el1. This is so that we can take a nested exception
 * and still be able to unwind.
 */
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm
/**
 * Restores the volatile registers from the stack. This currently takes 14
 * instructions, so it can be used in exception handlers while still leaving 18
 * instructions left; if paired with save_volatile_to_stack, there are 4
 * instructions to spare.
 */
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
ldp x2, x3, [sp, #8 * 2]
ldp x4, x5, [sp, #8 * 4]
ldp x6, x7, [sp, #8 * 6]
ldp x8, x9, [sp, #8 * 8]
ldp x10, x11, [sp, #8 * 10]
ldp x12, x13, [sp, #8 * 12]
ldp x14, x15, [sp, #8 * 14]
ldp x16, x17, [sp, #8 * 16]
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]
/* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
/* Restore x0 & x1, and release stack space. */
ldp x0, x1, [sp], #8 * 24
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
 * the work, then switching back to SP0 before returning.
 *
 * Switching to SPx and calling the Rust handler takes 16 instructions. To
 * restore and return we need an additional 16 instructions, so we can implement
 * the whole handler within the allotted 32 instructions (one 0x80-byte
 * vector slot).
 */
.macro current_exception_sp0 handler:req
msr spsel, #1
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
msr spsel, #0
eret
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SPx. It saves volatile registers, calls the Rust handler, restores volatile
 * registers, then returns.
 *
 * This also works for exceptions taken from EL0, if we don't care about
 * non-volatile registers.
 *
 * Saving state and jumping to the Rust handler takes 15 instructions, and
 * restoring and returning also takes 15 instructions, so we can fit the whole
 * handler in 30 instructions, under the limit of 32.
 */
.macro current_exception_spx handler:req
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
eret
.endm
/*
 * EL1 exception vector table: 16 slots of 0x80 bytes (hence the .balign
 * directives), in the architecturally mandated order — four exception
 * types (sync, IRQ, FIQ, SError) for each of: current EL with SP_EL0,
 * current EL with SP_ELx, lower EL using AArch64, lower EL using AArch32.
 * Each slot expands to at most 32 instructions (see the macros above).
 */
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
vector_table_el1:
/* Current EL with SP_EL0. */
sync_cur_sp0:
current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
current_exception_sp0 serr_current
.balign 0x80
/* Current EL with SP_ELx. */
sync_cur_spx:
current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
current_exception_spx serr_current
.balign 0x80
/* Lower EL, AArch64. */
sync_lower_64:
current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
current_exception_spx serr_lower
.balign 0x80
/* Lower EL, AArch32. */
sync_lower_32:
current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
current_exception_spx serr_lower
|
ProjectInfinity-X/packages_modules_Virtualization | 1,788 | libs/libvmbase/exceptions_panic.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/**
 * The following table is intended to trap any fault resulting from the very
 * first memory accesses. They assume that PSCI v0.2 is available and provides
 * the PSCI_SYSTEM_RESET call in an attempt to gracefully exit but otherwise
 * results in the core busy-looping.
 *
 * Standard 16-slot layout (0x80 bytes per slot); every slot expands the
 * reset_or_hang macro from common.h.
 */
.section .text.vector_table_panic, "ax"
.global vector_table_panic
.balign 0x800
vector_table_panic:
/* Current EL with SP_EL0. */
sync_cur_sp0_panic:
reset_or_hang
.balign 0x80
irq_cur_sp0_panic:
reset_or_hang
.balign 0x80
fiq_cur_sp0_panic:
reset_or_hang
.balign 0x80
serr_cur_sp0_panic:
reset_or_hang
.balign 0x80
/* Current EL with SP_ELx. */
sync_cur_spx_panic:
reset_or_hang
.balign 0x80
irq_cur_spx_panic:
reset_or_hang
.balign 0x80
fiq_cur_spx_panic:
reset_or_hang
.balign 0x80
serr_cur_spx_panic:
reset_or_hang
.balign 0x80
/* Lower EL, AArch64. */
sync_lower_64_panic:
reset_or_hang
.balign 0x80
irq_lower_64_panic:
reset_or_hang
.balign 0x80
fiq_lower_64_panic:
reset_or_hang
.balign 0x80
serr_lower_64_panic:
reset_or_hang
.balign 0x80
/* Lower EL, AArch32. */
sync_lower_32_panic:
reset_or_hang
.balign 0x80
irq_lower_32_panic:
reset_or_hang
.balign 0x80
fiq_lower_32_panic:
reset_or_hang
.balign 0x80
serr_lower_32_panic:
reset_or_hang
|
ProjectInfinity-X/packages_modules_Virtualization | 5,154 | libs/libvmbase/entry.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/* MAIR_EL1 attribute encodings: index 0 = Device-nGnRE, index 1 = Normal write-back/write-allocate. */
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner sharable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
 * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
 * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
/* Combined TCR_EL1 value; entry later ORs in the supported PA range (IPS field). */
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/* Privileged Access Never is unchanged on taking an exception to EL1. */
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* All writable memory regions are treated as XN. */
.set .L_SCTLR_EL1_WXN, 0x1 << 19
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
/* Bits that are RES1 in SCTLR_EL1 and must be kept set. */
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
/* Combined SCTLR_EL1 value written once the translation regime is configured. */
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
/**
 * This is a generic entry point for an image. It carries out the operations required to prepare the
 * loaded image to be run. Specifically, it validates the load address, enables the MMU and caches,
 * zeroes the bss section and copies the data section (using x28-x30 as scratch), prepares the
 * stacks, enables floating point, and sets up the exception vector. It preserves x0-x3 for the
 * Rust entry point, as these may contain boot parameters.
 */
.section .init.entry, "ax"
.global entry
entry:
/* Load and apply the memory management configuration, ready to enable MMU and caches. */
adr x30, vector_table_panic
msr vbar_el1, x30
/*
 * Our load address is set by the host so validate it before proceeding.
 */
adr x30, entry
mov_i x29, entry
cmp x29, x30
b.eq 1f
reset_or_hang
1:
adrp x30, idmap
msr ttbr0_el1, x30
mov_i x30, .Lmairval
msr mair_el1, x30
mov_i x30, .Ltcrval
/* Copy the supported PA range into TCR_EL1.IPS. */
mrs x29, id_aa64mmfr0_el1
bfi x30, x29, #32, #4
msr tcr_el1, x30
mov_i x30, .Lsctlrval
/*
 * Ensure everything before this point has completed, then invalidate any potentially stale
 * local TLB entries before they start being used.
 */
isb
tlbi vmalle1
ic iallu
dsb nsh
isb
/*
 * Configure sctlr_el1 to enable MMU and cache and don't proceed until this has completed.
 */
msr sctlr_el1, x30
isb
/* Disable trapping floating point access in EL1. */
mrs x30, cpacr_el1
orr x30, x30, #(0x3 << 20)
msr cpacr_el1, x30
isb
/* Zero out the bss section (16 bytes per iteration; assumes bss is 16-byte sized/aligned). */
adr_l x29, bss_begin
adr_l x30, bss_end
0: cmp x29, x30
b.hs 1f
stp xzr, xzr, [x29], #16
b 0b
1: /* Copy the data section. */
/* NOTE(review): copies 32 bytes per iteration with a signed b.ge bound, so it may write up to
 * 31 bytes past data_end -- presumably the linker script pads the region; confirm. */
adr_l x28, data_begin
adr_l x29, data_end
adr_l x30, data_lma
2: cmp x28, x29
b.ge 3f
ldp q0, q1, [x30], #32
stp q0, q1, [x28], #32
b 2b
3: /* Prepare the exception handler stack (SP_EL1). */
adr_l x30, init_eh_stack_pointer
msr spsel, #1
mov sp, x30
/* Prepare the main thread stack (SP_EL0). */
adr_l x30, init_stack_pointer
msr spsel, #0
mov sp, x30
/* Set up exception vector. */
adr x30, vector_table_el1
msr vbar_el1, x30
/*
 * Set up Bionic-compatible thread-local storage.
 *
 * Note that TPIDR_EL0 can't be configured from rust_entry because the
 * compiler will dereference it during function entry to access
 * __stack_chk_guard and Rust doesn't support LLVM's
 * __attribute__((no_stack_protector)).
 */
adr_l x30, __bionic_tls
msr tpidr_el0, x30
/* Call into Rust code. */
bl rust_entry
/* Loop forever waiting for interrupts. */
4: wfi
b 4b
|
ProjectInfinity-X/packages_modules_Virtualization | 2,102 | guest/vmbase_example/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* AArch64 stage-1 descriptor fields for the static identity map below. */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
/* Composite block-descriptor attribute sets. */
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
/* Static identity-mapped page table, 4 KiB aligned (.align 12), installed in TTBR0_EL1 by entry. */
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.fill 509, 8, 0x0 // 509 GiB of remaining VA space
0: /* level 2 */
#if defined(VMBASE_EXAMPLE_IS_BIOS)
.quad 0 // 2 MiB not mapped (DT)
.quad .L_BLOCK_MEM_XIP | 0x80200000 // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | 0x80400000 // 2 MiB of writable DRAM
.fill 509, 8, 0x0
#elif defined(VMBASE_EXAMPLE_IS_KERNEL)
.quad .L_BLOCK_MEM_XIP | 0x80000000 // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | 0x80200000 // 2 MiB of writable DRAM
.fill 510, 8, 0x0
#else
#error "Unexpected vmbase_example mode: failed to generate idmap"
#endif
|
ProjectInfinity-X/packages_modules_Virtualization | 2,161 | guest/rialto/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Initial TTBR0 idmap activated before first memory write.
// Remains active until a new page table is created by early Rust.
//
// Size helpers.
.set .SZ_1K, 1024
.set .SZ_4K, 4 * .SZ_1K
.set .SZ_1M, 1024 * .SZ_1K
.set .SZ_2M, 2 * .SZ_1M
.set .SZ_1G, 1024 * .SZ_1M
.set .PAGE_SIZE, .SZ_4K
// Image layout: loaded at 2 GiB, with the text and data sections in the
// first and second 2 MiB blocks respectively.
.set .ORIGIN_ADDR, 2 * .SZ_1G
.set .TEXT_ADDR, .ORIGIN_ADDR + (0 * .SZ_2M)
.set .DATA_ADDR, .ORIGIN_ADDR + (1 * .SZ_2M)
// AArch64 stage-1 descriptor fields.
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
.set .L_TT_AF, 0x1 << 10 // Access flag
.set .L_TT_NG, 0x1 << 11 // Not global
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
// Composite block-descriptor attribute sets.
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.balign .PAGE_SIZE
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.balign .PAGE_SIZE, 0 // unmapped
/* level 2 */
0:
.quad .L_BLOCK_MEM_XIP | .TEXT_ADDR // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | .DATA_ADDR // 2 MiB of writable DRAM
.balign .PAGE_SIZE, 0 // unmapped
|
ProjectInfinity-X/packages_modules_Virtualization | 1,745 | guest/pvmfw/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* AArch64 stage-1 descriptor fields for the static identity map below. */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
/* Composite block-descriptor attribute sets. */
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
/* Static identity-mapped page table, 4 KiB aligned (.align 12). */
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GB of device mappings
.quad .L_TT_TYPE_TABLE + 0f // Unmapped device memory, and pVM firmware
.fill 510, 8, 0x0 // 510 GB of remaining VA space
/* level 2 */
0: .fill 510, 8, 0x0
.quad .L_BLOCK_MEM_XIP | 0x7fc00000 // pVM firmware image
.quad .L_BLOCK_MEM | 0x7fe00000 // Writable memory for stack, heap &c.
|
project-starch/capslock-rust | 11,809 | library/std/src/sys/pal/sgx/abi/entry.S | /* This symbol is used at runtime to figure out the virtual address that the */
/* enclave is loaded at. */
/* Zero-size symbol at image offset 0; its runtime address is the enclave base. */
.section absolute
.global IMAGE_BASE
IMAGE_BASE:

/* ELF note identifying the toolchain version of this enclave binary. */
.section ".note.x86_64-fortanix-unknown-sgx", "", @note
.align 4
.long 1f - 0f /* name length (not including padding) */
.long 3f - 2f /* desc length (not including padding) */
.long 1 /* type = NT_VERSION */
0: .asciz "toolchain-version" /* name */
1: .align 4
2: .long 1 /* desc - toolchain version number, 32-bit LE */
3: .align 4

.section .rodata
/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
/* parts need to have a defined value. In particular: */
/* */
/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
/* RFBM[2] is set, regardless of the value of XSTATE_BV */
/* * XSAVE header */
.align 64
.Lxsave_clear:
.org .+24
.Lxsave_mxcsr:
/* 0x1fbf: all SSE exceptions masked (and their sticky status flags set). */
.short 0x1fbf
/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */
/* The following symbols point at read-only data that will be filled in by the */
/* post-linker. */
/* When using this macro, don't forget to adjust the linker version script! */
/* Emits a global, post-linker-patched, read-only variable of \size bytes. */
.macro globvar name:req size:req
.global \name
.protected \name
.align \size
.size \name , \size
\name :
.org .+\size
.endm
/* The base address (relative to enclave start) of the heap area */
globvar HEAP_BASE 8
/* The heap size in bytes */
globvar HEAP_SIZE 8
/* Value of the RELA entry in the dynamic table */
globvar RELA 8
/* Value of the RELACOUNT entry in the dynamic table */
globvar RELACOUNT 8
/* The enclave size in bytes */
globvar ENCLAVE_SIZE 8
/* The base address (relative to enclave start) of the enclave configuration area */
globvar CFGDATA_BASE 8
/* Non-zero if debugging is enabled, zero otherwise */
globvar DEBUG 1
/* The base address (relative to enclave start) of the enclave text section */
globvar TEXT_BASE 8
/* The size in bytes of enclave text section */
globvar TEXT_SIZE 8
/* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_OFFSET 8
/* The size in bytes of enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_LEN 8
/* The base address (relative to enclave start) of the enclave .eh_frame section */
globvar EH_FRM_OFFSET 8
/* The size in bytes of enclave .eh_frame section */
globvar EH_FRM_LEN 8
/* XSAVE header at its architecturally fixed offset (512) from the area start. */
.org .Lxsave_clear+512
.Lxsave_header:
.int 0, 0 /* XSTATE_BV */
.int 0, 0 /* XCOMP_BV */
.org .+48 /* reserved bits */

.data
/* Sticky abort flag: set by an aborting usercall, checked on every (re-)entry. */
.Laborted:
.byte 0
/* TCS local storage section */
/* Byte offsets into the per-thread storage addressed via %gs. */
.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */
.equ tcsls_flags, 0x08 /* initialized by loader */
.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */
/* 14 unused bits */
.equ tcsls_user_fcw, 0x0a
.equ tcsls_user_mxcsr, 0x0c
.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */
.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */
.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
.equ tcsls_user_rsp, 0x28
.equ tcsls_user_retip, 0x30
.equ tcsls_user_rbp, 0x38
.equ tcsls_user_r12, 0x40
.equ tcsls_user_r13, 0x48
.equ tcsls_user_r14, 0x50
.equ tcsls_user_r15, 0x58
.equ tcsls_tls_ptr, 0x60
.equ tcsls_tcs_addr, 0x68
/* Loads the "secondary TCS" flag as a 0/1 value into the named register. */
.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
.ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
.abort
.endif
mov $(1<<tcsls_flag_secondary),%e\reg
and %gs:tcsls_flags,%\reg
.endm
/* We place the ELF entry point in a separate section so it can be removed by
elf2sgxs */
/*
 * ELF entry point, kept outside the SGX-measured text so elf2sgxs can
 * strip it. If the binary is executed as an ordinary Linux program,
 * write an explanatory message to stderr (retrying on short writes)
 * and exit with status 1. Never returns.
 */
.section .text_no_sgx, "ax"
.Lelf_errmsg:
.ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
.Lelf_errmsg_end:
.global elf_entry
.type elf_entry,function
elf_entry:
/* write(fd=2, msg, len): rdi = fd, rsi = buf, rdx = remaining length */
lea .Lelf_errmsg(%rip),%rsi
mov $.Lelf_errmsg_end-.Lelf_errmsg,%edx
mov $2,%edi
.Lwrite_more:
mov $1,%eax /* SYS_write */
syscall
test %rax,%rax
jle .Lgive_up /* error (rax < 0) or nothing written: stop */
add %rax,%rsi /* advance past the bytes just written */
sub %rax,%rdx /* ZF set once everything has been written */
jnz .Lwrite_more
.Lgive_up:
mov $60,%eax /* SYS_exit */
mov $1,%edi /* exit status 1 */
syscall
ud2 /* exit() does not return */
/* end elf_entry */
/* This code needs to be called *after* the enclave stack has been setup. */
/* There are 3 places where this needs to happen, so this is put in a macro. */
.macro entry_sanitize_final
/* Sanitize rflags received from user */
/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
pushfq
/* 0x40400 = AC (bit 18) | DF (bit 10); clear both in the saved RFLAGS. */
andq $~0x40400, (%rsp)
popfq
/* check for abort */
bt $0,.Laborted(%rip)
jc .Lreentry_panic
.endm
.text
/*
 * Enclave entry point, reached via ENCLU[EENTER]. Per that convention
 * (Intel SDM) %rbx holds the TCS address and %rcx the AEP, which serves
 * as the userspace return address; both are saved below. RDI, RSI, RDX,
 * R8, R9 carry the caller's parameters through to entry(). Also handles
 * resumption after a usercall (tcsls_last_rsp != 0) and one-time
 * per-thread initialization via tcs_init().
 */
.global sgx_entry
.type sgx_entry,function
sgx_entry:
/* save user registers */
mov %rcx,%gs:tcsls_user_retip
mov %rsp,%gs:tcsls_user_rsp
mov %rbp,%gs:tcsls_user_rbp
mov %r12,%gs:tcsls_user_r12
mov %r13,%gs:tcsls_user_r13
mov %r14,%gs:tcsls_user_r14
mov %r15,%gs:tcsls_user_r15
mov %rbx,%gs:tcsls_tcs_addr
stmxcsr %gs:tcsls_user_mxcsr
fnstcw %gs:tcsls_user_fcw
/* check for debug buffer pointer */
testb $0xff,DEBUG(%rip)
jz .Lskip_debug_init
mov %r10,%gs:tcsls_debug_panic_buf_ptr
.Lskip_debug_init:
/* reset cpu state */
/* XRSTOR restores the components selected by EDX:EAX (all bits set here). */
mov %rdx, %r10
mov $-1, %rax
mov $-1, %rdx
xrstor .Lxsave_clear(%rip)
lfence
mov %r10, %rdx
/* check if returning from usercall */
mov %gs:tcsls_last_rsp,%r11
test %r11,%r11
jnz .Lusercall_ret
/* setup stack */
mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
/* here. This is fixed below under "adjust stack". */
/* check for thread init */
bts $tcsls_flag_init_once,%gs:tcsls_flags
jc .Lskip_init
/* adjust stack */
/* tcsls_tos initially holds an image-relative offset; rebase it to an address. */
lea IMAGE_BASE(%rip),%rax
add %rax,%rsp
mov %rsp,%gs:tcsls_tos
entry_sanitize_final
/* call tcs_init */
/* store caller-saved registers in callee-saved registers */
mov %rdi,%rbx
mov %rsi,%r12
mov %rdx,%r13
mov %r8,%r14
mov %r9,%r15
load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
call tcs_init
/* reload caller-saved registers */
mov %rbx,%rdi
mov %r12,%rsi
mov %r13,%rdx
mov %r14,%r8
mov %r15,%r9
jmp .Lafter_init
.Lskip_init:
entry_sanitize_final
.Lafter_init:
/* call into main entry point */
load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
mov %rax,%rsi /* RSI = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
xor %rdi,%rdi /* RDI = normal exit */
.Lexit:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set later */
/* RCX overwritten by ENCLU */
/* RDX contains return value */
/* RSP set later */
/* RBP set later */
/* RDI contains exit mode */
/* RSI contains return value */
xor %r8,%r8
xor %r9,%r9
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
.Lsgx_exit:
/* clear extended register state */
mov %rdx, %rcx /* save RDX */
mov $-1, %rax
mov %rax, %rdx
xrstor .Lxsave_clear(%rip)
mov %rcx, %rdx /* restore RDX */
/* clear flags */
pushq $0
popfq
/* restore user registers */
mov %gs:tcsls_user_r12,%r12
mov %gs:tcsls_user_r13,%r13
mov %gs:tcsls_user_r14,%r14
mov %gs:tcsls_user_r15,%r15
mov %gs:tcsls_user_retip,%rbx
mov %gs:tcsls_user_rsp,%rsp
mov %gs:tcsls_user_rbp,%rbp
fldcw %gs:tcsls_user_fcw
ldmxcsr %gs:tcsls_user_mxcsr
/* exit enclave */
mov $0x4,%eax /* EEXIT */
enclu
/* end sgx_entry */
/* Re-entry after abort: force stack misalignment marker and bail out. */
.Lreentry_panic:
orq $8,%rsp
jmp abort_reentry
/* This *MUST* be called with 6 parameters, otherwise register information */
/* might leak! */
/*
 * Exits the enclave to perform a usercall. %rcx != 0 marks an aborting
 * (no-return) usercall. For normal usercalls the callee-saved state is
 * stashed on the enclave stack and tcsls_last_rsp records where, so the
 * next sgx_entry can resume at .Lusercall_ret.
 */
.global usercall
usercall:
test %rcx,%rcx /* check `abort` function argument */
jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */
jmp .Lusercall_save_state /* non-aborting usercall */
.Lusercall_abort:
/* set aborted bit */
movb $1,.Laborted(%rip)
/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
testb $0xff,DEBUG(%rip)
jz .Lusercall_noreturn
.Lusercall_save_state:
/* save callee-saved state */
push %r15
push %r14
push %r13
push %r12
push %rbp
push %rbx
sub $8, %rsp
fstcw 4(%rsp)
stmxcsr (%rsp)
movq %rsp,%gs:tcsls_last_rsp
.Lusercall_noreturn:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set by sgx_exit */
/* RCX overwritten by ENCLU */
/* RDX contains parameter */
/* RSP set by sgx_exit */
/* RBP set by sgx_exit */
/* RDI contains parameter */
/* RSI contains parameter */
/* R8 contains parameter */
/* R9 contains parameter */
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
/* extended registers/flags cleared by sgx_exit */
/* exit */
jmp .Lsgx_exit
/* Resumption point: sgx_entry jumps here when tcsls_last_rsp is non-zero. */
.Lusercall_ret:
movq $0,%gs:tcsls_last_rsp
/* restore callee-saved state, cf. "save" above */
mov %r11,%rsp
/* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */
/* vector instructions is used. We omit the lfence here as one is required before */
/* the jmp instruction anyway. */
ldmxcsr (%rsp)
fldcw 4(%rsp)
add $8, %rsp
entry_sanitize_final
pop %rbx
pop %rbp
pop %r12
pop %r13
pop %r14
pop %r15
/* return */
mov %rsi,%rax /* RAX = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
pop %r11
lfence
jmp *%r11
/*
The following functions need to be defined externally:
```
// Called by entry code on re-entry after exit
extern "C" fn abort_reentry() -> !;
// Called once when a TCS is first entered
extern "C" fn tcs_init(secondary: bool);
// Standard TCS entrypoint
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
```
*/
/*
 * Small TCS-local-storage accessors. Each returns via an explicit
 * pop / lfence / jmp sequence (speculation barrier before the indirect
 * branch) rather than a plain ret.
 */
/* Returns the current thread's TCS address in %rax. */
.global get_tcs_addr
get_tcs_addr:
mov %gs:tcsls_tcs_addr,%rax
pop %r11
lfence
jmp *%r11
/* Returns the current thread's TLS pointer in %rax. */
.global get_tls_ptr
get_tls_ptr:
mov %gs:tcsls_tls_ptr,%rax
pop %r11
lfence
jmp *%r11
/* Sets the current thread's TLS pointer from %rdi. */
.global set_tls_ptr
set_tls_ptr:
mov %rdi,%gs:tcsls_tls_ptr
pop %r11
lfence
jmp *%r11
/* Atomically fetches and clears the debug panic buffer pointer (returned in %rax). */
.global take_debug_panic_buf_ptr
take_debug_panic_buf_ptr:
xor %rax,%rax
xchg %gs:tcsls_debug_panic_buf_ptr,%rax
pop %r11
lfence
jmp *%r11
|
Prom11111/plumbingtech | 15,150 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zstd-sys-2.0.15+zstd.1.5.7/zstd/lib/decompress/huf_decompress_amd64.S | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "../common/portability_macros.h"
#if defined(__ELF__) && defined(__GNUC__)
/* Stack marking
* ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
*/
.section .note.GNU-stack,"",%progbits
#if defined(__aarch64__)
/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64.
* See: https://github.com/facebook/zstd/issues/3841
* See: https://gcc.godbolt.org/z/sqr5T4ffK
* See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/
* See: https://reviews.llvm.org/D62609
*/
.pushsection .note.gnu.property, "a"
.p2align 3
.long 4 /* size of the name - "GNU\0" */
.long 0x10 /* size of descriptor */
.long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */
.asciz "GNU"
.long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4 /* pr_datasz - 4 bytes */
.long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */
.p2align 3 /* pr_padding - bring everything to 8 byte alignment */
.popsection
#endif
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
/* Calling convention:
*
* %rdi (or %rcx on Windows) contains the first argument: HUF_DecompressAsmArgs*.
* %rbp isn't maintained (no frame pointer).
* %rsp contains the stack pointer that grows down.
* No red-zone is assumed, only addresses >= %rsp are used.
* All register contents are preserved.
*/
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.text
/* Sets up register mappings for clarity.
 * op[], bits[], dtable & ip[0] each get their own register.
 * ip[1,2,3] & olimit alias var[].
 * %rax is a scratch register.
 */
/* NOTE: these are C-preprocessor macros -- this file is preprocessed (.S). */
#define op0 rsi
#define op1 rbx
#define op2 rcx
#define op3 rdi
#define ip0 r8
#define ip1 r9
#define ip2 r10
#define ip3 r11
#define bits0 rbp
#define bits1 rdx
#define bits2 r12
#define bits3 r13
#define dtable r14
#define olimit r15
/* var[] aliases ip[1,2,3] & olimit
 * ip[1,2,3] are saved every iteration.
 * olimit is only used in compute_olimit.
 */
#define var0 r15
#define var1 r9
#define var2 r10
#define var3 r11
/* 32-bit var registers */
#define vard0 r15d
#define vard1 r9d
#define vard2 r10d
#define vard3 r11d
/* Calls X(N) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM(X) \
X(0); \
X(1); \
X(2); \
X(3)
/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
X(0, idx); \
X(1, idx); \
X(2, idx); \
X(3, idx)
/* Define both _HUF_* & HUF_* symbols because MacOS
 * C symbols are prefixed with '_' & Linux symbols aren't.
 */
/*
 * Single-symbol (X1) Huffman decode hot loop. Takes a pointer to a
 * HUF_DecompressAsmArgs struct (rdi, or rcx on Windows), decodes 4
 * interleaved bit-streams 5 symbols each per iteration, and writes the
 * updated ip/op/bits state back into the struct before returning.
 */
_HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
movq %rcx, %rax
#else
movq %rdi, %rax
#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push 104(%rax) /* ilowest */
push 112(%rax) /* oend */
push %olimit /* olimit space */
subq $24, %rsp
.L_4X1_compute_olimit:
/* Computes how many iterations we can do safely
 * %r15, %rax may be clobbered
 * rbx, rdx must be saved
 * op3 & ip0 mustn't be clobbered
 */
movq %rbx, 0(%rsp)
movq %rdx, 8(%rsp)
movq 32(%rsp), %rax /* rax = oend */
subq %op3, %rax /* rax = oend - op3 */
/* r15 = (oend - op3) / 5 */
/* Division by 5 via multiplicative inverse (0xCCCC...CCCD). */
movabsq $-3689348814741910323, %rdx
mulq %rdx
movq %rdx, %r15
shrq $2, %r15
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %rbx /* rbx = ip0 - ilowest */
/* rdx = (ip0 - ilowest) / 7 */
/* Division by 7 via multiplicative inverse. */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %rbx
shrq %rbx
addq %rbx, %rdx
shrq $2, %rdx
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* r15 = r15 * 5 */
leaq (%r15, %r15, 4), %r15
/* olimit = op3 + r15 */
addq %op3, %olimit
movq 8(%rsp), %rdx
movq 0(%rsp), %rbx
/* If (op3 + 20 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X1_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X1_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X1_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X1_exit
/* Reads top 11 bits from bits[n]
 * Loads dt[bits[n]] into var[n]
 */
#define GET_NEXT_DELT(n) \
movq $53, %var##n; \
shrxq %var##n, %bits##n, %var##n; \
movzwl (%dtable,%var##n,2),%vard##n
/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
 * Moves var[n] to %rax
 * bits[n] <<= var[n] & 63
 * op[n][idx] = %rax >> 8
 * %ah is a way to access bits [8, 16) of %rax
 */
#define DECODE_FROM_DELT(n, idx) \
movq %var##n, %rax; \
shlxq %var##n, %bits##n, %bits##n; \
movb %ah, idx(%op##n)
/* Assumes GET_NEXT_DELT has been called.
 * Calls DECODE_FROM_DELT then GET_NEXT_DELT
 */
#define DECODE_AND_GET_NEXT(n, idx) \
DECODE_FROM_DELT(n, idx); \
GET_NEXT_DELT(n) \
/* // ctz & nbBytes is stored in bits[n]
 * // nbBits is stored in %rax
 * ctz = CTZ[bits[n]]
 * nbBits = ctz & 7
 * nbBytes = ctz >> 3
 * op[n] += 5
 * ip[n] -= nbBytes
 * // Note: x86-64 is little-endian ==> no bswap
 * bits[n] = MEM_readST(ip[n]) | 1
 * bits[n] <<= nbBits
 */
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
andq $7, %rax; \
shrq $3, %bits##n; \
leaq 5(%op##n), %op##n; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlx %rax, %bits##n, %bits##n
/* Store clobbered variables on the stack */
movq %olimit, 24(%rsp)
movq %ip1, 0(%rsp)
movq %ip2, 8(%rsp)
movq %ip3, 16(%rsp)
/* Call GET_NEXT_DELT for each stream */
FOR_EACH_STREAM(GET_NEXT_DELT)
.p2align 6
.L_4X1_loop_body:
/* Decode 5 symbols in each of the 4 streams (20 total)
 * Must have called GET_NEXT_DELT for each stream
 */
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)
/* Load ip[1,2,3] from stack (var[] aliases them)
 * ip[] is needed for RELOAD_BITS
 * Each will be stored back to the stack after RELOAD
 */
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Reload each stream & fetch the next table entry
 * to prepare for the next iteration
 */
RELOAD_BITS(0)
GET_NEXT_DELT(0)
RELOAD_BITS(1)
movq %ip1, 0(%rsp)
GET_NEXT_DELT(1)
RELOAD_BITS(2)
movq %ip2, 8(%rsp)
GET_NEXT_DELT(2)
RELOAD_BITS(3)
movq %ip3, 16(%rsp)
GET_NEXT_DELT(3)
/* If op3 < olimit: continue the loop */
cmp %op3, 24(%rsp)
ja .L_4X1_loop_body
/* Reload ip[1,2,3] from stack */
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Re-compute olimit */
jmp .L_4X1_compute_olimit
#undef GET_NEXT_DELT
#undef DECODE_FROM_DELT
/* NOTE(review): DECODE is never #defined in this function (DECODE_AND_GET_NEXT is);
 * the #undef is harmless but looks like a leftover -- confirm against upstream. */
#undef DECODE
#undef RELOAD_BITS
.L_4X1_exit:
addq $24, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* olimit */
pop %rax /* oend */
pop %rax /* ilowest */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
/*
 * Double-symbol (X2) Huffman decode hot loop. Same argument/return
 * contract as the X1 variant above, but each DTable entry yields up to
 * two symbols (written with a 16-bit store) and each stream has its own
 * output-end bound.
 */
_HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
movq %rcx, %rax
#else
movq %rdi, %rax
#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push %rax /* olimit */
push 104(%rax) /* ilowest */
movq 112(%rax), %rax
push %rax /* oend3 */
movq %op3, %rax
push %rax /* oend2 */
movq %op2, %rax
push %rax /* oend1 */
movq %op1, %rax
push %rax /* oend0 */
/* Scratch space */
subq $8, %rsp
.L_4X2_compute_olimit:
/* Computes how many iterations we can do safely
 * %r15, %rax may be clobbered
 * rdx must be saved
 * op[1,2,3,4] & ip0 mustn't be clobbered
 */
movq %rdx, 0(%rsp)
/* We can consume up to 7 input bytes each iteration. */
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %r15 /* r15 = ip0 - ilowest */
/* rdx = rax / 7 */
/* Division by 7 via multiplicative inverse. */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %r15
shrq %r15
addq %r15, %rdx
shrq $2, %rdx
/* r15 = (ip0 - ilowest) / 7 */
movq %rdx, %r15
/* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */
movq 8(%rsp), %rax /* rax = oend0 */
subq %op0, %rax /* rax = oend0 - op0 */
movq 16(%rsp), %rdx /* rdx = oend1 */
subq %op1, %rdx /* rdx = oend1 - op1 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 24(%rsp), %rax /* rax = oend2 */
subq %op2, %rax /* rax = oend2 - op2 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 32(%rsp), %rax /* rax = oend3 */
subq %op3, %rax /* rax = oend3 - op3 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movabsq $-3689348814741910323, %rax
mulq %rdx
shrq $3, %rdx /* rdx = rdx / 10 */
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* olimit = op3 + 5 * r15 */
movq %r15, %rax
leaq (%op3, %rax, 4), %olimit
addq %rax, %olimit
movq 0(%rsp), %rdx
/* If (op3 + 10 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X2_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X2_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X2_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X2_exit
/* Decode one X2 table entry: top 11 bits of bits[n] index a 4-byte entry
 * (16-bit symbol pair, nbBits byte, output-length byte).
 */
#define DECODE(n, idx) \
movq %bits##n, %rax; \
shrq $53, %rax; \
movzwl 0(%dtable,%rax,4),%r8d; \
movzbl 2(%dtable,%rax,4),%r15d; \
movzbl 3(%dtable,%rax,4),%eax; \
movw %r8w, (%op##n); \
shlxq %r15, %bits##n, %bits##n; \
addq %rax, %op##n
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
shrq $3, %bits##n; \
andq $7, %rax; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlxq %rax, %bits##n, %bits##n
movq %olimit, 48(%rsp)
.p2align 6
.L_4X2_loop_body:
/* We clobber r8, so store it on the stack */
movq %r8, 0(%rsp)
/* Decode 5 symbols from each of the 4 streams (20 symbols total). */
FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)
/* Reload r8 */
movq 0(%rsp), %r8
FOR_EACH_STREAM(RELOAD_BITS)
cmp %op3, 48(%rsp)
ja .L_4X2_loop_body
jmp .L_4X2_compute_olimit
#undef DECODE
#undef RELOAD_BITS
.L_4X2_exit:
addq $8, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* oend0 */
pop %rax /* oend1 */
pop %rax /* oend2 */
pop %rax /* oend3 */
pop %rax /* ilowest */
pop %rax /* olimit */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
#endif
|
Prom11111/plumbingtech | 1,569 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/riscv64.s | #include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li x10, STACK_DIRECTION_DESCENDING
jr x1
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
add x10, x2, x0
jr x1
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */
.cfi_startproc
add x2, x12, x0
jr x11
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */
.cfi_startproc
/* Save the return address (x1/ra) and the current stack pointer (x2/sp) just
   below the top of the new stack, then switch to the new stack */
sd x1, -8(x13)
sd x2, -16(x13)
addi x2, x13, -16
.cfi_def_cfa x2, 16
.cfi_offset x1, -8
.cfi_offset x2, -16
/* Call the callback; the x10/x11 arguments are passed through untouched */
jalr x1, x12, 0
/* Restore ra and the original stack pointer saved above */
ld x1, 8(x2)
.cfi_restore x1
ld x2, 0(x2)
.cfi_restore x2
jr x1
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 1,571 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/riscv.s | #include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li x10, STACK_DIRECTION_DESCENDING
jr x1
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
add x10, x2, x0
jr x1
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */
.cfi_startproc
add x2, x12, x0
jr x11
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */
.cfi_startproc
sw x1, -12(x13)
sw x2, -16(x13)
addi x2, x13, -16
.cfi_def_cfa x2, 16
.cfi_offset x1, -12
.cfi_offset x2, -16
jalr x1, x12, 0
lw x1, 4(x2)
.cfi_restore x1
lw x2, 0(x2)
.cfi_restore x2
jr x1
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 2,137 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/aarch_aapcs64.s | #include "psm.h"
.text
#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
#define GLOBL(fnname) .globl _##fnname
#define TYPE(fnname)
#define FUNCTION(fnname) _##fnname
#define SIZE(fnname,endlabel)
#elif CFG_TARGET_OS_windows
#define GLOBL(fnname) .globl fnname
#define TYPE(fnname)
#define FUNCTION(fnname) fnname
#define SIZE(fnname,endlabel)
#else
#define GLOBL(fnname) .globl fnname
#define TYPE(fnname) .type fnname,@function
#define FUNCTION(fnname) fnname
#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
#endif
GLOBL(rust_psm_stack_direction)
.p2align 2
TYPE(rust_psm_stack_direction)
FUNCTION(rust_psm_stack_direction):
/* extern "C" fn() -> u8 */
.cfi_startproc
orr w0, wzr, #STACK_DIRECTION_DESCENDING
ret
.rust_psm_stack_direction_end:
SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
.cfi_endproc
GLOBL(rust_psm_stack_pointer)
.p2align 2
TYPE(rust_psm_stack_pointer)
FUNCTION(rust_psm_stack_pointer):
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
mov x0, sp
ret
.rust_psm_stack_pointer_end:
SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
.cfi_endproc
GLOBL(rust_psm_replace_stack)
.p2align 2
TYPE(rust_psm_replace_stack)
FUNCTION(rust_psm_replace_stack):
/* extern "C" fn(x0: usize, x1: extern "C" fn(usize), x2: *mut u8) */
.cfi_startproc
/* All we gotta do is set the stack pointer to x2 & tail-call the callback in x1 */
mov sp, x2
br x1
.rust_psm_replace_stack_end:
SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
.cfi_endproc
GLOBL(rust_psm_on_stack)
.p2align 2
TYPE(rust_psm_on_stack)
FUNCTION(rust_psm_on_stack):
/* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */
.cfi_startproc
stp x29, x30, [sp, #-16]!
.cfi_def_cfa sp, 16
mov x29, sp
.cfi_def_cfa x29, 16
.cfi_offset x29, -16
.cfi_offset x30, -8
mov sp, x3
blr x2
mov sp, x29
.cfi_def_cfa sp, 16
ldp x29, x30, [sp], #16
.cfi_def_cfa sp, 0
.cfi_restore x29
.cfi_restore x30
ret
.rust_psm_on_stack_end:
SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
.cfi_endproc
|
Prom11111/plumbingtech | 2,299 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/arm_aapcs.s | #include "psm.h"
.text
.syntax unified
#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
#define GLOBL(fnname) .globl _##fnname
#define THUMBTYPE(fnname) .thumb_func _##fnname
#define FUNCTION(fnname) _##fnname
#define THUMBFN .code 16
#define SIZE(fnname,endlabel)
#define FNSTART
#define CANTUNWIND
#define FNEND
#else
#define GLOBL(fnname) .globl fnname
#define THUMBTYPE(fnname) .type fnname,%function
#define FUNCTION(fnname) fnname
#define THUMBFN .code 16
#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
#define FNSTART .fnstart
#define CANTUNWIND .cantunwind
#define FNEND .fnend
#endif
GLOBL(rust_psm_stack_direction)
.p2align 2
THUMBTYPE(rust_psm_stack_direction)
THUMBFN
FUNCTION(rust_psm_stack_direction):
/* extern "C" fn() -> u8 */
FNSTART
.cfi_startproc
/* movs to support Thumb-1 */
movs r0, #STACK_DIRECTION_DESCENDING
bx lr
.rust_psm_stack_direction_end:
SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
.cfi_endproc
CANTUNWIND
FNEND
GLOBL(rust_psm_stack_pointer)
.p2align 2
THUMBTYPE(rust_psm_stack_pointer)
THUMBFN
FUNCTION(rust_psm_stack_pointer):
/* extern "C" fn() -> *mut u8 */
FNSTART
.cfi_startproc
mov r0, sp
bx lr
.rust_psm_stack_pointer_end:
SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
.cfi_endproc
CANTUNWIND
FNEND
GLOBL(rust_psm_replace_stack)
.p2align 2
THUMBTYPE(rust_psm_replace_stack)
THUMBFN
FUNCTION(rust_psm_replace_stack):
/* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */
FNSTART
.cfi_startproc
/* All we gotta do is set the stack pointer to r2 & tail-call the callback in r1 */
mov sp, r2
bx r1
.rust_psm_replace_stack_end:
SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
.cfi_endproc
CANTUNWIND
FNEND
GLOBL(rust_psm_on_stack)
.p2align 2
THUMBTYPE(rust_psm_on_stack)
THUMBFN
FUNCTION(rust_psm_on_stack):
/* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */
FNSTART
.cfi_startproc
push {r4, lr}
.cfi_def_cfa_offset 8
mov r4, sp
.cfi_def_cfa_register r4
.cfi_offset lr, -4
.cfi_offset r4, -8
mov sp, r3
blx r2
mov sp, r4
.cfi_restore sp
pop {r4, pc}
.rust_psm_on_stack_end:
SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
.cfi_endproc
CANTUNWIND
FNEND
|
Prom11111/plumbingtech | 2,557 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/x86.s | #include "psm.h"
/* NOTE: fastcall calling convention used on all x86 targets */
.text
#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
#define GLOBL(fnname) .globl _##fnname
#define TYPE(fnname)
#define FUNCTION(fnname) _##fnname
#define SIZE(fnname,endlabel)
#else
#define GLOBL(fnname) .globl fnname
#define TYPE(fnname) .type fnname,@function
#define FUNCTION(fnname) fnname
#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
#endif
GLOBL(rust_psm_stack_direction)
.p2align 4
TYPE(rust_psm_stack_direction)
FUNCTION(rust_psm_stack_direction):
/* extern "fastcall" fn() -> u8 (%al) */
.cfi_startproc
movb $STACK_DIRECTION_DESCENDING, %al # stack always grows down on x86
retl
.rust_psm_stack_direction_end:
SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
.cfi_endproc
GLOBL(rust_psm_stack_pointer)
.p2align 4
TYPE(rust_psm_stack_pointer)
FUNCTION(rust_psm_stack_pointer):
/* extern "fastcall" fn() -> *mut u8 (%eax) */
.cfi_startproc
leal 4(%esp), %eax   # report the caller's sp: skip the return address pushed by the call
retl
.rust_psm_stack_pointer_end:
SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
.cfi_endproc
GLOBL(rust_psm_replace_stack)
.p2align 4
TYPE(rust_psm_replace_stack)
FUNCTION(rust_psm_replace_stack):
/* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */
.cfi_startproc
/*
All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx.
Note, that the callee expects the stack to be offset by 4 bytes (normally, a return address
would be stored there) off the required stack alignment on entry. To offset the stack in such a
way we use the `calll` instruction; it would also be possible to use plain `jmpl`, but that
would require adjusting the stack manually, which cannot be easily done, because the stack
pointer argument is already stored in memory.
*/
movl 4(%esp), %esp
calll *%edx
ud2   /* the callback must never return */
.rust_psm_replace_stack_end:
SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
.cfi_endproc
GLOBL(rust_psm_on_stack)
.p2align 4
TYPE(rust_psm_on_stack)
FUNCTION(rust_psm_on_stack):
/* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */
.cfi_startproc
pushl %ebp
.cfi_def_cfa %esp, 8
.cfi_offset %ebp, -8
movl %esp, %ebp
.cfi_def_cfa_register %ebp
movl 12(%ebp), %esp   /* switch to the new stack (the 8(%esp) argument) */
calll *8(%ebp)        /* invoke callback(%ecx, %edx) on the new stack */
movl %ebp, %esp       /* switch back to the original stack */
popl %ebp
.cfi_def_cfa %esp, 4
retl $8               /* fastcall callee pops its two stack-passed arguments */
.rust_psm_on_stack_end:
SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
.cfi_endproc
|
Prom11111/plumbingtech | 3,609 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc64_aix.s | .csect .text[PR],2
.file "powerpc64_aix.s"
.globl rust_psm_stack_direction[DS]
.globl .rust_psm_stack_direction
.align 4
.csect rust_psm_stack_direction[DS],3
.vbyte 8, .rust_psm_stack_direction
.vbyte 8, TOC[TC0]
.vbyte 8, 0
.csect .text[PR],2
.rust_psm_stack_direction:
# extern "C" fn() -> u8
li 3, 2
blr
L..rust_psm_stack_direction_end:
# Following bytes form the traceback table on AIX.
# For specification, see https://www.ibm.com/docs/en/aix/7.2?topic=processor-traceback-tables.
# For implementation, see https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp,
# `PPCAIXAsmPrinter::emitTracebackTable`.
.vbyte 4, 0x00000000 # Traceback table begin, for unwinder to search the table.
.byte 0x00 # Version = 0
.byte 0x09 # Language = CPlusPlus, since rust is using C++-like LSDA.
.byte 0x20 # -IsGlobaLinkage, -IsOutOfLineEpilogOrPrologue
# +HasTraceBackTableOffset, -IsInternalProcedure
# -HasControlledStorage, -IsTOCless
# -IsFloatingPointPresent
# -IsFloatingPointOperationLogOrAbortEnabled
.byte 0x40 # -IsInterruptHandler, +IsFunctionNamePresent, -IsAllocaUsed
# OnConditionDirective = 0, -IsCRSaved, -IsLRSaved
.byte 0x80 # +IsBackChainStored, -IsFixup, NumOfFPRsSaved = 0
.byte 0x00 # -HasExtensionTable, -HasVectorInfo, NumOfGPRsSaved = 0
.byte 0x00 # NumberOfFixedParms = 0
.byte 0x01 # NumberOfFPParms = 0, +HasParmsOnStack
.vbyte 4, L..rust_psm_stack_direction_end-.rust_psm_stack_direction #Function size
.vbyte 2, 0x0018 # Function name len = 24
.byte "rust_psm_stack_direction" # Function Name
.globl rust_psm_stack_pointer[DS]
.globl .rust_psm_stack_pointer
.align 4
.csect rust_psm_stack_pointer[DS],3
.vbyte 8, .rust_psm_stack_pointer
.vbyte 8, TOC[TC0]
.vbyte 8, 0
.csect .text[PR],2
.rust_psm_stack_pointer:
# extern "C" fn() -> *mut u8
mr 3, 1
blr
L..rust_psm_stack_pointer_end:
.vbyte 4, 0x00000000
.byte 0x00
.byte 0x09
.byte 0x20
.byte 0x40
.byte 0x80
.byte 0x00
.byte 0x00
.byte 0x01
.vbyte 4, L..rust_psm_stack_pointer_end-.rust_psm_stack_pointer
.vbyte 2, 0x0016
.byte "rust_psm_stack_pointer"
.globl rust_psm_replace_stack[DS]
.globl .rust_psm_replace_stack
.align 4
.csect rust_psm_replace_stack[DS],3
.vbyte 8, .rust_psm_replace_stack
.vbyte 8, TOC[TC0]
.vbyte 8, 0
.csect .text[PR],2
.rust_psm_replace_stack:
# extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8)
# Load the function pointer and toc pointer from TOC and make the call.
ld 2, 8(4)
ld 4, 0(4)
addi 5, 5, -48
mr 1, 5
mtctr 4
bctr
L..rust_psm_replace_stack_end:
.vbyte 4, 0x00000000
.byte 0x00
.byte 0x09
.byte 0x20
.byte 0x40
.byte 0x80
.byte 0x00
.byte 0x03
.byte 0x01
.vbyte 4, 0x00000000 # Parameter type = i, i, i
.vbyte 4, L..rust_psm_replace_stack_end-.rust_psm_replace_stack
.vbyte 2, 0x0016
.byte "rust_psm_replace_stack"
.globl rust_psm_on_stack[DS]
.globl .rust_psm_on_stack
.align 4
.csect rust_psm_on_stack[DS],3
.vbyte 8, .rust_psm_on_stack
.vbyte 8, TOC[TC0]
.vbyte 8, 0
.csect .text[PR],2
.rust_psm_on_stack:
# extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8)
mflr 0
std 2, -72(6)
std 0, -8(6)
sub 6, 6, 1
addi 6, 6, -112
stdux 1, 1, 6
ld 2, 8(5)
ld 5, 0(5)
mtctr 5
bctrl
ld 2, 40(1)
ld 0, 104(1)
mtlr 0
ld 1, 0(1)
blr
L..rust_psm_on_stack_end:
.vbyte 4, 0x00000000
.byte 0x00
.byte 0x09
.byte 0x20
.byte 0x41
.byte 0x80
.byte 0x00
.byte 0x04
.byte 0x01
.vbyte 4, 0x00000000 # Parameter type = i, i, i, i
.vbyte 4, L..rust_psm_on_stack_end-.rust_psm_on_stack
.vbyte 2, 0x0011
.byte "rust_psm_on_stack"
.toc
|
Prom11111/plumbingtech | 2,045 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc64_openpower.s | /* Implementation of stack swtiching routines for OpenPOWER 64-bit ELF ABI
The specification can be found at
http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/ch_preface.html
This ABI is usually used by the ppc64le targets.
*/
#include "psm.h"
.text
.abiversion 2
.globl rust_psm_stack_direction
.p2align 4
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li 3, STACK_DIRECTION_DESCENDING
blr
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 4
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
mr 3, 1
blr
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 4
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
.cfi_startproc
addi 5, 5, -32
mtctr 4
mr 12, 4
mr 1, 5
bctr
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 4
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
.cfi_startproc
/* Save LR and the TOC pointer (r2) just below the top of the new stack, then
   build a back-chained frame there so unwinding can walk back to the caller. */
mflr 0
std 0, -8(6)
std 2, -24(6)
/* r6 = (new_stack - 48) - old_sp, so the stdux below both switches stacks and
   stores the back-chain pointer to the old frame in a single instruction */
sub 6, 6, 1
addi 6, 6, -48
stdux 1, 1, 6
.cfi_def_cfa r1, 48
.cfi_offset r1, -48
.cfi_offset r2, -24
.cfi_offset lr, -8
/* ELFv2 requires the callee's entry address in r12 for TOC-relative addressing */
mr 12, 5
mtctr 5
bctrl
/* Restore the TOC pointer and LR from the frame built above (offsets relative
   to the 48-byte frame: -24+48=24, -8+48=40) */
ld 2, 24(1)
.cfi_restore r2
ld 0, 40(1)
mtlr 0
.cfi_restore lr
/* FIXME: after this instruction backtrace breaks until control returns to the caller */
ld 1, 0(1)
blr
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 2,218 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/x86_64_windows_gnu.s | .text
.def rust_psm_stack_direction
.scl 2
.type 32
.endef
.globl rust_psm_stack_direction
.p2align 4
rust_psm_stack_direction:
/* extern "sysv64" fn() -> u8 (%al) */
.cfi_startproc
movb $2, %al # always descending on x86_64
retq
.cfi_endproc
.def rust_psm_stack_pointer
.scl 2
.type 32
.endef
.globl rust_psm_stack_pointer
.p2align 4
rust_psm_stack_pointer:
/* extern "sysv64" fn() -> *mut u8 (%rax) */
.cfi_startproc
leaq 8(%rsp), %rax
retq
.cfi_endproc
.def rust_psm_replace_stack
.scl 2
.type 32
.endef
.globl rust_psm_replace_stack
.p2align 4
rust_psm_replace_stack:
/* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8, %rcx: *mut u8) */
.cfi_startproc
/*
Publish the new stack extents into the TIB -- StackBase at %gs:0x08, StackLimit at
%gs:0x10 -- so that SEH keeps working on the new stack, then set the stack pointer
to %rdx and tail-call the callback in %rsi.
The 8-byte offset accounts for the "return" pointer that would otherwise be placed
onto the stack by a regular call.
BUG FIX: the two mov operands were reversed (AT&T order), so the code *loaded* the
TIB fields into %rdx/%rcx -- clobbering the new-stack-pointer argument before it was
used -- instead of *storing* the arguments into the TIB.
*/
movq %rdx, %gs:0x08
movq %rcx, %gs:0x10
leaq -8(%rdx), %rsp
jmpq *%rsi
.cfi_endproc
.def rust_psm_on_stack
.scl 2
.type 32
.endef
.globl rust_psm_on_stack
.p2align 4
rust_psm_on_stack:
/*
extern "sysv64" fn(%rdi: usize, %rsi: usize,
%rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8, %r8: *mut u8)
NB: on Windows for SEH to work at all, the pointers in TIB, thread information block, need to be
fixed up. Otherwise, it seems that exception mechanism on Windows will not bother looking for
exception handlers at *all* if they happen to fall outside the are specified in TIB.
This necessitates an API difference from the usual 4-argument signature used elsewhere.
FIXME: this needs a catch-all exception handler that aborts in case somebody unwinds into here.
*/
.cfi_startproc
pushq %rbp
.cfi_def_cfa %rsp, 16
.cfi_offset %rbp, -16
pushq %gs:0x08
.cfi_def_cfa %rsp, 24
pushq %gs:0x10
.cfi_def_cfa %rsp, 32
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rcx, %gs:0x08
movq %r8, %gs:0x10
movq %rcx, %rsp
callq *%rdx
movq %rbp, %rsp
popq %gs:0x10
.cfi_def_cfa %rsp, 24
popq %gs:0x08
.cfi_def_cfa %rsp, 16
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.cfi_endproc
|
Prom11111/plumbingtech | 2,541 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc64.s | /* Implementation of the AIX-like PowerPC ABI. Seems to be used by the big-endian PowerPC targets.
The following references were used during the implementation of this code:
https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_rntime_stack.htm
https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_reg_use_conv.htm
https://www.ibm.com/developerworks/library/l-powasm4/index.html
*/
#include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li 3, STACK_DIRECTION_DESCENDING
blr
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
mr 3, 1
blr
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
.cfi_startproc
/* AIX-style ABI: r4 points at a function descriptor; load the TOC pointer (r2)
   and the actual entry address from it before jumping */
ld 2, 8(4)
ld 4, 0(4)
/* do not allocate the whole 112-byte frame; we know it won't be used */
addi 5, 5, -48
mr 1, 5
mtctr 4
bctr
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
.cfi_startproc
mflr 0
std 2, -72(6)
std 0, -8(6)
sub 6, 6, 1
addi 6, 6, -112
stdux 1, 1, 6
.cfi_def_cfa r1, 112
.cfi_offset r1, -112
.cfi_offset r2, -72
.cfi_offset lr, -8
/* load the function pointer from TOC and make the call */
ld 2, 8(5)
ld 5, 0(5)
mtctr 5
bctrl
ld 2, 40(1)
.cfi_restore r2
ld 0, 104(1)
mtlr 0
.cfi_restore lr
/* FIXME: after this instruction backtrace breaks until control returns to the caller.
That being said compiler-generated code has the same issue, so I guess that is fine for now?
*/
ld 1, 0(1)
.cfi_restore r1
blr
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 2,264 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/x86_windows_gnu.s | /* FIXME: this works locally but not on appveyor??!? */
/* NOTE: fastcall calling convention used on all x86 targets */
.text
.def @rust_psm_stack_direction@0
.scl 2
.type 32
.endef
.globl @rust_psm_stack_direction@0
.p2align 4
@rust_psm_stack_direction@0:
/* extern "fastcall" fn() -> u8 (%al) */
.cfi_startproc
movb $2, %al # 2 = STACK_DIRECTION_DESCENDING; stack always grows down on x86
retl
.cfi_endproc
.def @rust_psm_stack_pointer@0
.scl 2
.type 32
.endef
.globl @rust_psm_stack_pointer@0
.p2align 4
@rust_psm_stack_pointer@0:
/* extern "fastcall" fn() -> *mut u8 (%eax) */
.cfi_startproc
leal 4(%esp), %eax   # report the caller's sp: skip the return address pushed by the call
retl
.cfi_endproc
.def @rust_psm_replace_stack@16
.scl 2
.type 32
.endef
.globl @rust_psm_replace_stack@16
.p2align 4
@rust_psm_replace_stack@16:
/* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8, 8(%esp): *mut u8) */
.cfi_startproc
/*
Publish the new stack extents into the TIB -- StackBase at %fs:0x04, StackLimit at
%fs:0x08 -- so SEH keeps working, then set the stack pointer to 4(%esp) & tail-call
the callback in %edx.
Note, that the callee expects the stack to be offset by 4 bytes (normally, a return address
would be stored there) off the required stack alignment on entry. To offset the stack in such a
way we use the `calll` instruction; plain `jmpl` would require adjusting the stack manually,
which cannot be easily done, because the stack pointer argument is already stored in memory.
*/
movl 8(%esp), %eax      /* new stack limit (bottom) */
mov %eax, %fs:0x08
movl 4(%esp), %esp      /* new stack base (top) becomes the stack pointer */
mov %esp, %fs:0x04
calll *%edx
ud2                     /* the callback must never return */
.cfi_endproc
.def @rust_psm_on_stack@16
.scl 2
.type 32
.endef
.globl @rust_psm_on_stack@16
.p2align 4
@rust_psm_on_stack@16:
/* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */
/* NOTE(review): the body reads three stack dwords (16/20/24(%ebp) after three pushes)
   and pops 12 bytes with `retl $12`, i.e. it expects a third stack argument (the new
   TIB StackLimit), while the @16 decoration and the signature comment above only
   account for two stack arguments -- confirm against the callers. */
.cfi_startproc
pushl %ebp
.cfi_def_cfa %esp, 8
.cfi_offset %ebp, -8
/* Save the current TIB stack extents (StackBase at %fs:0x04, StackLimit at
   %fs:0x08) so they can be restored after the callback returns */
pushl %fs:0x04
.cfi_def_cfa %esp, 12
pushl %fs:0x08
.cfi_def_cfa %esp, 16
movl %esp, %ebp
.cfi_def_cfa_register %ebp
/* Publish the new stack extents into the TIB and switch stacks */
movl 24(%ebp), %eax
movl %eax, %fs:0x08
movl 20(%ebp), %esp
movl %esp, %fs:0x04
calll *16(%ebp)
/* Switch back and restore the original TIB stack extents */
movl %ebp, %esp
popl %fs:0x08
.cfi_def_cfa %esp, 12
popl %fs:0x04
.cfi_def_cfa %esp, 8
popl %ebp
.cfi_def_cfa %esp, 4
retl $12
.cfi_endproc
|
Prom11111/plumbingtech | 1,938 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/sparc64.s | #include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
jmpl %o7 + 8, %g0
mov STACK_DIRECTION_DESCENDING, %o0
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
jmpl %o7 + 8, %g0
mov %o6, %o0
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */
.cfi_startproc
.cfi_def_cfa 0, 0
.cfi_return_column 0
jmpl %o1, %g0
/* delay slot: the SPARC V9 ABI biases %sp and %fp by 2047 (the "stack bias"),
   hence the odd-looking -0x7ff adjustment when installing the new stack pointer */
add %o2, -0x7ff, %o6
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */
.cfi_startproc
/* The fact that locals and saved register windows are offset by 2kB is
very nasty property of SPARC architecture and ABI. In this case it forces us to slice off
2kB of the stack space outright for no good reason other than adapting to a botched design.
*/
save %o3, -0x87f, %o6
.cfi_def_cfa_register %fp
.cfi_window_save
.cfi_register %r15, %r31
mov %i1, %o1
jmpl %i2, %o7
mov %i0, %o0
ret
restore
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 2,144 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/mips64_eabi.s | /*
Not only MIPS has 20 different ABIs... nobody tells anybody what specific variant of which ABI is
used where.
This is an "EABI" implementation based on the following page:
http://www.cygwin.com/ml/binutils/2003-06/msg00436.html
*/
#include "psm.h"
.set noreorder /* we’ll manage the delay slots on our own, thanks! */
.text
.globl rust_psm_stack_direction
.p2align 3
.type rust_psm_stack_direction,@function
.ent rust_psm_stack_direction
/* extern "C" fn() -> u8 */
rust_psm_stack_direction:
.cfi_startproc
jr $31
addiu $2, $zero, STACK_DIRECTION_DESCENDING
.end rust_psm_stack_direction
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 3
.type rust_psm_stack_pointer,@function
.ent rust_psm_stack_pointer
/* extern "C" fn() -> *mut u8 */
rust_psm_stack_pointer:
.cfi_startproc
jr $31
move $2, $29
.end rust_psm_stack_pointer
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 3
.type rust_psm_replace_stack,@function
.ent rust_psm_replace_stack
/* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */
rust_psm_replace_stack:
.cfi_startproc
/* position-independent callees expect their own address in $25 (t9) */
move $25, $5
jr $5
/* branch delay slot: install the new stack pointer */
move $29, $6
.end rust_psm_replace_stack
.rust_psm_replace_stack_end:
/* BUG FIX: the .size expression previously measured the rust_psm_on_stack labels
   (copy-paste error), recording a bogus size for this symbol */
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 3
.type rust_psm_on_stack,@function
.ent rust_psm_on_stack
/* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */
rust_psm_on_stack:
.cfi_startproc
sd $29, -8($7)
sd $31, -16($7)
.cfi_def_cfa 7, 0
.cfi_offset 31, -16
.cfi_offset 29, -8
move $25, $6
jalr $31, $6
daddiu $29, $7, -16
.cfi_def_cfa 29, 16
ld $31, 0($29)
.cfi_restore 31
ld $29, 8($29)
.cfi_restore 29
jr $31
nop
.end rust_psm_on_stack
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 2,235 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/x86_64.s | #include "psm.h"
/* NOTE: sysv64 calling convention is used on all x86_64 targets, including Windows! */
.text
#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
#define GLOBL(fnname) .globl _##fnname
#define TYPE(fnname)
#define FUNCTION(fnname) _##fnname
#define SIZE(fnname,endlabel)
#else
#define GLOBL(fnname) .globl fnname
#define TYPE(fnname) .type fnname,@function
#define FUNCTION(fnname) fnname
#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
#endif
GLOBL(rust_psm_stack_direction)
.p2align 4
TYPE(rust_psm_stack_direction)
FUNCTION(rust_psm_stack_direction):
/* extern "sysv64" fn() -> u8 (%al) */
.cfi_startproc
movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86_64
retq
.rust_psm_stack_direction_end:
SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
.cfi_endproc
GLOBL(rust_psm_stack_pointer)
.p2align 4
TYPE(rust_psm_stack_pointer)
FUNCTION(rust_psm_stack_pointer):
/* extern "sysv64" fn() -> *mut u8 (%rax) */
.cfi_startproc
leaq 8(%rsp), %rax
retq
.rust_psm_stack_pointer_end:
SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
.cfi_endproc
GLOBL(rust_psm_replace_stack)
.p2align 4
TYPE(rust_psm_replace_stack)
FUNCTION(rust_psm_replace_stack):
/* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8) */
.cfi_startproc
/*
All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi.
8-byte offset necessary to account for the "return" pointer that would otherwise be placed onto
stack with a regular call
*/
leaq -8(%rdx), %rsp
jmpq *%rsi
.rust_psm_replace_stack_end:
SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
.cfi_endproc
GLOBL(rust_psm_on_stack)
.p2align 4
TYPE(rust_psm_on_stack)
FUNCTION(rust_psm_on_stack):
/* extern "sysv64" fn(%rdi: usize, %rsi: usize, %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8) */
.cfi_startproc
pushq %rbp
.cfi_def_cfa %rsp, 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rcx, %rsp
callq *%rdx
movq %rbp, %rsp
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.rust_psm_on_stack_end:
SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
.cfi_endproc
|
Prom11111/plumbingtech | 2,149 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/mips_eabi.s | /*
Not only MIPS has 20 different ABIs... nobody tells anybody what specific variant of which ABI is
used where.
This is an "EABI" implementation based on the following page:
http://www.cygwin.com/ml/binutils/2003-06/msg00436.html
*/
#include "psm.h"
.set noreorder /* we’ll manage the delay slots on our own, thanks! */
.text
.abicalls
/* rust_psm_stack_direction: stacks grow downwards on MIPS.
   Note .set noreorder is in effect: the return value is loaded into
   $2 (v0) in the jr delay slot. */
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
.ent rust_psm_stack_direction
/* extern "C" fn() -> u8 */
rust_psm_stack_direction:
.cfi_startproc
jr $31
addiu $2, $zero, STACK_DIRECTION_DESCENDING /* delay slot: v0 = result */
.end rust_psm_stack_direction
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
/* rust_psm_stack_pointer: return the current stack pointer ($29/sp).
   Result is moved into $2 (v0) in the jr delay slot. */
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
.ent rust_psm_stack_pointer
/* extern "C" fn() -> *mut u8 */
rust_psm_stack_pointer:
.cfi_startproc
jr $31
move $2, $29 /* delay slot: v0 = sp */
.end rust_psm_stack_pointer
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
/* rust_psm_replace_stack: permanently switch the stack to $6 and
   tail-call the callback in $5; the callback must never return.
   $25 (t9) is loaded with the callee address as the PIC (.abicalls)
   calling convention requires, and the stack switch itself happens in
   the jr delay slot (.set noreorder is in effect).
   FIX: the .size expression previously measured rust_psm_on_stack
   (.rust_psm_on_stack_end-rust_psm_on_stack); it now measures this
   function's own extent, consistent with every other function here. */
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
.ent rust_psm_replace_stack
/* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */
rust_psm_replace_stack:
.cfi_startproc
move $25, $5 /* t9 = callee, required for PIC callees to compute $gp */
jr $5
move $29, $6 /* delay slot: sp = new stack */
.end rust_psm_replace_stack
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
/* rust_psm_on_stack: run the callback $6($4, $5) on the stack topped at
   $7, then restore the original sp and return.
   The old $29 (sp) and $31 (ra) are spilled just below the new stack
   top before switching; $25 (t9) mirrors the callee for PIC (.abicalls)
   callees, and delay slots are managed by hand (.set noreorder). */
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
.ent rust_psm_on_stack
/* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */
rust_psm_on_stack:
.cfi_startproc
sw $29, -4($7) /* save old sp at new_top-4 */
sw $31, -8($7) /* save old ra at new_top-8 */
.cfi_def_cfa 7, 0
.cfi_offset 31, -8
.cfi_offset 29, -4
move $25, $6   /* t9 = callee for PIC convention */
jalr $31, $6
addiu $29, $7, -8 /* delay slot: sp = new_top-8, covering the two spills */
.cfi_def_cfa 29, 8
lw $31, 0($29) /* reload saved ra */
.cfi_restore 31
lw $29, 4($29) /* reload saved sp (switches back to the old stack) */
.cfi_restore 29
jr $31
nop            /* delay slot: nothing left to do */
.end rust_psm_on_stack
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 1,568 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/loongarch64.s | #include "psm.h"
.text
/* rust_psm_stack_direction: stacks grow downwards on LoongArch64.
   $r4 (a0) = return value, $r1 (ra) = return address. */
.globl rust_psm_stack_direction
.align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li.w $r4, STACK_DIRECTION_DESCENDING
jr $r1
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
/* rust_psm_stack_pointer: return the current stack pointer ($r3/sp)
   in $r4 (a0). */
.globl rust_psm_stack_pointer
.align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
move $r4, $r3
jr $r1
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
/* rust_psm_replace_stack: irreversibly switch sp ($r3) to the new stack
   in $r6 and tail-call the callback in $r5; the callback must never
   return ($r4, the usize argument, passes through untouched). */
.globl rust_psm_replace_stack
.align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */
.cfi_startproc
move $r3, $r6
jr $r5
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
/* rust_psm_on_stack: run the callback $r6($r4, $r5) on the stack topped
   at $r7, then restore the original sp/ra and return.
   Old ra ($r1) and sp ($r3) are spilled at new_top-8/-16; the CFI
   directives keep unwinding across the stack switch intact. */
.globl rust_psm_on_stack
.align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize, usize), r7: *mut u8) */
.cfi_startproc
st.d $r1, $r7, -8   /* save old ra at new_top-8 */
st.d $r3, $r7, -16  /* save old sp at new_top-16 */
addi.d $r3, $r7, -16 /* switch: sp = new_top-16 */
.cfi_def_cfa 3, 16
.cfi_offset 1, -8
.cfi_offset 3, -16
jirl $r1, $r6, 0    /* call the callback */
ld.d $r1, $r3, 8    /* reload saved ra */
.cfi_restore 1
ld.d $r3, $r3, 0    /* reload saved sp (back to the old stack) */
.cfi_restore 3
jr $r1
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 1,722 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/sparc_sysv.s | #include "psm.h"
/* FIXME: this ABI has definitely not been verified at all */
.text
/* rust_psm_stack_direction: stacks grow downwards on SPARC.
   The return value lands in %o0 via the delay slot of the
   jmpl-return (%o7+8 skips the call and its delay slot). */
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
jmpl %o7 + 8, %g0
mov STACK_DIRECTION_DESCENDING, %o0 /* delay slot */
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
/* rust_psm_stack_pointer: return the current stack pointer (%o6/%sp)
   in %o0, moved in the return's delay slot. */
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
jmpl %o7 + 8, %g0
mov %o6, %o0 /* delay slot */
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
/* rust_psm_replace_stack: switch %sp to the new stack in %o2 and
   tail-jump to the callback in %o1 (delay slot performs the switch);
   the callback must never return. The -0x3ff adjustment presumably
   accounts for the SPARC V9 stack bias — TODO confirm; the original
   author flagged the same uncertainty below. */
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */
.cfi_startproc
.cfi_def_cfa 0, 0
.cfi_return_column 0
jmpl %o1, %g0
/* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */
add %o2, -0x3ff, %o6 /* delay slot: sp = new stack (biased) */
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
/* rust_psm_on_stack: run the callback %i2(%i0, %i1) on the stack topped
   at %i3, then return via the register-window restore.
   `save` opens a new register window AND installs the new stack
   (%o3 minus frame+bias) in one step, so the old sp/fp live in the
   previous window rather than in memory.
   NOTE(review): the file itself warns this ABI has not been verified. */
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */
.cfi_startproc
save %o3, -0x43f, %o6 /* new window; sp = new_top - frame - bias */
.cfi_def_cfa_register %fp
.cfi_window_save
.cfi_register %r15, %r31
mov %i1, %o1
jmpl %i2, %o7
mov %i0, %o0 /* delay slot: first out-arg */
ret
restore /* delay slot: pop the register window, restoring old sp/fp */
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 1,694 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/wasm32.s | #include "psm.h"
# Note that this function is not compiled when this package is uploaded to
# crates.io, this source is only here as a reference for how the corresponding
# wasm32.o was generated. This file can be compiled with:
#
# cpp psm/src/arch/wasm32.s | llvm-mc -o psm/src/arch/wasm32.o --arch=wasm32 -filetype=obj
#
# where you'll want to ensure that `llvm-mc` is from a relatively recent
# version of LLVM.
.globaltype __stack_pointer, i32
# rust_psm_stack_direction: the shadow (linear-memory) stack used by
# LLVM's wasm32 codegen grows downwards.
.globl rust_psm_stack_direction
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
.functype rust_psm_stack_direction () -> (i32)
i32.const STACK_DIRECTION_DESCENDING
end_function
# rust_psm_stack_pointer: return the current value of the
# __stack_pointer global (the wasm shadow stack pointer).
.globl rust_psm_stack_pointer
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
.functype rust_psm_stack_pointer () -> (i32)
global.get __stack_pointer
end_function
# rust_psm_on_stack(arg0, arg1, fn_index, new_sp):
# swap __stack_pointer to new_sp, indirectly call fn_index(arg0, arg1),
# then restore the previous __stack_pointer. Local 3 is reused to hold
# the saved old stack pointer across the call.
.globl rust_psm_on_stack
.type rust_psm_on_stack,@function
rust_psm_on_stack:
.functype rust_psm_on_stack (i32, i32, i32, i32) -> ()
# get our new stack argument, then save the old stack
# pointer into that local
local.get 3
global.get __stack_pointer
local.set 3
global.set __stack_pointer
# Call our indirect function specified
local.get 0
local.get 1
local.get 2
call_indirect (i32, i32) -> ()
# restore the stack pointer before returning
local.get 3
global.set __stack_pointer
end_function
# rust_psm_replace_stack(arg0, fn_index, new_sp):
# irreversibly install new_sp as __stack_pointer and call
# fn_index(arg0); the callback must never return, hence the trap.
.globl rust_psm_replace_stack
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
.functype rust_psm_replace_stack (i32, i32, i32) -> ()
local.get 2
global.set __stack_pointer
local.get 0
local.get 1
call_indirect (i32) -> ()
unreachable
end_function
|
Prom11111/plumbingtech | 2,080 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/zseries_linux.s | /* Implementation of stack swtiching routines for zSeries LINUX ABI.
This ABI is used by the s390x-unknown-linux-gnu target.
Documents used:
* LINUX for zSeries: ELF Application Binary Interface Supplement (1st ed., 2001) (LNUX-1107-01)
* z/Architecture: Principles of Operation (4th ed., 2004) (SA22-7832-03)
*/
#include "psm.h"
.text
/* rust_psm_stack_direction: stacks grow downwards on s390x.
   %r2 = return value, %r14 = return address. */
.globl rust_psm_stack_direction
.p2align 4
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
lghi %r2, STACK_DIRECTION_DESCENDING
br %r14
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
/* rust_psm_stack_pointer: return the current stack pointer (%r15)
   in %r2. */
.globl rust_psm_stack_pointer
.p2align 4
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
la %r2, 0(%r15)
br %r14
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
/* rust_psm_replace_stack: irreversibly switch to the new stack in %r4
   and call the callback in %r3; the callback must never return.
   The -160 reserves the zSeries ABI register save area the callee is
   entitled to below the caller's sp. */
.globl rust_psm_replace_stack
.p2align 4
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(r2: usize, r3: extern "C" fn(usize), r4: *mut u8) */
.cfi_startproc
/* FIXME: backtrace does not terminate cleanly for some reason */
lay %r15, -160(%r4)
/* FIXME: this is `basr` instead of `br` purely to remove the backtrace link to the caller */
basr %r14, %r3
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
/* rust_psm_on_stack: run the callback %r4(%r2, %r3) on the stack topped
   at %r5, then restore the original %r14/%r15 and return.
   Old ra/sp are stored at new_top-16; the frame is 176 bytes =
   160-byte ABI register save area + that 16-byte spill. */
.globl rust_psm_on_stack
.p2align 4
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(r2: usize, r3: usize, r4: extern "C" fn(usize, usize), r5: *mut u8) */
.cfi_startproc
stmg %r14, %r15, -16(%r5) /* save old ra and sp just below new_top */
lay %r15, -176(%r5)       /* switch: sp = new_top - 176 */
.cfi_def_cfa %r15, 176
.cfi_offset %r14, -16
.cfi_offset %r15, -8
basr %r14, %r4            /* call the callback */
lmg %r14, %r15, 160(%r15) /* reload saved ra/sp (back to the old stack) */
.cfi_restore %r14
.cfi_restore %r15
br %r14
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 1,963 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc32.s | #include "psm.h"
/* FIXME: this probably does not cover all ABIs? Tested with sysv only, possibly works for AIX as
well?
*/
.text
/* rust_psm_stack_direction: stacks grow downwards on PowerPC.
   r3 = return value. */
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li 3, STACK_DIRECTION_DESCENDING
blr
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
/* rust_psm_stack_pointer: return the current stack pointer (r1)
   in r3. */
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
mr 3, 1
blr
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
/* rust_psm_replace_stack: irreversibly switch r1 to the new stack in r5
   (minus 16 bytes of headroom, keeping 16-byte alignment) and tail-call
   the callback in r4 via CTR; the callback must never return. */
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
.cfi_startproc
/* NOTE: perhaps add a debug-assertion for stack alignment? */
addi 5, 5, -16
mr 1, 5
mtctr 4
bctr
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
/* rust_psm_on_stack: run the callback r5(r3, r4) on the stack topped at
   r6, then restore the original r1/LR and return.
   LR is stored at new_top-24; stwux both stores the old r1 at the new
   frame base (forming a proper back-chain) and installs r1 = new_top-32
   in one instruction, so LR ends up at 8(r1). */
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
.cfi_startproc
mflr 0
stw 0, -24(6)  /* save LR at new_top-24 */
sub 6, 6, 1    /* r6 = delta between new_top and old r1 */
addi 6, 6, -32 /* ...minus the 32-byte frame */
stwux 1, 1, 6  /* store old r1 at new frame base; r1 = new_top-32 */
.cfi_def_cfa r1, 32
.cfi_offset r1, -32
.cfi_offset lr, -24
mtctr 5
bctrl          /* call the callback */
lwz 0, 8(1)    /* reload saved LR */
mtlr 0
.cfi_restore lr
/* FIXME: after this instruction backtrace breaks until control returns to the caller
That being said compiler-generated code has the same issue, so I guess that is fine for now?
*/
lwz 1, 0(1)    /* follow the back-chain: restore the old stack pointer */
.cfi_restore r1
blr
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Prom11111/plumbingtech | 40,185 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/chacha-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.section __TEXT,__const
.align 5
Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
// _ChaCha20_ctr32_nohw(out=x0, inp=x1, len=x2, key=x3, counter=x4):
// scalar AArch64 ChaCha20 keystream/XOR, one 64-byte block per outer
// iteration. The 16-word state is kept packed two-per-64-bit-register
// (x22..x28,x30), unpacked into w5..w21 for the rounds.
// NOTE(review): this file is generated from a Perl script ("do not edit
// by hand") — any functional change belongs in the upstream generator.
.globl _ChaCha20_ctr32_nohw
.private_extern _ChaCha20_ctr32_nohw
.align 5
_ChaCha20_ctr32_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#64
// 10 double-rounds (column round then diagonal round) per block.
Loop:
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo Ltail // fewer than 64 bytes remain
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret

// Partial final block: write the keystream to the 64-byte scratch area
// on the stack, then XOR byte-by-byte. Also the landing pad for the
// NEON path's short tails (Less_than_64 is referenced from
// _ChaCha20_ctr32_neon).
.align 4
Ltail:
add x2,x2,#64
Less_than_64:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail
// Scrub keystream material from the stack scratch area.
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
// _ChaCha20_ctr32_neon(out=x0, inp=x1, len=x2, key=x3, counter=x4):
// NEON ChaCha20 processing 256 bytes per outer iteration — three
// 64-byte blocks in vector registers (v0..v3, v4..v7, v16..v19)
// interleaved with one scalar block in w5..w21. Inputs >= 512 bytes
// branch to the 6-block L512_or_more_neon path (defined below, outside
// this function's view).
// NOTE(review): generated file ("do not edit by hand") — functional
// changes belong in the upstream Perl generator.
.globl _ChaCha20_ctr32_neon
.private_extern _ChaCha20_ctr32_neon
.align 5
_ChaCha20_ctr32_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
// 10 double-rounds; scalar and 3x vector lanes interleaved so integer
// and NEON pipes run in parallel.
Loop_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo Ltail_neon // fewer than 256 bytes remain
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret

// Tail handling: emit whole 64-byte blocks (scalar block first, then
// vector blocks) while >= 64 bytes remain; sub-64-byte leftovers spill
// the keystream to the stack scratch area and finish byte-wise.
// Less_than_64 lives in _ChaCha20_ctr32_nohw above.
Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b Last_neon
Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b Last_neon
Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b Last_neon
.align 4
Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail_neon
// Scrub keystream material from the stack scratch area.
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.align 5
ChaCha20_512_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not typo
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not typo
Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
Loop_upper_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_upper_neon
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
Loop_lower_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_lower_neon
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs Loop_outer_neon
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b Loop_outer
Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
Prom11111/plumbingtech | 18,316 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesni-gcm-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
//-----------------------------------------------------------------------
// _aesni_ctr32_ghash_6x — stitched AES-CTR encryption + GHASH, six
// 128-bit blocks per iteration.  Private helper with a custom register
// contract, shared by _aesni_gcm_encrypt / _aesni_gcm_decrypt below.
//
// Register roles (as set up by the callers in this file):
//   %rcx  = AES key schedule, pre-biased by +128 (round keys at
//           0-128(%rcx), 16-128(%rcx), ...)
//   %rdx  = number of 16-byte blocks remaining; reduced by 6 per pass
//   %rdi/%rsi = input / output byte pointers
//   %r8   = pointer to the 16-byte counter block (updated each pass)
//   %r9   = GHASH key-power table (Htable), pre-biased by +32
//   %r10d = AES round count (cmpl $11 below selects 128/192/256 tails)
//   %r11  = L$bswap_mask constant block (see data section at the end)
//   %r14/%r15 = GHASH input cursor / limit (advance is gated by the
//           cmpq %r14,%r15 / setnc dance so the cursor stops at %r15)
//   %ebx  = low dword of the counter block in stored (big-endian) byte
//           order; %xmm1 = current counter block; %xmm8 = GHASH Xi
// Clobbers: %rax(callers' convention), %r12, %r13, %xmm0-%xmm15, flags.
// Stack: reads/writes scratch at 16+8(%rsp) .. 120+8(%rsp) owned by the
// caller's frame (deferred GHASH inputs + Karatsuba spill slots).
//-----------------------------------------------------------------------
.p2align 5
_aesni_ctr32_ghash_6x:
// %xmm2 = 32(%r11) = L$one_msb: a 1 in the byte position that holds the
// big-endian counter's least-significant byte, so vpaddb increments the
// counter in-place as long as no byte wraps (wrap handled in L$oop6x).
vmovdqu 32(%r11),%xmm2
subq $6,%rdx
vpxor %xmm4,%xmm4,%xmm4
// %xmm15 = round-0 key; pre-whiten the six counter blocks.
vmovdqu 0-128(%rcx),%xmm15
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpaddb %xmm2,%xmm11,%xmm12
vpaddb %xmm2,%xmm12,%xmm13
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm15,%xmm1,%xmm9
// Clear the Xi-feedback slot for the first iteration.
vmovdqu %xmm4,16+8(%rsp)
jmp L$oop6x

.p2align 5
L$oop6x:
// 100663296 = 0x06000000: adds 6 to the counter's least-significant
// (big-endian) byte, which the little-endian load placed in the top
// byte of %ebx.  Carry set => one of the next six byte-wise increments
// would wrap a byte, so take the slow dword-wise path below.
addl $100663296,%ebx
jc L$handle_ctr32
vmovdqu 0-32(%r9),%xmm3
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm15,%xmm10,%xmm10
vpxor %xmm15,%xmm11,%xmm11

L$resume_ctr32:
// Store the counter for the *next* iteration, then interleave:
//  - 10/12/14 vaesenc rounds on %xmm9..%xmm14 (six CTR blocks), and
//  - GHASH of six previously-stashed blocks (48+8..112+8(%rsp)) against
//    the key powers at (%r9), split Karatsuba-style into lo ($0x00),
//    mid ($0x10/$0x01) and hi ($0x11) partial products accumulated in
//    %xmm4 (lo), %xmm6 (mid) and %xmm7 (hi).
// The movbe loads pull the next six blocks from (%r14) byte-swapped and
// stash them on the stack as GHASH input for the following iteration.
vmovdqu %xmm1,(%r8)
vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5
vpxor %xmm15,%xmm12,%xmm12
vmovups 16-128(%rcx),%xmm2
vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6
// %r12 becomes 0 when the GHASH cursor reached the limit (%r15<%r14
// unsigned => carry clear => setnc), else 0x60; it gates the cursor
// advance at the leaq below so GHASH input stops at the buffer end.
xorq %r12,%r12
cmpq %r14,%r15
vaesenc %xmm2,%xmm9,%xmm9
vmovdqu 48+8(%rsp),%xmm0
vpxor %xmm15,%xmm13,%xmm13
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1
vaesenc %xmm2,%xmm10,%xmm10
vpxor %xmm15,%xmm14,%xmm14
setnc %r12b
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vmovdqu 16-32(%r9),%xmm3
negq %r12
vaesenc %xmm2,%xmm12,%xmm12
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5
// Fold last iteration's reduction remainder (%xmm4) into Xi (%xmm8).
vpxor %xmm4,%xmm8,%xmm8
vaesenc %xmm2,%xmm13,%xmm13
vpxor %xmm5,%xmm1,%xmm4
andq $0x60,%r12
vmovups 32-128(%rcx),%xmm15
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1
vaesenc %xmm2,%xmm14,%xmm14
vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2
// Advance the GHASH cursor by 0x60 (six blocks) or 0 (clamped at end).
leaq (%r14,%r12,1),%r14
vaesenc %xmm15,%xmm9,%xmm9
vpxor 16+8(%rsp),%xmm8,%xmm8
vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3
vmovdqu 64+8(%rsp),%xmm0
vaesenc %xmm15,%xmm10,%xmm10
movbeq 88(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 80(%r14),%r12
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,32+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,40+8(%rsp)
vmovdqu 48-32(%r9),%xmm5
vaesenc %xmm15,%xmm14,%xmm14
vmovups 48-128(%rcx),%xmm15
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm3,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3
vaesenc %xmm15,%xmm11,%xmm11
vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5
vmovdqu 80+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vpxor %xmm1,%xmm4,%xmm4
vmovdqu 64-32(%r9),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vmovups 64-128(%rcx),%xmm15
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3
vaesenc %xmm15,%xmm10,%xmm10
movbeq 72(%r14),%r13
vpxor %xmm5,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
movbeq 64(%r14),%r12
vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1
vmovdqu 96+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,48+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,56+8(%rsp)
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 96-32(%r9),%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vmovups 80-128(%rcx),%xmm15
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5
vaesenc %xmm15,%xmm10,%xmm10
movbeq 56(%r14),%r13
vpxor %xmm1,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1
// Fold the sixth (oldest) stashed block into Xi before its multiply.
vpxor 112+8(%rsp),%xmm8,%xmm8
vaesenc %xmm15,%xmm11,%xmm11
movbeq 48(%r14),%r12
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,64+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,72+8(%rsp)
vpxor %xmm3,%xmm4,%xmm4
vmovdqu 112-32(%r9),%xmm3
vaesenc %xmm15,%xmm14,%xmm14
vmovups 96-128(%rcx),%xmm15
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1
vaesenc %xmm15,%xmm10,%xmm10
movbeq 40(%r14),%r13
vpxor %xmm2,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
movbeq 32(%r14),%r12
vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,80+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,88+8(%rsp)
vpxor %xmm5,%xmm6,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor %xmm1,%xmm6,%xmm6
vmovups 112-128(%rcx),%xmm15
// Combine partial products: split the middle term across lo/hi halves
// and start the first reduction step against L$poly at 16(%r11).
vpslldq $8,%xmm6,%xmm5
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 16(%r11),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm8,%xmm7,%xmm7
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm5,%xmm4,%xmm4
movbeq 24(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 16(%r14),%r12
vpalignr $8,%xmm4,%xmm4,%xmm0
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
movq %r13,96+8(%rsp)
vaesenc %xmm15,%xmm12,%xmm12
movq %r12,104+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
vmovups 128-128(%rcx),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vmovups 144-128(%rcx),%xmm15
vaesenc %xmm1,%xmm10,%xmm10
vpsrldq $8,%xmm6,%xmm6
vaesenc %xmm1,%xmm11,%xmm11
vpxor %xmm6,%xmm7,%xmm7
vaesenc %xmm1,%xmm12,%xmm12
vpxor %xmm0,%xmm4,%xmm4
movbeq 8(%r14),%r13
vaesenc %xmm1,%xmm13,%xmm13
movbeq 0(%r14),%r12
vaesenc %xmm1,%xmm14,%xmm14
vmovups 160-128(%rcx),%xmm1
// Key-length dispatch: 240-128(%rcx) held the round count in %r10d;
// <11 rounds => AES-128, done; otherwise run the extra 192/256 rounds.
cmpl $11,%r10d
jb L$enc_tail
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 176-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 192-128(%rcx),%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 208-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 224-128(%rcx),%xmm1
jmp L$enc_tail

.p2align 5
L$handle_ctr32:
// Slow path: a counter byte is about to wrap.  Byte-swap the counter to
// native little-endian, do proper 32-bit adds (vpaddd with L$two_lsb /
// 64(%r11)), then swap each of the six blocks back to stored order.
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vmovdqu 0-32(%r9),%xmm3
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm15,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm15,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpshufb %xmm0,%xmm14,%xmm14
vpshufb %xmm0,%xmm1,%xmm1
jmp L$resume_ctr32

.p2align 5
L$enc_tail:
// Last AES round for all six blocks.  Each input block is XORed into
// the final round key first, so vaesenclast directly yields
// output = AES_k(counter) ^ input.  Meanwhile finish the second GHASH
// reduction step and prime the next iteration's counters.
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
vpalignr $8,%xmm4,%xmm4,%xmm8
vaesenc %xmm15,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
vpxor 0(%rdi),%xmm1,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
vpxor 16(%rdi),%xmm1,%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vpxor 32(%rdi),%xmm1,%xmm5
vaesenc %xmm15,%xmm13,%xmm13
vpxor 48(%rdi),%xmm1,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor 64(%rdi),%xmm1,%xmm7
vpxor 80(%rdi),%xmm1,%xmm3
vmovdqu (%r8),%xmm1
vaesenclast %xmm2,%xmm9,%xmm9
vmovdqu 32(%r11),%xmm2
vaesenclast %xmm0,%xmm10,%xmm10
vpaddb %xmm2,%xmm1,%xmm0
movq %r13,112+8(%rsp)
leaq 96(%rdi),%rdi
// Prefetch input two iterations ahead.
prefetcht0 512(%rdi)
prefetcht0 576(%rdi)
vaesenclast %xmm5,%xmm11,%xmm11
vpaddb %xmm2,%xmm0,%xmm5
movq %r12,120+8(%rsp)
leaq 96(%rsi),%rsi
vmovdqu 0-128(%rcx),%xmm15
vaesenclast %xmm6,%xmm12,%xmm12
vpaddb %xmm2,%xmm5,%xmm6
vaesenclast %xmm7,%xmm13,%xmm13
vpaddb %xmm2,%xmm6,%xmm7
vaesenclast %xmm3,%xmm14,%xmm14
vpaddb %xmm2,%xmm7,%xmm3
addq $0x60,%rax
subq $0x6,%rdx
jc L$6x_done
// Store six output blocks and rotate the freshly incremented counters
// into %xmm9..%xmm14 (pre-whitened with the round-0 key) for the next
// pass; %xmm7 reloads the next GHASH input block from the stash.
vmovups %xmm9,-96(%rsi)
vpxor %xmm15,%xmm1,%xmm9
vmovups %xmm10,-80(%rsi)
vmovdqa %xmm0,%xmm10
vmovups %xmm11,-64(%rsi)
vmovdqa %xmm5,%xmm11
vmovups %xmm12,-48(%rsi)
vmovdqa %xmm6,%xmm12
vmovups %xmm13,-32(%rsi)
vmovdqa %xmm7,%xmm13
vmovups %xmm14,-16(%rsi)
vmovdqa %xmm3,%xmm14
vmovdqu 32+8(%rsp),%xmm7
jmp L$oop6x

L$6x_done:
// Fold the remaining partial products into Xi (%xmm8) and return.
vpxor 16+8(%rsp),%xmm8,%xmm8
vpxor %xmm4,%xmm8,%xmm8
ret
//-----------------------------------------------------------------------
// _aesni_gcm_decrypt — AES-GCM bulk decryption (CTR decrypt + GHASH of
// the ciphertext), processing multiples of 96 bytes.
//
// SysV AMD64 arguments, per the setup code below:
//   %rdi = input (ciphertext), %rsi = output (plaintext)
//   %rdx = length in bytes; returns immediately (rax=0) if < 0x60
//   %rcx = AES key schedule (round count read from offset 240)
//   %r8  = 16-byte counter/IV block (counter dword at offset 12)
//   %r9  = GHASH key-power table (Htable)
//   16(%rbp) = 7th argument: pointer to Xi (GHASH accumulator)
//   NOTE(review): argument naming inferred from usage here; confirm
//   against the C prototype in the calling crate.
// Returns: %rax = number of bytes processed.
//-----------------------------------------------------------------------
.globl _aesni_gcm_decrypt
.private_extern _aesni_gcm_decrypt
.p2align 5
_aesni_gcm_decrypt:
_CET_ENDBR
xorq %rax,%rax
// Require at least 6 blocks (0x60 bytes); otherwise process nothing.
cmpq $0x60,%rdx
jb L$gcm_dec_abort
// Standard frame + callee-saved registers.
pushq %rbp
movq %rsp,%rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
vzeroupper
movq 16(%rbp),%r12
vmovdqu (%r8),%xmm1
// 128-byte, 128-aligned scratch frame for the stitched loop.
addq $-128,%rsp
movl 12(%r8),%ebx
leaq L$bswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
vmovdqu (%r12),%xmm8
andq $-128,%rsp
vmovdqu (%r11),%xmm0
// Bias the key pointer by +128 (the helper indexes rounds relative to
// that) and the Htable pointer by +32.
leaq 128(%rcx),%rcx
leaq 32(%r9),%r9
movl 240-128(%rcx),%r10d
// Xi is kept bit-reflected/byte-swapped internally.
vpshufb %xmm0,%xmm8,%xmm8
// If the key schedule and the stack frame could collide in the same
// 0xf80-window (cache/page aliasing), shift %rsp down to avoid it.
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc L$dec_no_key_aliasing
cmpq $768,%r15
jnc L$dec_no_key_aliasing
subq %r15,%rsp
L$dec_no_key_aliasing:
// Pre-load and byte-swap the first six ciphertext blocks onto the
// scratch stack: for decryption the GHASH input is the ciphertext
// itself, so it can be hashed in the same pass that decrypts it.
vmovdqu 80(%rdi),%xmm7
movq %rdi,%r14
vmovdqu 64(%rdi),%xmm4
// %r15 = input limit used by the helper to clamp its GHASH cursor.
leaq -192(%rdi,%rdx,1),%r15
vmovdqu 48(%rdi),%xmm5
shrq $4,%rdx
xorq %rax,%rax
vmovdqu 32(%rdi),%xmm6
vpshufb %xmm0,%xmm7,%xmm7
vmovdqu 16(%rdi),%xmm2
vpshufb %xmm0,%xmm4,%xmm4
vmovdqu (%rdi),%xmm3
vpshufb %xmm0,%xmm5,%xmm5
vmovdqu %xmm4,48(%rsp)
vpshufb %xmm0,%xmm6,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm2,%xmm2
vmovdqu %xmm6,80(%rsp)
vpshufb %xmm0,%xmm3,%xmm3
vmovdqu %xmm2,96(%rsp)
vmovdqu %xmm3,112(%rsp)
call _aesni_ctr32_ghash_6x
// Flush the last six plaintext blocks, write back Xi (re-swapped to
// memory byte order), and restore the frame.
movq 16(%rbp),%r12
vmovups %xmm9,-96(%rsi)
vmovups %xmm10,-80(%rsi)
vmovups %xmm11,-64(%rsi)
vmovups %xmm12,-48(%rsi)
vmovups %xmm13,-32(%rsi)
vmovups %xmm14,-16(%rsi)
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
L$gcm_dec_abort:
ret
//-----------------------------------------------------------------------
// _aesni_ctr32_6x — encrypt six counter blocks and XOR them with 96
// bytes of input (pure CTR, no GHASH).  Private helper used twice by
// _aesni_gcm_encrypt to produce ciphertext ahead of the stitched loop.
//
// Expects the same register contract as _aesni_ctr32_ghash_6x: %rcx =
// key schedule biased +128, %r10 = round count, %r11 = constants,
// %rdi/%rsi = in/out (each advanced by 96), %r8 = counter block,
// %xmm1 = current counter, %ebx = counter dword in stored byte order,
// %xmm0 = byte-swap mask.  Clobbers %r12, %r13, %xmm1-%xmm15, flags.
//-----------------------------------------------------------------------
.p2align 5
_aesni_ctr32_6x:
vmovdqu 0-128(%rcx),%xmm4
vmovdqu 32(%r11),%xmm2
// %r13 = rounds-1 iterations of the generic round loop below.
leaq -1(%r10),%r13
vmovups 16-128(%rcx),%xmm15
leaq 32-128(%rcx),%r12
vpxor %xmm4,%xmm1,%xmm9
// Same 0x06000000 trick as the main loop: carry means a counter byte
// would wrap within six byte-wise increments -> dword-wise slow path.
addl $100663296,%ebx
jc L$handle_ctr32_2
// Fast path: increment the big-endian counter with vpaddb (L$one_msb)
// and pre-whiten each block with the round-0 key (%xmm4).
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddb %xmm2,%xmm11,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddb %xmm2,%xmm12,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp L$oop_ctr32

.p2align 4
L$oop_ctr32:
// One AES round for all six blocks; %r12 walks the key schedule.
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vmovups (%r12),%xmm15
leaq 16(%r12),%r12
decl %r13d
jnz L$oop_ctr32
// Final round: fold each input block into the last round key (%xmm3)
// so vaesenclast produces output = AES(counter) ^ input directly.
vmovdqu (%r12),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor 0(%rdi),%xmm3,%xmm4
vaesenc %xmm15,%xmm10,%xmm10
vpxor 16(%rdi),%xmm3,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
vpxor 32(%rdi),%xmm3,%xmm6
vaesenc %xmm15,%xmm12,%xmm12
vpxor 48(%rdi),%xmm3,%xmm8
vaesenc %xmm15,%xmm13,%xmm13
vpxor 64(%rdi),%xmm3,%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vpxor 80(%rdi),%xmm3,%xmm3
leaq 96(%rdi),%rdi
vaesenclast %xmm4,%xmm9,%xmm9
vaesenclast %xmm5,%xmm10,%xmm10
vaesenclast %xmm6,%xmm11,%xmm11
vaesenclast %xmm8,%xmm12,%xmm12
vaesenclast %xmm2,%xmm13,%xmm13
vaesenclast %xmm3,%xmm14,%xmm14
vmovups %xmm9,0(%rsi)
vmovups %xmm10,16(%rsi)
vmovups %xmm11,32(%rsi)
vmovups %xmm12,48(%rsi)
vmovups %xmm13,64(%rsi)
vmovups %xmm14,80(%rsi)
leaq 96(%rsi),%rsi
ret
.p2align 5
L$handle_ctr32_2:
// Slow path: byte-swap to little-endian, increment with 32-bit dword
// adds (L$one_lsb / L$two_lsb), swap back, then rejoin the round loop.
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpshufb %xmm0,%xmm14,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpshufb %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp L$oop_ctr32
//-----------------------------------------------------------------------
// _aesni_gcm_encrypt — AES-GCM bulk encryption (CTR encrypt + GHASH of
// the ciphertext).  Because GHASH consumes the *output*, the pipeline
// is primed with two plain CTR passes (12 blocks) before entering the
// stitched loop, and the final 12 blocks are hashed in a long scalar
// pclmulqdq tail after the loop.
//
// SysV AMD64 arguments, per the setup code below (same contract as
// _aesni_gcm_decrypt): %rdi=in, %rsi=out, %rdx=len (returns rax=0 if
// < 288 bytes), %rcx=key schedule, %r8=counter block, %r9=Htable,
// 16(%rbp)=Xi pointer.  NOTE(review): naming inferred from usage;
// confirm against the C prototype.
// Returns: %rax = number of bytes processed.
//-----------------------------------------------------------------------
.globl _aesni_gcm_encrypt
.private_extern _aesni_gcm_encrypt
.p2align 5
_aesni_gcm_encrypt:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+2(%rip)
#endif
xorq %rax,%rax
// Minimum 288 bytes = 18 blocks: 12 to prime + 6 for the loop.
cmpq $288,%rdx
jb L$gcm_enc_abort
pushq %rbp
movq %rsp,%rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
vzeroupper
vmovdqu (%r8),%xmm1
// 128-byte, 128-aligned scratch frame (deferred GHASH inputs).
addq $-128,%rsp
movl 12(%r8),%ebx
leaq L$bswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
leaq 128(%rcx),%rcx
vmovdqu (%r11),%xmm0
andq $-128,%rsp
movl 240-128(%rcx),%r10d
// Avoid 0xf80-window aliasing between key schedule and stack frame
// (same guard as in _aesni_gcm_decrypt).
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc L$enc_no_key_aliasing
cmpq $768,%r15
jnc L$enc_no_key_aliasing
subq %r15,%rsp
L$enc_no_key_aliasing:
movq %rsi,%r14
// GHASH input is the ciphertext at %rsi; %r15 = its clamp limit.
leaq -192(%rsi,%rdx,1),%r15
shrq $4,%rdx
// Prime: encrypt 6 blocks, stash their byte-swapped ciphertext on the
// scratch stack as the stitched loop's first GHASH inputs...
call _aesni_ctr32_6x
vpshufb %xmm0,%xmm9,%xmm8
vpshufb %xmm0,%xmm10,%xmm2
vmovdqu %xmm8,112(%rsp)
vpshufb %xmm0,%xmm11,%xmm4
vmovdqu %xmm2,96(%rsp)
vpshufb %xmm0,%xmm12,%xmm5
vmovdqu %xmm4,80(%rsp)
vpshufb %xmm0,%xmm13,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm14,%xmm7
vmovdqu %xmm6,48(%rsp)
// ...then 6 more, after which the stitched loop runs with GHASH
// trailing encryption by two passes (hence subq $12 below).
call _aesni_ctr32_6x
movq 16(%rbp),%r12
leaq 32(%r9),%r9
vmovdqu (%r12),%xmm8
subq $12,%rdx
movq $192,%rax
vpshufb %xmm0,%xmm8,%xmm8
call _aesni_ctr32_ghash_6x
// Tail: 12 ciphertext blocks (6 on the stack at 32..112(%rsp), 6 still
// in %xmm9..%xmm14) have not been hashed yet.  Store the last outputs,
// byte-swap everything, and fold all 12 into Xi using the key powers
// H^1..H^8 from the Htable with Karatsuba splitting (vpunpckhqdq
// builds hi^lo halves for the middle products).
vmovdqu 32(%rsp),%xmm7
vmovdqu (%r11),%xmm0
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm7,%xmm7,%xmm1
vmovdqu 32-32(%r9),%xmm15
vmovups %xmm9,-96(%rsi)
vpshufb %xmm0,%xmm9,%xmm9
vpxor %xmm7,%xmm1,%xmm1
vmovups %xmm10,-80(%rsi)
vpshufb %xmm0,%xmm10,%xmm10
vmovups %xmm11,-64(%rsi)
vpshufb %xmm0,%xmm11,%xmm11
vmovups %xmm12,-48(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vmovups %xmm13,-32(%rsi)
vpshufb %xmm0,%xmm13,%xmm13
vmovups %xmm14,-16(%rsi)
vpshufb %xmm0,%xmm14,%xmm14
vmovdqu %xmm9,16(%rsp)
// First group: hash the six stack-stashed blocks (oldest first),
// accumulating lo/mid/hi partial products.
vmovdqu 48(%rsp),%xmm6
vmovdqu 16-32(%r9),%xmm0
vpunpckhqdq %xmm6,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5
vpxor %xmm6,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vmovdqu 64(%rsp),%xmm9
vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm9,%xmm9,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6
vpxor %xmm9,%xmm5,%xmm5
vpxor %xmm7,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vmovdqu 80(%rsp),%xmm1
vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm4,%xmm7,%xmm7
vpunpckhqdq %xmm1,%xmm1,%xmm4
vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpxor %xmm6,%xmm9,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 96(%rsp),%xmm2
vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm7,%xmm6,%xmm6
vpunpckhqdq %xmm2,%xmm2,%xmm7
vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpxor %xmm9,%xmm1,%xmm1
vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm5,%xmm4,%xmm4
// Fold Xi into the oldest block of this group before its multiply.
vpxor 112(%rsp),%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5
vmovdqu 112-32(%r9),%xmm0
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7
vpxor %xmm4,%xmm7,%xmm4
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm1
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8
vpxor %xmm14,%xmm1,%xmm1
vpxor %xmm5,%xmm6,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9
vmovdqu 32-32(%r9),%xmm15
vpxor %xmm2,%xmm8,%xmm7
vpxor %xmm4,%xmm9,%xmm6
// Second group: hash the six register-resident blocks
// (%xmm14..%xmm10 and Xi^last), with the reduction by L$poly
// (vpclmulqdq $0x10,16(%r11)) interleaved into the multiplies.
vmovdqu 16-32(%r9),%xmm0
vpxor %xmm5,%xmm7,%xmm9
vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4
vpxor %xmm9,%xmm6,%xmm6
vpunpckhqdq %xmm13,%xmm13,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14
vpxor %xmm13,%xmm2,%xmm2
vpslldq $8,%xmm6,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vpxor %xmm9,%xmm5,%xmm8
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm6,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm12,%xmm12,%xmm9
vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13
vpxor %xmm12,%xmm9,%xmm9
vpxor %xmm14,%xmm13,%xmm13
vpalignr $8,%xmm8,%xmm8,%xmm14
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm11,%xmm11,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12
vpxor %xmm11,%xmm1,%xmm1
vpxor %xmm13,%xmm12,%xmm12
vxorps 16(%rsp),%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm9,%xmm9
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm10,%xmm10,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11
vpxor %xmm10,%xmm2,%xmm2
vpalignr $8,%xmm8,%xmm8,%xmm14
vpxor %xmm12,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm9,%xmm1,%xmm1
vxorps %xmm7,%xmm14,%xmm14
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4
vmovdqu 112-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm11,%xmm10,%xmm10
vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7
vpxor %xmm4,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6
vpxor %xmm10,%xmm7,%xmm7
vpxor %xmm2,%xmm6,%xmm6
// Combine lo/mid/hi and run the final two-step reduction modulo the
// GHASH polynomial (L$poly at 16(%r11)).
vpxor %xmm5,%xmm7,%xmm4
vpxor %xmm4,%xmm6,%xmm6
vpslldq $8,%xmm6,%xmm1
vmovdqu 16(%r11),%xmm3
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm1,%xmm5,%xmm8
vpxor %xmm6,%xmm7,%xmm7
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm2,%xmm8,%xmm8
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm7,%xmm2,%xmm2
vpxor %xmm2,%xmm8,%xmm8
// Write Xi back in memory byte order and restore the frame.
movq 16(%rbp),%r12
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
L$gcm_enc_abort:
ret
// Read-only constants.  The code above addresses them relative to
// %r11 = L$bswap_mask: L$poly at +16, L$one_msb at +32, L$two_lsb at
// +48, L$one_lsb at +64 — keep the order and 16-byte spacing intact.
.section __DATA,__const
.p2align 6
// vpshufb mask that reverses all 16 bytes (big<->little endian swap).
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
// GHASH reduction constant (0xc2 in the top byte), used via
// vpclmulqdq $0x10,16(%r11) in the reduction steps.
L$poly:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
// +1 in the most-significant byte: increments a big-endian-stored
// counter via vpaddb on the fast path.
L$one_msb:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
// +2 / +1 as little-endian dwords: used with vpaddd on the slow
// (byte-wrap) counter paths after byte-swapping.
L$two_lsb:
.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
L$one_lsb:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
// ASCII identification banner ("AES-NI GCM module for x86_64,
// CRYPTOGAMS by <appro@openssl.org>"), NUL-terminated.
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6
.text
#endif
|
Prom11111/plumbingtech | 10,863 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghash-neon-armv8-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.text
//-----------------------------------------------------------------------
// gcm_init_neon — precompute the "twisted" GHASH key for the NEON path.
// AAPCS64 (AArch64, Windows): x0 = output table, x1 = raw hash subkey H
// (one 128-bit value, loaded as {v17.2d}).
// Computes H <<= 1 modulo the field polynomial (carry folded in via the
// 0xc2...01 constant built in v16) and stores the single twisted value.
// Clobbers: v3, v5, v16-v19; uses no stack and no callee-saved registers.
//-----------------------------------------------------------------------
.globl gcm_init_neon
.def gcm_init_neon
.type 32
.endef
.align 4
gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
//-----------------------------------------------------------------------
// gcm_gmult_neon — one GHASH multiplication: Xi = Xi * H.
// x0 = Xi (16 bytes, in/out), x1 = twisted H as written by gcm_init_neon.
// Loads the k48/k32/k16/k0 masks from Lmasks, byte-swaps Xi into the
// internal bit order, precomputes the Karatsuba term H.lo ^ H.hi in v7,
// then sets x3 = 16 and branches into the shared multiply/reduce body
// (Lgmult_neon inside gcm_ghash_neon) so the loop runs exactly once.
// Clobbers: x3, x9, and the vector registers used by gcm_ghash_neon.
//-----------------------------------------------------------------------
.globl gcm_gmult_neon
.def gcm_gmult_neon
.type 32
.endef
.align 4
gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
//-----------------------------------------------------------------------
// gcm_ghash_neon — GHASH over a buffer for cores without the Crypto
// extension (no 64x64 PMULL): Xi = (Xi ^ inp[i]) * H for each block.
// x0 = Xi (16 bytes, in/out), x1 = twisted H from gcm_init_neon,
// x2 = input, x3 = length in bytes (assumed a multiple of 16 — the loop
// subtracts 16 per iteration; TODO confirm against the caller contract).
// Each 64x64 carryless product is assembled from 8x8-bit pmull partial
// products shifted and masked together (masks k48/k32/k16/k0 come from
// Lmasks via v24/v25); the three products form a Karatsuba multiply,
// followed by the usual two-phase reduction mod the GHASH polynomial.
// Clobbers: x3, x9, v0-v7, v16-v25.
//-----------------------------------------------------------------------
.globl gcm_ghash_neon
.def gcm_ghash_neon
.type 32
.endef
.align 4
gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
// ---- Product 1: Xl = B.lo * H.lo (result accumulated into v0) ----
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
// ---- Product 2 (Karatsuba middle term): Xm = (B.lo^B.hi)*(H.lo^H.hi),
// accumulated into v1; v7 already holds H.lo ^ H.hi ----
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
// ---- Product 3: Xh = B.hi * H.hi (accumulated into v2) ----
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
// ---- Karatsuba recombination: fold Xm into the 256-bit Xh:Xl ----
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
// Next block, if any (x3 counts remaining bytes).
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
// Read-only masks shared by gcm_gmult_neon/gcm_ghash_neon.  Loaded as a
// {v24.2d, v25.2d} pair: each quad keeps the bottom N bits of a 64-bit
// lane when combining the shifted 8x8 pmull partial products.
.section .rodata
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
// NUL-terminated ASCII banner:
// "GHASH for ARMv8, derived from ARMv4 version by <appro@openssl.org>"
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Prom11111/plumbingtech | 190,544 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/chacha20_poly1305_x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
// Constant pool for the ChaCha20-Poly1305 routines that follow
// (read-only segment, 64-byte aligned).
.section __DATA,__const
.p2align 6
chacha20_poly1305_constants:
// "expand 32-byte k" — the four ChaCha20 constant words, duplicated so a
// 256-bit load fills both 128-bit halves in the AVX2 paths.
L$chacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
// pshufb shuffle mask implementing a left-rotate by 8 bits in each
// 32-bit lane.
L$rol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
// pshufb shuffle mask implementing a left-rotate by 16 bits per lane.
L$rol16:
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
// Initial / per-iteration block-counter increments for the SSE and
// AVX2 code paths.
L$avx2_init:
.long 0,0,0,0
L$sse_inc:
.long 1,0,0,0
L$avx2_inc:
.long 2,0,0,0,2,0,0,0
// Poly1305 "r" clamp mask (low 128 bits) followed by an all-ones pair.
L$clamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
.p2align 4
// L$and_masks + 16*(i-1) keeps the low i bytes of a 16-byte block —
// used to mask a partial tail block.
L$and_masks:
.byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
.text
//-----------------------------------------------------------------------
// poly_hash_ad_internal — absorb the additional authenticated data (AAD)
// into the Poly1305 accumulator.  Internal helper (non-standard ABI).
// In:   rcx = AAD pointer, r8 = AAD length in bytes,
//       rbp = frame base; the Poly1305 key limbs r0/r1 are read from
//       0+0+0(%rbp) and 8+0+0(%rbp).
// Out:  accumulator h in r10 (h0), r11 (h1), r12 (h2, top limb).
// Clobbers: rax, rdx, r9, r13-r15, rcx, r8, flags.
// Each repeated mul/shrd sequence below computes
//   (h0,h1,h2) = (h0,h1,h2) * (r0,r1)  reduced mod 2^130 - 5.
//-----------------------------------------------------------------------
.p2align 6
poly_hash_ad_internal:
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
cmpq $13,%r8
jne L$hash_ad_loop
// Fast path: exactly 13 bytes of AAD (the TLS record header length) —
// absorb it as a single padded block without looping.
L$poly_fast_tls_ad:
movq (%rcx),%r10
movq 5(%rcx),%r11
shrq $24,%r11
movq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
ret
// Main loop: absorb full 16-byte blocks (pad bit via adcq $1 into h2).
L$hash_ad_loop:
cmpq $16,%r8
jb L$hash_ad_tail
addq 0+0(%rcx),%r10
adcq 8+0(%rcx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rcx),%rcx
subq $16,%r8
jmp L$hash_ad_loop
// Tail: 1..15 remaining bytes, shifted in one byte at a time from the
// end of the buffer to build the partial block in r13:r14.
L$hash_ad_tail:
cmpq $0,%r8
je L$hash_ad_done
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
addq %r8,%rcx
L$hash_ad_tail_loop:
shldq $8,%r13,%r14
shlq $8,%r13
movzbq -1(%rcx),%r15
xorq %r15,%r13
decq %rcx
decq %r8
jne L$hash_ad_tail_loop
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$hash_ad_done:
ret
.globl _chacha20_poly1305_open_sse41
.private_extern _chacha20_poly1305_open_sse41
.p2align 6
_chacha20_poly1305_open_sse41:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
cmpq $128,%rbx
jbe L$open_sse_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm7
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movq $10,%r10
L$open_sse_init_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jne L$open_sse_init_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
pand L$clamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
L$open_sse_main_loop:
cmpq $256,%rbx
jb L$open_sse_tail
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $4,%rcx
movq %rsi,%r8
L$open_sse_main_loop_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
leaq 16(%r8),%r8
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %rcx
jge L$open_sse_main_loop_rounds
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
cmpq $-6,%rcx
jg L$open_sse_main_loop_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor 0+80(%rbp),%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp L$open_sse_main_loop
L$open_sse_tail:
testq %rbx,%rbx
jz L$open_sse_finalize
cmpq $192,%rbx
ja L$open_sse_tail_256
cmpq $128,%rbx
ja L$open_sse_tail_192
cmpq $64,%rbx
ja L$open_sse_tail_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
cmpq $16,%rcx
jb L$open_sse_tail_64_rounds
L$open_sse_tail_64_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
L$open_sse_tail_64_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
cmpq $16,%rcx
jae L$open_sse_tail_64_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_64_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_128:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movq %rbx,%rcx
andq $-16,%rcx
xorq %r8,%r8
L$open_sse_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_128_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
cmpq %rcx,%r8
jb L$open_sse_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_128_rounds
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
subq $64,%rbx
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_192:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movq %rbx,%rcx
movq $160,%r8
cmpq $160,%rcx
cmovgq %r8,%rcx
andq $-16,%rcx
xorq %r8,%r8
L$open_sse_tail_192_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_192_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
cmpq %rcx,%r8
jb L$open_sse_tail_192_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_192_rounds
cmpq $176,%rbx
jb L$open_sse_tail_192_finish
addq 0+160(%rsi),%r10
adcq 8+160(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
cmpq $192,%rbx
jb L$open_sse_tail_192_finish
addq 0+176(%rsi),%r10
adcq 8+176(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_192_finish:
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
subq $128,%rbx
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_256:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
xorq %r8,%r8
L$open_sse_tail_256_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
movdqa 0+80(%rbp),%xmm11
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
movdqa 0+80(%rbp),%xmm9
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
movdqa 0+80(%rbp),%xmm11
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
movdqa 0+80(%rbp),%xmm9
addq $16,%r8
cmpq $160,%r8
jb L$open_sse_tail_256_rounds_and_x1hash
movq %rbx,%rcx
andq $-16,%rcx
L$open_sse_tail_256_hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%r8
cmpq %rcx,%r8
jb L$open_sse_tail_256_hash
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqa 0+80(%rbp),%xmm12
subq $192,%rbx
leaq 192(%rsi),%rsi
leaq 192(%rdi),%rdi
L$open_sse_tail_64_dec_loop:
cmpq $16,%rbx
jb L$open_sse_tail_16_init
subq $16,%rbx
movdqu (%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_16_init:
movdqa %xmm0,%xmm1
L$open_sse_tail_16:
testq %rbx,%rbx
jz L$open_sse_finalize
pxor %xmm3,%xmm3
leaq -1(%rsi,%rbx,1),%rsi
movq %rbx,%r8
L$open_sse_tail_16_compose:
pslldq $1,%xmm3
pinsrb $0,(%rsi),%xmm3
subq $1,%rsi
subq $1,%r8
jnz L$open_sse_tail_16_compose
.byte 102,73,15,126,221
pextrq $1,%xmm3,%r14
pxor %xmm1,%xmm3
L$open_sse_tail_16_extract:
pextrb $0,%xmm3,(%rdi)
psrldq $1,%xmm3
addq $1,%rdi
subq $1,%rbx
jne L$open_sse_tail_16_extract
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_finalize:
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
addq $288 + 0 + 32,%rsp
popq %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
ret
L$open_sse_128:
movdqu L$chacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm13,%xmm15
movq $10,%r10
L$open_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz L$open_sse_128_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd L$chacha20_consts(%rip),%xmm1
paddd L$chacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm9
paddd %xmm11,%xmm10
paddd %xmm15,%xmm13
paddd L$sse_inc(%rip),%xmm15
paddd %xmm15,%xmm14
pand L$clamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
L$open_sse_128_xor_hash:
cmpq $16,%rbx
jb L$open_sse_tail_16
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm1
movdqu %xmm1,0(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
movdqa %xmm2,%xmm13
movdqa %xmm6,%xmm2
movdqa %xmm10,%xmm6
movdqa %xmm14,%xmm10
jmp L$open_sse_128_xor_hash
.globl _chacha20_poly1305_seal_sse41
.private_extern _chacha20_poly1305_seal_sse41
.p2align 6
_chacha20_poly1305_seal_sse41:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
cmpq $128,%rbx
jbe L$seal_sse_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm14
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $10,%r10
L$seal_sse_init_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jnz L$seal_sse_init_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
pand L$clamp(%rip),%xmm3
movdqa %xmm3,0+0(%rbp)
movdqa %xmm7,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
cmpq $192,%rbx
ja L$seal_sse_main_init
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_main_init:
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 128(%rdi)
movdqu %xmm4,16 + 128(%rdi)
movdqu %xmm8,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
movq $2,%rcx
movq $8,%r8
cmpq $64,%rbx
jbe L$seal_sse_tail_64
cmpq $128,%rbx
jbe L$seal_sse_tail_128
cmpq $192,%rbx
jbe L$seal_sse_tail_192
L$seal_sse_main_loop:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
.p2align 5
L$seal_sse_main_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
leaq 16(%rdi),%rdi
decq %r8
jge L$seal_sse_main_rounds
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_main_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm14,0+80(%rbp)
movdqa %xmm14,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm14
pxor %xmm3,%xmm14
movdqu %xmm14,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm14
pxor %xmm7,%xmm14
movdqu %xmm14,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm14
pxor %xmm11,%xmm14
movdqu %xmm14,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm14
pxor %xmm15,%xmm14
movdqu %xmm14,48 + 0(%rdi)
movdqa 0+80(%rbp),%xmm14
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
cmpq $256,%rbx
ja L$seal_sse_main_loop_xor
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_main_loop_xor:
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
subq $256,%rbx
movq $6,%rcx
movq $4,%r8
cmpq $192,%rbx
jg L$seal_sse_main_loop
movq %rbx,%rcx
testq %rbx,%rbx
je L$seal_sse_128_tail_hash
movq $6,%rcx
cmpq $128,%rbx
ja L$seal_sse_tail_192
cmpq $64,%rbx
ja L$seal_sse_tail_128
L$seal_sse_tail_64:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
L$seal_sse_tail_64_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_64_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_64_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_64_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp L$seal_sse_128_tail_xor
L$seal_sse_tail_128:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
L$seal_sse_tail_128_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_128_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_128_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_128_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movq $64,%rcx
subq $64,%rbx
leaq 64(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_tail_192:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
L$seal_sse_tail_192_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_192_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_192_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_192_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
L$seal_sse_128_tail_hash:
cmpq $16,%rcx
jb L$seal_sse_128_tail_xor
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
leaq 16(%rdi),%rdi
jmp L$seal_sse_128_tail_hash
L$seal_sse_128_tail_xor:
cmpq $16,%rbx
jb L$seal_sse_tail_16
subq $16,%rbx
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,0(%rdi)
addq 0(%rdi),%r10
adcq 8(%rdi),%r11
adcq $1,%r12
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
movdqa %xmm1,%xmm12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
jmp L$seal_sse_128_tail_xor
L$seal_sse_tail_16:
testq %rbx,%rbx
jz L$process_blocks_of_extra_in
movq %rbx,%r8
movq %rbx,%rcx
leaq -1(%rsi,%rbx,1),%rsi
pxor %xmm15,%xmm15
L$seal_sse_tail_16_compose:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
decq %rcx
jne L$seal_sse_tail_16_compose
pxor %xmm0,%xmm15
movq %rbx,%rcx
movdqu %xmm15,%xmm0
L$seal_sse_tail_16_extract:
pextrb $0,%xmm0,(%rdi)
psrldq $1,%xmm0
addq $1,%rdi
subq $1,%rcx
jnz L$seal_sse_tail_16_extract
movq 288 + 0 + 32(%rsp),%r9
movq 56(%r9),%r14
movq 48(%r9),%r13
testq %r14,%r14
jz L$process_partial_block
movq $16,%r15
subq %rbx,%r15
cmpq %r15,%r14
jge L$load_extra_in
movq %r14,%r15
L$load_extra_in:
leaq -1(%r13,%r15,1),%rsi
addq %r15,%r13
subq %r15,%r14
movq %r13,48(%r9)
movq %r14,56(%r9)
addq %r15,%r8
pxor %xmm11,%xmm11
L$load_extra_load_loop:
pslldq $1,%xmm11
pinsrb $0,(%rsi),%xmm11
leaq -1(%rsi),%rsi
subq $1,%r15
jnz L$load_extra_load_loop
movq %rbx,%r15
L$load_extra_shift_loop:
pslldq $1,%xmm11
subq $1,%r15
jnz L$load_extra_shift_loop
leaq L$and_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
por %xmm11,%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$process_blocks_of_extra_in:
movq 288+32+0 (%rsp),%r9
movq 48(%r9),%rsi
movq 56(%r9),%r8
movq %r8,%rcx
shrq $4,%r8
L$process_extra_hash_loop:
jz process_extra_in_trailer
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rsi),%rsi
subq $1,%r8
jmp L$process_extra_hash_loop
process_extra_in_trailer:
andq $15,%rcx
movq %rcx,%rbx
jz L$do_length_block
leaq -1(%rsi,%rcx,1),%rsi
L$process_extra_in_trailer_load:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
subq $1,%rcx
jnz L$process_extra_in_trailer_load
L$process_partial_block:
leaq L$and_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$do_length_block:
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
addq $288 + 0 + 32,%rsp
popq %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
ret
L$seal_sse_128:
movdqu L$chacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm14
movdqa %xmm14,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
movq $10,%r10
L$seal_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz L$seal_sse_128_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd L$chacha20_consts(%rip),%xmm1
paddd L$chacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm8
paddd %xmm11,%xmm9
paddd %xmm15,%xmm12
paddd L$sse_inc(%rip),%xmm15
paddd %xmm15,%xmm13
pand L$clamp(%rip),%xmm2
movdqa %xmm2,0+0(%rbp)
movdqa %xmm6,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
jmp L$seal_sse_128_tail_xor
.globl _chacha20_poly1305_open_avx2
.private_extern _chacha20_poly1305_open_avx2
.p2align 6
_chacha20_poly1305_open_avx2:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
vzeroupper
vmovdqa L$chacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd L$avx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe L$open_avx2_192
cmpq $320,%rbx
jbe L$open_avx2_320
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%r10
L$open_avx2_init_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
decq %r10
jne L$open_avx2_init_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
L$open_avx2_init_hash:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%rcx
cmpq $64,%rcx
jne L$open_avx2_init_hash
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vmovdqu %ymm0,0(%rdi)
vmovdqu %ymm4,32(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
subq $64,%rbx
L$open_avx2_main_loop:
cmpq $512,%rbx
jb L$open_avx2_main_loop_done
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
L$open_avx2_main_loop_rounds:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rsi,%rcx,1),%r10
adcq 8+16(%rsi,%rcx,1),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rsi,%rcx,1),%r10
adcq 8+32(%rsi,%rcx,1),%r11
adcq $1,%r12
leaq 48(%rcx),%rcx
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
cmpq $60*8,%rcx
jne L$open_avx2_main_loop_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+60*8(%rsi),%r10
adcq 8+60*8(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
addq 0+60*8+16(%rsi),%r10
adcq 8+60*8+16(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
leaq 512(%rdi),%rdi
subq $512,%rbx
jmp L$open_avx2_main_loop
# Main 512-byte AVX2 loop is done; %rbx holds the remaining ciphertext length.
# Dispatch to the appropriately sized tail handler (each processes up to its
# limit in ChaCha20 blocks while catching up the Poly1305 hash).
L$open_avx2_main_loop_done:
testq %rbx,%rbx
vzeroupper                              # leave AVX state clean before any SSE/scalar path
je L$open_sse_finalize                  # nothing left: go straight to tag finalization
cmpq $384,%rbx
ja L$open_avx2_tail_512                 # 385..512 bytes remain: 4-block tail
cmpq $256,%rbx
ja L$open_avx2_tail_384                 # 257..384 bytes remain: 3-block tail
cmpq $128,%rbx
ja L$open_avx2_tail_256                 # 129..256 bytes remain: 2-block tail
# <=128 bytes remain: set up a single ChaCha20 AVX2 block state.
# ymm0/ymm4/ymm8/ymm12 = constants / key row 1 / key row 2 / counter+nonce.
# 0+64(%rbp), 0+96(%rbp), 0+160(%rbp) hold the saved key/counter state.
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12        # advance the block counter
vmovdqa %ymm12,0+160(%rbp)              # persist the new counter
xorq %r8,%r8                            # r8 = bytes hashed so far in this tail
movq %rbx,%rcx
andq $-16,%rcx                          # rcx = remaining length rounded down to 16-byte Poly1305 blocks
testq %rcx,%rcx
je L$open_avx2_tail_128_rounds          # no full 16-byte blocks left to hash
# Absorb one 16-byte ciphertext block into the Poly1305 accumulator
# (r10:r11:r12 = h0:h1:h2), then fall through into a pair of ChaCha20 rounds.
# 0(%rbp)/8(%rbp) hold the clamped Poly1305 key limbs r0/r1.
L$open_avx2_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10               # h += message block (little-endian 128 bits)
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12                            # plus the implicit 2^128 pad bit
# Schoolbook multiply h * r using mulq; partial products gathered into r13:r14:r15:r9.
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10                               # r0 * h0
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11                               # r0 * h1
imulq %r12,%r15                         # r0 * h2 (h2 is small, imul suffices)
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10                               # r1 * h0
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11                               # r1 * h1
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9                          # r1 * h2
addq %r10,%r15
adcq %rdx,%r9
# Reduce modulo 2^130 - 5: fold bits >= 2^130 back in multiplied by 5 (x4 + x1).
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12                            # keep low 2 bits of the top limb
movq %r15,%r13
andq $-4,%r13                           # high part (multiple of 4) to fold back
movq %r9,%r14
shrdq $2,%r9,%r15                       # high part / 4
shrq $2,%r9
addq %r13,%r15                          # folded = high + high/4  (i.e. high * 5 / 4 trick)
adcq %r14,%r9
addq %r15,%r10                          # h = low + folded
adcq %r9,%r11
adcq $0,%r12
# One double-round (column round + diagonal round) of ChaCha20 on the single
# AVX2 block in ymm0/ymm4/ymm8/ymm12; ymm3 is scratch.  Interleaved with the
# hash loop above while 16-byte blocks remain, then runs hash-free until 10
# double-rounds (r8 == 160, i.e. 10 iterations * 16) are complete.
L$open_avx2_tail_128_rounds:
addq $16,%r8
# Column round: a += b; d ^= a; d <<<= 16; c += d; b ^= c; b <<<= 12; ...
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12     # rotate left 16 via byte shuffle
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4                 # rotate left 12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12      # rotate left 8 via byte shuffle
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4                 # rotate left 7
# Diagonalize lanes for the second half of the double round.
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
# Diagonal round (same quarter-round sequence on rotated lanes).
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
# Un-diagonalize back to column order.
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb L$open_avx2_tail_128_rounds_and_x1hash   # still hashing: loop with poly step
cmpq $160,%r8
jne L$open_avx2_tail_128_rounds             # hash done: finish remaining double rounds
# Rounds complete: add the saved initial state to produce the keystream,
# then interleave 128-bit lanes into sequential output order.
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp L$open_avx2_tail_128_xor
# 129..256 bytes remain: set up two parallel ChaCha20 AVX2 block states
# (ymm0/4/8/12 and ymm1/5/9/13) with consecutive counters, and compute how
# many Poly1305 16-byte blocks to interleave per round iteration.
L$open_avx2_tail_256:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13        # counter block for state 1
vpaddd %ymm13,%ymm12,%ymm12             # counter block for state 0 (one further)
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
movq %rbx,0+128(%rbp)                   # stash total remaining length
movq %rbx,%rcx
subq $128,%rcx
shrq $4,%rcx                            # rcx = 16-byte blocks beyond the first 128 bytes
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx                         # clamp hash iterations to the 10 double rounds
movq %rsi,%rbx                          # rbx = running hash input pointer
xorq %r8,%r8                            # r8 = double-round counter
L$open_avx2_tail_256_rounds_and_x1hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
L$open_avx2_tail_256_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
incq %r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
cmpq %rcx,%r8
jb L$open_avx2_tail_256_rounds_and_x1hash
cmpq $10,%r8
jne L$open_avx2_tail_256_rounds
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
# Catch-up loop: hash any remaining full 16-byte ciphertext blocks that were
# not absorbed during the interleaved rounds.  rcx = bytes hashed so far,
# rbx = total bytes to hash, r8 = current input pointer.  Uses the BMI2
# mulx-based Poly1305 multiply (flag-free partial products).
L$open_avx2_tail_256_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg L$open_avx2_tail_256_done            # all blocks hashed
addq 0+0(%r8),%r10                      # h += message block + 2^128 pad
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx                   # rdx = r0 (mulx implicit source)
movq %rdx,%r15
mulxq %r10,%r13,%r14                    # r0*h0
mulxq %r11,%rax,%rdx                    # r0*h1
imulq %r12,%r15                         # r0*h2
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx                   # rdx = r1
mulxq %r10,%r10,%rax                    # r1*h0
addq %r10,%r14
mulxq %r11,%r11,%r9                     # r1*h1
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx                         # r1*h2
addq %rax,%r15
adcq %rdx,%r9
# Reduce modulo 2^130 - 5 (same fold-high-times-5 pattern as elsewhere).
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp L$open_avx2_tail_256_hash
L$open_avx2_tail_256_done:
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
subq $128,%rbx
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_384:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $256,%rcx
shrq $4,%rcx
addq $6,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
L$open_avx2_tail_384_rounds_and_x2hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
L$open_avx2_tail_384_rounds_and_x1hash:
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
incq %r8
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb L$open_avx2_tail_384_rounds_and_x2hash
cmpq $10,%r8
jne L$open_avx2_tail_384_rounds_and_x1hash
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
L$open_avx2_384_tail_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg L$open_avx2_384_tail_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp L$open_avx2_384_tail_hash
L$open_avx2_384_tail_done:
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp L$open_avx2_tail_128_xor
# 385..512 bytes remain: set up four parallel ChaCha20 AVX2 block states
# (ymm0-3 consts, ymm4-7 key row 1, ymm8-11 key row 2, ymm12-15 counters)
# with four consecutive counter values, saved to the stack frame.
L$open_avx2_tail_512:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15        # counters assigned highest-to-lowest label
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx                          # rcx = double-round counter
movq %rsi,%r8                           # r8 = running hash input pointer
L$open_avx2_tail_512_rounds_and_x2hash:
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
L$open_avx2_tail_512_rounds_and_x1hash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+16(%r8),%r10
adcq 8+16(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%r8),%r8
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
incq %rcx
cmpq $4,%rcx
jl L$open_avx2_tail_512_rounds_and_x2hash
cmpq $10,%rcx
jne L$open_avx2_tail_512_rounds_and_x1hash
movq %rbx,%rcx
subq $384,%rcx
andq $-16,%rcx
# Catch-up loop for the 512-byte tail: hash remaining full 16-byte blocks
# (rcx = bytes left to hash, counts down to 0; r8 = input pointer).
# Same mulx-based Poly1305 multiply-and-reduce as the other hash loops.
L$open_avx2_tail_512_hash:
testq %rcx,%rcx
je L$open_avx2_tail_512_done
addq 0+0(%r8),%r10                      # h += message block + 2^128 pad
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx                   # rdx = r0
movq %rdx,%r15
mulxq %r10,%r13,%r14                    # r0*h0
mulxq %r11,%rax,%rdx                    # r0*h1
imulq %r12,%r15                         # r0*h2
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx                   # rdx = r1
mulxq %r10,%r10,%rax                    # r1*h0
addq %r10,%r14
mulxq %r11,%r11,%r9                     # r1*h1
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx                         # r1*h2
addq %rax,%r15
adcq %rdx,%r9
# Reduce modulo 2^130 - 5.
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
subq $16,%rcx
jmp L$open_avx2_tail_512_hash
L$open_avx2_tail_512_done:
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 384(%rsi),%rsi
leaq 384(%rdi),%rdi
subq $384,%rbx
# Decrypt the final <=128 bytes, 32 at a time.  Keystream is queued in
# ymm0 -> ymm4 -> ymm8 -> ymm12; each iteration consumes ymm0 and shifts
# the queue down.
L$open_avx2_tail_128_xor:
cmpq $32,%rbx
jb L$open_avx2_tail_32_xor              # fewer than 32 bytes: sub-register path
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0                # plaintext = ciphertext ^ keystream
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0                     # shift keystream queue down one slot
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
jmp L$open_avx2_tail_128_xor
# Fewer than 32 bytes remain.  Handle one 16-byte chunk with XMM ops, leaving
# any final partial (<16 byte) block's keystream in xmm1 for the shared
# byte-granular tail at L$open_sse_tail_16.
L$open_avx2_tail_32_xor:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1                     # default: pass low 16 keystream bytes onward
jb L$open_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm1
vmovdqu %xmm1,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vperm2i128 $0x11,%ymm0,%ymm0,%ymm0      # move high 128 bits of keystream into the low lane
vmovdqa %xmm0,%xmm1                     # xmm1 = keystream for the final partial block
# Done with AVX2: clear the upper YMM state (avoids SSE/AVX transition
# penalties) and join the common SSE byte-tail/finalization path.
L$open_avx2_exit:
vzeroupper
jmp L$open_sse_tail_16
# Short-message path (<=192 bytes total): duplicate the initial state into two
# block states with consecutive counters (ymm13 = ymm12 + 1 block), keeping the
# original counters in ymm11/ymm15 for the feed-forward addition after rounds.
L$open_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2                     # ymm2/6/10 preserve the pre-round state
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10                           # 10 double rounds = ChaCha20
L$open_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne L$open_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
L$open_avx2_short:
movq %r8,%r8
call poly_hash_ad_internal
L$open_avx2_short_hash_and_xor_loop:
cmpq $32,%rbx
jb L$open_avx2_short_tail_32
subq $32,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rsi),%r10
adcq 8+16(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp L$open_avx2_short_hash_and_xor_loop
# Final <32 bytes of the short open path.  %rbx = bytes remaining,
# %rsi = ciphertext, %rdi = plaintext, %ymm0 = next 32 bytes of keystream.
L$open_avx2_short_tail_32:
cmpq $16,%rbx
# Default: low keystream lane carries over for the sub-16-byte SSE tail.
vmovdqa %xmm0,%xmm1
jb L$open_avx2_short_tail_32_exit
subq $16,%rbx
# Poly1305: absorb one 16-byte ciphertext block into the accumulator
# (%r10:%r11:%r12, radix 2^64 with a small top limb); the +1 is the
# high padding bit of the message block.
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
# Schoolbook multiply of the accumulator by the clamped key r at
# 0(%rbp)/8(%rbp); product limbs end up in %r13:%r14:%r15:%r9.
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
# Partial reduction mod 2^130-5: keep the low 130 bits and fold the
# high bits back in multiplied by 5 (the x4 + x1 via shrd/and trick).
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
# Decrypt the 16 bytes just hashed with the low keystream lane.
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
# High keystream lane becomes the leftover for the SSE tail.
vextracti128 $1,%ymm0,%xmm1
L$open_avx2_short_tail_32_exit:
# Clear upper YMM state before jumping into SSE-only code
# (avoids the AVX->SSE transition penalty; SysV ABI expectation).
vzeroupper
jmp L$open_sse_tail_16
# Open path for inputs <=320 bytes: set up three parallel ChaCha states
# (ymm0/4/8/12, ymm1/5/9/13, ymm2/6/10/14) sharing the same key rows but
# with counters incremented per block via L$avx2_inc.
L$open_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm14
# Stash unmodified key rows (ymm7/ymm11) and the three initial counter
# rows on the stack so they can be re-added after the rounds.
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
# 10 double-rounds = 20 ChaCha rounds.
movq $10,%r10
L$open_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne L$open_avx2_320_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp L$open_avx2_short
# chacha20_poly1305_seal_avx2: AVX2 encrypt+authenticate entry point.
# SysV AMD64; presumably %rdi=out, %rsi=in, %rdx=in_len, %rcx=ad,
# %r8=ad_len, %r9=key/union pointer — TODO confirm against the C
# prototype; the function continues past this chunk.
.globl _chacha20_poly1305_seal_avx2
.private_extern _chacha20_poly1305_seal_avx2
.p2align 6
_chacha20_poly1305_seal_avx2:
_CET_ENDBR
# Save all callee-saved GPRs; %r9 is pushed so the parameter block
# pointer survives the body.
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
# 288-byte scratch frame; %rbp is aligned to 32 so ymm spills below can
# use vmovdqa (aligned stores).
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
# Total bytes to process = in_len + extra length at 56(%r9); both the
# AAD length and this total are stashed for the final tag computation.
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
vzeroupper
# Load the ChaCha state: constants row, then key/key/counter rows
# broadcast from the parameter block; counter row gets the per-lane
# initial increments.
vmovdqa L$chacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd L$avx2_init(%rip),%ymm12,%ymm12
# Dispatch small inputs to specialized 192-/320-byte paths.
cmpq $192,%rbx
jbe L$seal_avx2_192
cmpq $320,%rbx
jbe L$seal_avx2_320
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm4,%ymm7
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vmovdqa %ymm8,%ymm11
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,%ymm15
vpaddd L$avx2_inc(%rip),%ymm15,%ymm14
vpaddd L$avx2_inc(%rip),%ymm14,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm15,0+256(%rbp)
movq $10,%r10
L$seal_avx2_init_rounds:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %r10
jnz L$seal_avx2_init_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vperm2i128 $0x02,%ymm3,%ymm7,%ymm15
vperm2i128 $0x13,%ymm3,%ymm7,%ymm3
vpand L$clamp(%rip),%ymm15,%ymm15
vmovdqa %ymm15,0+0(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
vpxor 0(%rsi),%ymm3,%ymm3
vpxor 32(%rsi),%ymm11,%ymm11
vmovdqu %ymm3,0(%rdi)
vmovdqu %ymm11,32(%rdi)
vperm2i128 $0x02,%ymm2,%ymm6,%ymm15
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+64(%rsi),%ymm15,%ymm15
vpxor 32+64(%rsi),%ymm2,%ymm2
vpxor 64+64(%rsi),%ymm6,%ymm6
vpxor 96+64(%rsi),%ymm10,%ymm10
vmovdqu %ymm15,0+64(%rdi)
vmovdqu %ymm2,32+64(%rdi)
vmovdqu %ymm6,64+64(%rdi)
vmovdqu %ymm10,96+64(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm15
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+192(%rsi),%ymm15,%ymm15
vpxor 32+192(%rsi),%ymm1,%ymm1
vpxor 64+192(%rsi),%ymm5,%ymm5
vpxor 96+192(%rsi),%ymm9,%ymm9
vmovdqu %ymm15,0+192(%rdi)
vmovdqu %ymm1,32+192(%rdi)
vmovdqu %ymm5,64+192(%rdi)
vmovdqu %ymm9,96+192(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm15
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm15,%ymm8
leaq 320(%rsi),%rsi
subq $320,%rbx
movq $320,%rcx
cmpq $128,%rbx
jbe L$seal_avx2_short_hash_remainder
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vpxor 64(%rsi),%ymm8,%ymm8
vpxor 96(%rsi),%ymm12,%ymm12
vmovdqu %ymm0,320(%rdi)
vmovdqu %ymm4,352(%rdi)
vmovdqu %ymm8,384(%rdi)
vmovdqu %ymm12,416(%rdi)
leaq 128(%rsi),%rsi
subq $128,%rbx
movq $8,%rcx
movq $2,%r8
cmpq $128,%rbx
jbe L$seal_avx2_tail_128
cmpq $256,%rbx
jbe L$seal_avx2_tail_256
cmpq $384,%rbx
jbe L$seal_avx2_tail_384
cmpq $512,%rbx
jbe L$seal_avx2_tail_512
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
# First pass through the main loop enters mid-round: the first half-round
# was already executed inline above, so only 9 full iterations remain.
# %rdi is biased back 16 bytes because the loop's Poly1305 hashing of the
# just-written ciphertext lags the store pointer by one block.
subq $16,%rdi
movq $9,%rcx
jmp L$seal_avx2_main_loop_rounds_entry
.p2align 5
L$seal_avx2_main_loop:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%rcx
.p2align 5
L$seal_avx2_main_loop_rounds:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$seal_avx2_main_loop_rounds_entry:
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rdi),%r10
adcq 8+32(%rdi),%r11
adcq $1,%r12
leaq 48(%rdi),%rdi
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %rcx
jne L$seal_avx2_main_loop_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
subq $512,%rbx
cmpq $512,%rbx
jg L$seal_avx2_main_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
# Tail dispatch after the bulk loop: %rcx = double-round count for the
# tail loops, %r8 = extra 2x-hash iteration counter (zeroed here).
# Select the widest tail kernel that covers the remaining %rbx bytes.
movq $10,%rcx
xorq %r8,%r8
cmpq $384,%rbx
ja L$seal_avx2_tail_512
cmpq $256,%rbx
ja L$seal_avx2_tail_384
cmpq $128,%rbx
ja L$seal_avx2_tail_256
L$seal_avx2_tail_128:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
L$seal_avx2_tail_128_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_128_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_128_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_128_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp L$seal_avx2_short_loop
L$seal_avx2_tail_256:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
L$seal_avx2_tail_256_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_256_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_256_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_256_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $128,%rcx
leaq 128(%rsi),%rsi
subq $128,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_tail_384:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
L$seal_avx2_tail_384_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_384_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_384_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_384_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $256,%rcx
leaq 256(%rsi),%rsi
subq $256,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_tail_512:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
L$seal_avx2_tail_512_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_512_rounds_and_2xhash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
addq %rax,%r15
adcq %rdx,%r9
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_512_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_512_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $384,%rcx
leaq 384(%rsi),%rsi
subq $384,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
L$seal_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne L$seal_avx2_320_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp L$seal_avx2_short
L$seal_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
L$seal_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne L$seal_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
L$seal_avx2_short:
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
L$seal_avx2_short_hash_remainder:
cmpq $16,%rcx
jb L$seal_avx2_short_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
addq $16,%rdi
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_short_loop:
cmpq $32,%rbx
jb L$seal_avx2_short_tail
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp L$seal_avx2_short_loop
L$seal_avx2_short_tail:
cmpq $16,%rbx
jb L$seal_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm0
L$seal_avx2_exit:
vzeroupper
jmp L$seal_sse_tail_16
#endif
|
Prom11111/plumbingtech | 10,875 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghash-neon-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.text
// _gcm_init_neon: precompute the NEON GHASH key.
// Presumably matches the C prototype gcm_init_neon(Htable, H) — confirm
// against the caller.
// In:  x0 = Htable (output), x1 = H, the raw 128-bit hash subkey.
// Out: Htable[0] = "twisted H", i.e. H shifted left by one bit and
//      conditionally folded with the 0xc2... reduction constant.
// Clobbers NEON v3, v5, v16-v19 only; no stack, no GPR state changed.
.globl _gcm_init_neon
.private_extern _gcm_init_neon
.align 4
_gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
// _gcm_gmult_neon: multiply Xi by H once (one GHASH step, no input data).
// In:  x0 = Xi (16 bytes, in/out), x1 = Htable (twisted H from
//      _gcm_init_neon).
// Implemented by setting x3 = 16 and branching into the shared loop body
// (Lgmult_neon) inside _gcm_ghash_neon, which then runs exactly one
// iteration and returns from there.
.globl _gcm_gmult_neon
.private_extern _gcm_gmult_neon
.align 4
_gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9] // v24 = {k48,k32}, v25 = {k16,k0}
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16 // one 16-byte "block" => single pass
b Lgmult_neon
// _gcm_ghash_neon: GHASH over len bytes of input.
// Presumably matches gcm_ghash_neon(Xi, Htable, inp, len) — confirm
// against the caller.
// In:  x0 = Xi (16-byte accumulator, in/out), x1 = Htable (twisted H),
//      x2 = input pointer, x3 = length in bytes (the loop consumes 16
//      per iteration; assumes a multiple of 16 — TODO confirm).
// Each iteration computes Xi = (Xi ^ inp_block) * H in GF(2^128) using
// 8x8-bit pmull building blocks (no PMULL64 required), then reduces.
.globl _gcm_ghash_neon
.private_extern _gcm_ghash_neon
.align 4
_gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9] // v24 = {k48,k32}, v25 = {k16,k0}
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
// First 64x64 carry-less product: v0 = v5 * v3 (low halves),
// assembled from shifted 8x8-bit pmulls (F/E/H/G/J/I/K/D below).
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
// Second product (Karatsuba middle term): v1 = v7 * (v3 ^ v4),
// where v7 = H.lo ^ H.hi was prepared above.
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
// Third product: v2 = v6 * v4 (high halves).
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
// Recombine the three products and reduce mod the GHASH polynomial.
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section __TEXT,__const
.align 4
// Bottom-N-bit masks used by the pmull composition above ($kN in the
// comments): loaded pairwise into v24 = {k48,k32} and v25 = {k16,k0}.
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
Prom11111/plumbingtech | 17,785 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/bsaes-armv7-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ https://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ See the License for the specific language governing permissions and
@ limitations under the License.
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project.
@
@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
@ of Linaro.
@ ====================================================================
@ Bit-sliced AES for ARM NEON
@
@ February 2012.
@
@ This implementation is direct adaptation of bsaes-x86_64 module for
@ ARM NEON. Except that this module is endian-neutral [in sense that
@ it can be compiled for either endianness] by courtesy of vld1.8's
@ neutrality. Initial version doesn't implement interface to OpenSSL,
@ only low-level primitives and unsupported entry points, just enough
@ to collect performance results, which for Cortex-A8 core are:
@
@ encrypt 19.5 cycles per byte processed with 128-bit key
@ decrypt 22.1 cycles per byte processed with 128-bit key
@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
@
@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
@ which is [much] worse than anticipated (for further details see
@ http://www.openssl.org/~appro/Snapdragon-S4.html).
@
@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
@ manages in 20.0 cycles].
@
@ When comparing to x86_64 results keep in mind that NEON unit is
@ [mostly] single-issue and thus can't [fully] benefit from
@ instruction-level parallelism. And when comparing to aes-armv4
@ results keep in mind key schedule conversion overhead (see
@ bsaes-x86_64.pl for further details)...
@
@ <appro@openssl.org>
@ April-August 2013
@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
#ifndef __KERNEL__
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
# define VFP_ABI_FRAME 0x40
#else
# define VFP_ABI_PUSH
# define VFP_ABI_POP
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
# define adrl adr
#endif
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#if defined(__thumb2__) && !defined(__APPLE__)
.thumb
#else
.code 32
# undef __thumb2__
#endif
.type _bsaes_const,%object
.align 6
@ Constant pool for the bit-sliced AES core: byte-permutation index
@ tables consumed via vtbl.8 (ShiftRows / InvShiftRows variants and the
@ .LM0 bit-interleave order). Addressed PC-relative from _bsaes_encrypt8
@ and _bsaes_key_convert, so it must stay adjacent to that code.
_bsaes_const:
.LM0ISR:@ InvShiftRows constants
.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
.LISR:
.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
.LISRM0:
.quad 0x01040b0e0205080f, 0x0306090c00070a0d
.LM0SR:@ ShiftRows constants
.quad 0x0a0e02060f03070b, 0x0004080c05090d01
.LSR:
.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
.LSRM0:
.quad 0x0304090e00050a0f, 0x01060b0c0207080d
.LM0:
.quad 0x02060a0e03070b0f, 0x0004080c0105090d
.LREVM0SR:
.quad 0x090d01050c000408, 0x03070b0f060a0e02
.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 6
.size _bsaes_const,.-_bsaes_const
@-----------------------------------------------------------------------
@ _bsaes_encrypt8: encrypt eight AES blocks in parallel, bit-sliced.
@ In:   q0-q7 = eight 128-bit plaintext blocks
@       r4    = bit-sliced key schedule (from _bsaes_key_convert)
@       r5    = number of rounds
@ Out:  q0-q7 = ciphertext, but in a permuted register order relative to
@       the input (callers unpermute when storing — see the q-register
@       pairing in bsaes_ctr32_encrypt_blocks' store sequence).
@ Clobbers: q8-q15, r5, r6; no stack use. Constant-time by construction
@ (no data-dependent branches or table lookups on secret data).
@-----------------------------------------------------------------------
.type _bsaes_encrypt8,%function
.align 4
_bsaes_encrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0SR
#else
sub r6,r6,#_bsaes_encrypt8-.LM0SR
#endif
vldmia r6!, {q8} @ .LM0SR
_bsaes_encrypt8_alt:
@ AddRoundKey(round 0) fused with the .LM0SR byte permutation (vtbl).
veor q10, q0, q9 @ xor with round0 key
veor q11, q1, q9
vtbl.8 d0, {q10}, d16
vtbl.8 d1, {q10}, d17
veor q12, q2, q9
vtbl.8 d2, {q11}, d16
vtbl.8 d3, {q11}, d17
veor q13, q3, q9
vtbl.8 d4, {q12}, d16
vtbl.8 d5, {q12}, d17
veor q14, q4, q9
vtbl.8 d6, {q13}, d16
vtbl.8 d7, {q13}, d17
veor q15, q5, q9
vtbl.8 d8, {q14}, d16
vtbl.8 d9, {q14}, d17
veor q10, q6, q9
vtbl.8 d10, {q15}, d16
vtbl.8 d11, {q15}, d17
veor q11, q7, q9
vtbl.8 d12, {q10}, d16
vtbl.8 d13, {q10}, d17
vtbl.8 d14, {q11}, d16
vtbl.8 d15, {q11}, d17
_bsaes_encrypt8_bitslice:
@ Convert q0-q7 into bit-sliced form: masked bit swaps at distance
@ 1, 2 and 4 between register pairs (masks 0x55, 0x33, 0x0f).
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q6, #1
vshr.u64 q11, q4, #1
veor q10, q10, q7
veor q11, q11, q5
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #1
veor q5, q5, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q2, #1
vshr.u64 q11, q0, #1
veor q10, q10, q3
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q3, q3, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q5, #2
vshr.u64 q11, q4, #2
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q7, q7, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q5, q5, q10
veor q4, q4, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q3
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q3, q3, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q3, #4
vshr.u64 q11, q2, #4
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q6, q6, q11
vshl.u64 q11, q11, #4
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q5
veor q11, q11, q4
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q4, q4, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
sub r5,r5,#1
b .Lenc_sbox
.align 4
.Lenc_loop:
@ Per-round: AddRoundKey (keys already bit-sliced) fused with
@ ShiftRows via vtbl using the permutation held in q12.
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
vtbl.8 d1, {q8}, d25
vldmia r4!, {q8}
veor q10, q10, q2
vtbl.8 d2, {q9}, d24
vtbl.8 d3, {q9}, d25
vldmia r4!, {q9}
veor q11, q11, q3
vtbl.8 d4, {q10}, d24
vtbl.8 d5, {q10}, d25
vldmia r4!, {q10}
vtbl.8 d6, {q11}, d24
vtbl.8 d7, {q11}, d25
vldmia r4!, {q11}
veor q8, q8, q4
veor q9, q9, q5
vtbl.8 d8, {q8}, d24
vtbl.8 d9, {q8}, d25
veor q10, q10, q6
vtbl.8 d10, {q9}, d24
vtbl.8 d11, {q9}, d25
veor q11, q11, q7
vtbl.8 d12, {q10}, d24
vtbl.8 d13, {q10}, d25
vtbl.8 d14, {q11}, d24
vtbl.8 d15, {q11}, d25
.Lenc_sbox:
@ Bit-sliced AES S-box: GF(2^8) inversion expressed as a boolean
@ circuit over the eight bit-slices (see Inv_GF16 marker below).
veor q2, q2, q1
veor q5, q5, q6
veor q3, q3, q0
veor q6, q6, q2
veor q5, q5, q0
veor q6, q6, q3
veor q3, q3, q7
veor q7, q7, q5
veor q3, q3, q4
veor q4, q4, q5
veor q2, q2, q7
veor q3, q3, q1
veor q1, q1, q5
veor q11, q7, q4
veor q10, q1, q2
veor q9, q5, q3
veor q13, q2, q4
vmov q8, q10
veor q12, q6, q0
vorr q10, q10, q9
veor q15, q11, q8
vand q14, q11, q12
vorr q11, q11, q12
veor q12, q12, q9
vand q8, q8, q9
veor q9, q3, q0
vand q15, q15, q12
vand q13, q13, q9
veor q9, q7, q1
veor q12, q5, q6
veor q11, q11, q13
veor q10, q10, q13
vand q13, q9, q12
vorr q9, q9, q12
veor q11, q11, q15
veor q8, q8, q13
veor q10, q10, q14
veor q9, q9, q15
veor q8, q8, q14
vand q12, q2, q3
veor q9, q9, q14
vand q13, q4, q0
vand q14, q1, q5
vorr q15, q7, q6
veor q11, q11, q12
veor q9, q9, q14
veor q8, q8, q15
veor q10, q10, q13
@ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
@ new smaller inversion
vand q14, q11, q9
vmov q12, q8
veor q13, q10, q14
veor q15, q8, q14
veor q14, q8, q14 @ q14=q15
vbsl q13, q9, q8
vbsl q15, q11, q10
veor q11, q11, q10
vbsl q12, q13, q14
vbsl q8, q14, q13
vand q14, q12, q15
veor q9, q9, q8
veor q14, q14, q11
veor q12, q6, q0
veor q8, q5, q3
veor q10, q15, q14
vand q10, q10, q6
veor q6, q6, q5
vand q11, q5, q15
vand q6, q6, q14
veor q5, q11, q10
veor q6, q6, q11
veor q15, q15, q13
veor q14, q14, q9
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q0
veor q12, q12, q8
veor q0, q0, q3
vand q8, q8, q15
vand q3, q3, q13
vand q12, q12, q14
vand q0, q0, q9
veor q8, q8, q12
veor q0, q0, q3
veor q12, q12, q11
veor q3, q3, q10
veor q6, q6, q12
veor q0, q0, q12
veor q5, q5, q8
veor q3, q3, q8
veor q12, q7, q4
veor q8, q1, q2
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q4
veor q12, q12, q8
veor q4, q4, q2
vand q8, q8, q15
vand q2, q2, q13
vand q12, q12, q14
vand q4, q4, q9
veor q8, q8, q12
veor q4, q4, q2
veor q12, q12, q11
veor q2, q2, q10
veor q15, q15, q13
veor q14, q14, q9
veor q10, q15, q14
vand q10, q10, q7
veor q7, q7, q1
vand q11, q1, q15
vand q7, q7, q14
veor q1, q11, q10
veor q7, q7, q11
veor q7, q7, q12
veor q4, q4, q12
veor q1, q1, q8
veor q2, q2, q8
veor q7, q7, q0
veor q1, q1, q6
veor q6, q6, q0
veor q4, q4, q7
veor q0, q0, q1
veor q1, q1, q5
veor q5, q5, q2
veor q2, q2, q3
veor q3, q3, q5
veor q4, q4, q5
veor q6, q6, q3
subs r5,r5,#1
bcc .Lenc_done
@ Linear layer: combine byte rotations of each slice (vext by 12 then
@ 8 bytes) — the bit-sliced equivalent of MixColumns.
vext.8 q8, q0, q0, #12 @ x0 <<< 32
vext.8 q9, q1, q1, #12
veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
vext.8 q10, q4, q4, #12
veor q1, q1, q9
vext.8 q11, q6, q6, #12
veor q4, q4, q10
vext.8 q12, q3, q3, #12
veor q6, q6, q11
vext.8 q13, q7, q7, #12
veor q3, q3, q12
vext.8 q14, q2, q2, #12
veor q7, q7, q13
vext.8 q15, q5, q5, #12
veor q2, q2, q14
veor q9, q9, q0
veor q5, q5, q15
vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64
veor q10, q10, q1
veor q8, q8, q5
veor q9, q9, q5
vext.8 q1, q1, q1, #8
veor q13, q13, q3
veor q0, q0, q8
veor q14, q14, q7
veor q1, q1, q9
vext.8 q8, q3, q3, #8
veor q12, q12, q6
vext.8 q9, q7, q7, #8
veor q15, q15, q2
vext.8 q3, q6, q6, #8
veor q11, q11, q4
vext.8 q7, q5, q5, #8
veor q12, q12, q5
vext.8 q6, q2, q2, #8
veor q11, q11, q5
vext.8 q2, q4, q4, #8
veor q5, q9, q13
veor q4, q8, q12
veor q3, q3, q11
veor q7, q7, q15
veor q6, q6, q14
@ vmov q4, q8
veor q2, q2, q10
@ vmov q5, q9
vldmia r6, {q12} @ .LSR
ite eq @ Thumb2 thing, sanity check in ARM
addeq r6,r6,#0x10
bne .Lenc_loop
vldmia r6, {q12} @ .LSRM0
b .Lenc_loop
.align 4
.Lenc_done:
@ Un-bit-slice (inverse of the transpose in the prologue) and apply
@ the last round key.
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q2, #1
vshr.u64 q11, q3, #1
veor q10, q10, q5
veor q11, q11, q7
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #1
veor q7, q7, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q3, q3, q11
vshr.u64 q10, q4, #1
vshr.u64 q11, q0, #1
veor q10, q10, q6
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q6, q6, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q4, q4, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q7, #2
vshr.u64 q11, q3, #2
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q5, q5, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q7, q7, q10
veor q3, q3, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q6
veor q11, q11, q4
vand q10, q10, q9
vand q11, q11, q9
veor q6, q6, q10
vshl.u64 q10, q10, #2
veor q4, q4, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q6, #4
vshr.u64 q11, q4, #4
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q2, q2, q11
vshl.u64 q11, q11, #4
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q7
veor q11, q11, q3
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q3, q3, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
vldmia r4, {q8} @ last round key
veor q4, q4, q8
veor q6, q6, q8
veor q3, q3, q8
veor q7, q7, q8
veor q2, q2, q8
veor q5, q5, q8
veor q0, q0, q8
veor q1, q1, q8
bx lr
.size _bsaes_encrypt8,.-_bsaes_encrypt8
@-----------------------------------------------------------------------
@ _bsaes_key_convert: convert a standard AES key schedule into the
@ bit-sliced layout consumed by _bsaes_encrypt8.
@ In:   r4  = standard key schedule (round keys, consumed sequentially)
@       r5  = number of rounds
@       r12 = destination for the bit-sliced schedule
@ Out:  round 0 key stored verbatim, each middle round key expanded to
@       8 q-registers (one per bit) via vtst against single-bit masks;
@       q7/q15 left holding material for the caller's last-round fixup
@       (caller does "veor q7,q7,q15" — the last key is NOT stored here).
@ Clobbers: r4, r5, r6, r12, q0-q15.
@-----------------------------------------------------------------------
.type _bsaes_key_convert,%function
.align 4
_bsaes_key_convert:
adr r6,.
vld1.8 {q7}, [r4]! @ load round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0
#else
sub r6,r6,#_bsaes_key_convert-.LM0
#endif
vld1.8 {q15}, [r4]! @ load round 1 key
vmov.i8 q8, #0x01 @ bit masks
vmov.i8 q9, #0x02
vmov.i8 q10, #0x04
vmov.i8 q11, #0x08
vmov.i8 q12, #0x10
vmov.i8 q13, #0x20
vldmia r6, {q14} @ .LM0
#ifdef __ARMEL__
vrev32.8 q7, q7
vrev32.8 q15, q15
#endif
sub r5,r5,#1
vstmia r12!, {q7} @ save round 0 key
b .Lkey_loop
.align 4
.Lkey_loop:
@ Permute the next round key (.LM0 order), then split it into eight
@ one-bit-per-register slices with vtst; four slices are complemented
@ so the S-box circuit's constant term is folded into the keys.
vtbl.8 d14,{q15},d28
vtbl.8 d15,{q15},d29
vmov.i8 q6, #0x40
vmov.i8 q15, #0x80
vtst.8 q0, q7, q8
vtst.8 q1, q7, q9
vtst.8 q2, q7, q10
vtst.8 q3, q7, q11
vtst.8 q4, q7, q12
vtst.8 q5, q7, q13
vtst.8 q6, q7, q6
vtst.8 q7, q7, q15
vld1.8 {q15}, [r4]! @ load next round key
vmvn q0, q0 @ "pnot"
vmvn q1, q1
vmvn q5, q5
vmvn q6, q6
#ifdef __ARMEL__
vrev32.8 q15, q15
#endif
subs r5,r5,#1
vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key
bne .Lkey_loop
vmov.i8 q7,#0x63 @ compose .L63
@ don't save last round key
bx lr
.size _bsaes_key_convert,.-_bsaes_key_convert
@-----------------------------------------------------------------------
@ bsaes_ctr32_encrypt_blocks: AES-CTR with a 32-bit big-endian counter,
@ processing up to 8 blocks per iteration via _bsaes_encrypt8.
@ Presumably matches the C prototype
@   bsaes_ctr32_encrypt_blocks(in, out, blocks, key, ivec)
@ — confirm against the header.
@ In:   r0 = input, r1 = output, r2 = number of 16-byte blocks,
@       r3 = AES_KEY, [sp] = 16-byte counter block (r8 after prologue).
@ The bit-sliced key schedule is built on the stack (or in the extended
@ key area under BSAES_ASM_EXTENDED_KEY) and wiped before returning.
@ Constant-time: deliberately no aes_nohw_* fallback for short inputs.
@-----------------------------------------------------------------------
.globl bsaes_ctr32_encrypt_blocks
.hidden bsaes_ctr32_encrypt_blocks
.type bsaes_ctr32_encrypt_blocks,%function
.align 5
bsaes_ctr32_encrypt_blocks:
@ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this
@ out to retain a constant-time implementation.
mov ip, sp
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ ctr is 1st arg on the stack
sub sp, sp, #0x10 @ scratch space to carry over the ctr
mov r9, sp @ save sp
ldr r10, [r3, #240] @ get # of rounds
#ifndef BSAES_ASM_EXTENDED_KEY
@ allocate the key schedule on the stack
sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
add r12, #96 @ size of bit-sliced key schedule
@ populate the key schedule
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
mov sp, r12 @ sp is sp
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
vld1.8 {q0}, [r8] @ load counter
#ifdef __APPLE__
mov r8, #:lower16:(.LREVM0SR-.LM0)
add r8, r6, r8
#else
add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
#endif
vldmia sp, {q4} @ load round0 key
#else
ldr r12, [r3, #244]
eors r12, #1
beq 0f
@ populate the key schedule
str r12, [r3, #244]
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
add r12, r3, #248 @ pass key schedule
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
.align 2
add r12, r3, #248
vld1.8 {q0}, [r8] @ load counter
adrl r8, .LREVM0SR @ borrow r8
vldmia r12, {q4} @ load round0 key
sub sp, #0x10 @ place for adjusted round0 key
#endif
@ Pre-byteswap the counter and round-0 key once so per-block byte
@ order fixups can ride _bsaes_encrypt8's prologue permutation.
vmov.i32 q8,#1 @ compose 1<<96
veor q9,q9,q9
vrev32.8 q0,q0
vext.8 q8,q9,q8,#4
vrev32.8 q4,q4
vadd.u32 q9,q8,q8 @ compose 2<<96
vstmia sp, {q4} @ save adjusted round0 key
b .Lctr_enc_loop
.align 4
.Lctr_enc_loop:
@ Materialise counters ctr+0 .. ctr+7 in q0-q7, then encrypt all
@ eight keystream blocks at once.
vadd.u32 q10, q8, q9 @ compose 3<<96
vadd.u32 q1, q0, q8 @ +1
vadd.u32 q2, q0, q9 @ +2
vadd.u32 q3, q0, q10 @ +3
vadd.u32 q4, q1, q10
vadd.u32 q5, q2, q10
vadd.u32 q6, q3, q10
vadd.u32 q7, q4, q10
vadd.u32 q10, q5, q10 @ next counter
@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
@ to flip byte order in 32-bit counter
vldmia sp, {q9} @ load round0 key
#ifndef BSAES_ASM_EXTENDED_KEY
add r4, sp, #0x10 @ pass next round key
#else
add r4, r3, #264
#endif
vldmia r8, {q8} @ .LREVM0SR
mov r5, r10 @ pass rounds
vstmia r9, {q10} @ save next counter
#ifdef __APPLE__
mov r6, #:lower16:(.LREVM0SR-.LSR)
sub r6, r8, r6
#else
sub r6, r8, #.LREVM0SR-.LSR @ pass constants
#endif
bl _bsaes_encrypt8_alt
subs r2, r2, #8
blo .Lctr_enc_loop_done
@ Full batch: XOR the 8 keystream blocks (note the permuted register
@ order q0,q1,q4,q6,q3,q7,q2,q5 produced by _bsaes_encrypt8) with the
@ input and store, while recomposing the counter-increment constants.
vld1.8 {q8,q9}, [r0]! @ load input
vld1.8 {q10,q11}, [r0]!
veor q0, q8
veor q1, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q10
veor q6, q11
vld1.8 {q14,q15}, [r0]!
veor q3, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q7, q13
veor q2, q14
vst1.8 {q4}, [r1]!
veor q5, q15
vst1.8 {q6}, [r1]!
vmov.i32 q8, #1 @ compose 1<<96
vst1.8 {q3}, [r1]!
veor q9, q9, q9
vst1.8 {q7}, [r1]!
vext.8 q8, q9, q8, #4
vst1.8 {q2}, [r1]!
vadd.u32 q9,q8,q8 @ compose 2<<96
vst1.8 {q5}, [r1]!
vldmia r9, {q0} @ load counter
bne .Lctr_enc_loop
b .Lctr_enc_done
.align 4
.Lctr_enc_loop_done:
@ Partial final batch (1..7 blocks): consume keystream blocks one at
@ a time in the same permuted register order.
add r2, r2, #8
vld1.8 {q8}, [r0]! @ load input
veor q0, q8
vst1.8 {q0}, [r1]! @ write output
cmp r2, #2
blo .Lctr_enc_done
vld1.8 {q9}, [r0]!
veor q1, q9
vst1.8 {q1}, [r1]!
beq .Lctr_enc_done
vld1.8 {q10}, [r0]!
veor q4, q10
vst1.8 {q4}, [r1]!
cmp r2, #4
blo .Lctr_enc_done
vld1.8 {q11}, [r0]!
veor q6, q11
vst1.8 {q6}, [r1]!
beq .Lctr_enc_done
vld1.8 {q12}, [r0]!
veor q3, q12
vst1.8 {q3}, [r1]!
cmp r2, #6
blo .Lctr_enc_done
vld1.8 {q13}, [r0]!
veor q7, q13
vst1.8 {q7}, [r1]!
beq .Lctr_enc_done
vld1.8 {q14}, [r0]
veor q2, q14
vst1.8 {q2}, [r1]!
.Lctr_enc_done:
@ Wipe key material from the stack before returning.
vmov.i32 q0, #0
vmov.i32 q1, #0
#ifndef BSAES_ASM_EXTENDED_KEY
.Lctr_enc_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne .Lctr_enc_bzero
#else
vstmia sp, {q0,q1}
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
VFP_ABI_POP
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
@ OpenSSL contains aes_nohw_* fallback code here. We patch this
@ out to retain a constant-time implementation.
.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
Prom11111/plumbingtech | 70,675 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha256-x86_64-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,@function
.align 16
sha256_block_data_order_nohw:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $64+32,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop
.align 16
.Lloop:
movl %ebx,%edi
leaq K256(%rip),%rbp
xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
movl 4(%rsp),%r13d
movl 56(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
movl 60(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
movl 0(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
movl 4(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
movl 8(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
movl 12(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
movl 16(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
movl 20(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
movl 24(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
movl 28(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
movl 32(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
movl 36(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
movl 40(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
movl 44(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
movl 48(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
movl 52(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
cmpb $0,3(%rbp)
jnz .Lrounds_16_xx
movq 64+0(%rsp),%rdi
addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
ret
.cfi_endproc
.size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw
.section .rodata
.align 64
.type K256,@object
// SHA-256 round-constant table (the 64 K[t] words of FIPS 180-4).
// Each row of four constants is stored twice in a row, so a wider
// (256-bit) load sees the same constants in both 128-bit lanes.
// The code addresses this table with a +128 bias (see K256+128 in the
// SHA-NI routine) and fetches the byte-swap mask from K256+512.
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
// Non-constant tail (offset 512 onward): pshufb byte-swap mask for
// loading big-endian message words, followed by lane-shuffle masks
// used by the SIMD code paths.
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 // "SHA256 block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.text
//-----------------------------------------------------------------------
// void sha256_block_data_order_hw(uint32_t state[8], const void *in,
//                                 size_t num)
// SHA-NI (Intel SHA Extensions) implementation.  SysV AMD64 ABI:
//   rdi = state (8 x u32, read and written back)
//   rsi = input data, advanced 64 bytes per iteration
//   rdx = number of 64-byte blocks, decremented once per iteration
// Clobbers: rcx, rdx, rsi, xmm0-xmm10, flags.
//
// The .byte sequences are hand-encoded instructions, emitted as raw
// bytes so the file assembles on toolchains without SHA-extension
// support:
//   15,56,203,/r     = sha256rnds2  (two compression rounds; xmm0 holds
//                                    the current pair of K+W values)
//   15,56,204,/r     = sha256msg1   (message-schedule helper, part 1)
//   15,56,205,/r     = sha256msg2   (message-schedule helper, part 2)
//   102,15,56,0,/r   = pshufb       (byte-swap input words, mask in xmm7)
//   102,15,58,15,... = palignr      (lane rotation for schedule/state)
//-----------------------------------------------------------------------
.globl sha256_block_data_order_hw
.hidden sha256_block_data_order_hw
.type sha256_block_data_order_hw,@function
.align 64
sha256_block_data_order_hw:
.cfi_startproc
_CET_ENDBR
leaq K256+128(%rip),%rcx // rcx = round-constant table, biased +128
movdqu (%rdi),%xmm1 // load state words 0-3
movdqu 16(%rdi),%xmm2 // load state words 4-7
movdqa 512-128(%rcx),%xmm7 // byte-swap mask (K256+512)
// Reorder the 8 state words from linear order into the two-register
// lane layout consumed by sha256rnds2 (xmm1/xmm2 pair).
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm7,%xmm8 // keep a copy of the swap mask in xmm8
.byte 102,15,58,15,202,8 // palignr: merge state halves
punpcklqdq %xmm0,%xmm2
jmp .Loop_shaext
.align 16
// One iteration of .Loop_shaext consumes one 64-byte message block:
// load + byte-swap 16 message words (xmm3-xmm6), then run 64 rounds,
// two at a time, interleaving sha256msg1/msg2 schedule updates with
// sha256rnds2 state updates, with K constants added via xmm0.
.Loop_shaext:
movdqu (%rsi),%xmm3
movdqu 16(%rsi),%xmm4
movdqu 32(%rsi),%xmm5
.byte 102,15,56,0,223 // pshufb: byte-swap W[0..3]
movdqu 48(%rsi),%xmm6
movdqa 0-128(%rcx),%xmm0 // K[0..3]
paddd %xmm3,%xmm0
.byte 102,15,56,0,231 // pshufb: byte-swap W[4..7]
movdqa %xmm2,%xmm10 // save state halves for the
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
nop
movdqa %xmm1,%xmm9 // ...final feed-forward add
.byte 15,56,203,202
movdqa 32-128(%rcx),%xmm0 // K[4..7]
paddd %xmm4,%xmm0
.byte 102,15,56,0,239 // pshufb: byte-swap W[8..11]
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
leaq 64(%rsi),%rsi // advance input to next block
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 64-128(%rcx),%xmm0 // K[8..11]
paddd %xmm5,%xmm0
.byte 102,15,56,0,247 // pshufb: byte-swap W[12..15]
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4 // palignr for schedule expansion
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 96-128(%rcx),%xmm0 // K[12..15]
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 128-128(%rcx),%xmm0 // K[16..19]
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 160-128(%rcx),%xmm0 // K[20..23]
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 192-128(%rcx),%xmm0 // K[24..27]
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 224-128(%rcx),%xmm0 // K[28..31]
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 256-128(%rcx),%xmm0 // K[32..35]
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 288-128(%rcx),%xmm0 // K[36..39]
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 320-128(%rcx),%xmm0 // K[40..43]
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 352-128(%rcx),%xmm0 // K[44..47]
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 384-128(%rcx),%xmm0 // K[48..51]
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 416-128(%rcx),%xmm0 // K[52..55]
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
.byte 15,56,203,202
paddd %xmm7,%xmm6
movdqa 448-128(%rcx),%xmm0 // K[56..59]
paddd %xmm5,%xmm0
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
.byte 15,56,205,245
movdqa %xmm8,%xmm7 // restore byte-swap mask for next block
.byte 15,56,203,202
movdqa 480-128(%rcx),%xmm0 // K[60..63]
paddd %xmm6,%xmm0
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
decq %rdx // --num blocks
nop
.byte 15,56,203,202
// Feed-forward: add the saved starting state for this block.
paddd %xmm10,%xmm2
paddd %xmm9,%xmm1
jnz .Loop_shaext
// Undo the sha256rnds2 lane layout and store the 8 state words back.
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm7
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,215,8 // palignr: recombine state halves
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
ret
.cfi_endproc
.size sha256_block_data_order_hw,.-sha256_block_data_order_hw
.globl sha256_block_data_order_ssse3
.hidden sha256_block_data_order_ssse3
.type sha256_block_data_order_ssse3,@function
.align 64
sha256_block_data_order_ssse3:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_ssse3:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop_ssse3
.align 16
.Lloop_ssse3:
movdqa K256+512(%rip),%xmm7
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
.byte 102,15,56,0,199
movdqu 48(%rsi),%xmm3
leaq K256(%rip),%rbp
.byte 102,15,56,0,207
movdqa 0(%rbp),%xmm4
movdqa 32(%rbp),%xmm5
.byte 102,15,56,0,215
paddd %xmm0,%xmm4
movdqa 64(%rbp),%xmm6
.byte 102,15,56,0,223
movdqa 96(%rbp),%xmm7
paddd %xmm1,%xmm5
paddd %xmm2,%xmm6
paddd %xmm3,%xmm7
movdqa %xmm4,0(%rsp)
movl %eax,%r14d
movdqa %xmm5,16(%rsp)
movl %ebx,%edi
movdqa %xmm6,32(%rsp)
xorl %ecx,%edi
movdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lssse3_00_47
.align 16
.Lssse3_00_47:
subq $-128,%rbp
rorl $14,%r13d
movdqa %xmm1,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm3,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,224,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,250,4
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm3,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 4(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm0
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm0
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm0,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 0(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm0,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,0(%rsp)
rorl $14,%r13d
movdqa %xmm2,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm0,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,225,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,251,4
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm0,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 20(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm1
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm1
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm1,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 32(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm1,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,16(%rsp)
rorl $14,%r13d
movdqa %xmm3,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm1,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,226,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,248,4
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm1,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 36(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm2
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm2
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm2,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 64(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm2,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,32(%rsp)
rorl $14,%r13d
movdqa %xmm0,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm2,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,227,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,249,4
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm2,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 52(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm3
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm3
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm3,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 96(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm3,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne .Lssse3_00_47
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_ssse3
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_ssse3:
ret
.cfi_endproc
.size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3
/*
 * sha256_block_data_order_avx — SHA-256 compression, AVX (128-bit) variant.
 *
 * C-equivalent signature (standard OpenSSL entry point — confirm against header):
 *   void sha256_block_data_order_avx(uint32_t state[8], const void *in, size_t num);
 * SysV AMD64 ABI:
 *   rdi = state (eight 32-bit words a..h, read at 0..28(%rdi) and written back)
 *   rsi = input block pointer (advanced 64 bytes per iteration)
 *   rdx = block count (converted to an end pointer below)
 * Clobbers: rax,rcx,rdx,rsi,rdi,r8-r15, xmm0-xmm9, flags. rbx,rbp,r12-r15 are
 * saved/restored. Generated by perlasm; this pass adds comments only.
 */
.globl sha256_block_data_order_avx
.hidden sha256_block_data_order_avx
.type sha256_block_data_order_avx,@function
.align 64
sha256_block_data_order_avx:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
/* End-of-input pointer: rdx = rsi + num*64 (shl 4 then *4 via the lea scale). */
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
/* Align rsp to 64; frame: 0..63(%rsp) = W+K scratch for 16 rounds,
 * 64+0/8/16(%rsp) = saved ctx/input/end pointers, 88(%rsp) = caller rsp. */
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_avx:
vzeroupper
/* Load working variables a..h from the state: eax,ebx,ecx,edx,r8d..r11d. */
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
/* xmm8/xmm9: constant pshufb masks stored after the K256 table, used by the
 * message-schedule sigma1 step inside .Lavx_00_47. */
vmovdqa K256+512+32(%rip),%xmm8
vmovdqa K256+512+64(%rip),%xmm9
jmp .Lloop_avx
.align 16
/* Per 64-byte block: load W[0..15], shuffle via the mask at K256+512
 * (presumably the big-endian byte swap — confirm against the table), pre-add
 * the first 16 round constants, and stash W+K at 0..48(%rsp). */
.Lloop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%edi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%edi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lavx_00_47
.align 16
/* Rounds 0-47 (label per perlasm naming): four groups of 4 scalar rounds each,
 * with the AVX message-schedule expansion of the NEXT 16 W words interleaved
 * into the scalar stream (vpalignr/vpsrld/vpslld/vpxor implement sigma0;
 * vpsrld/vpsrlq/vpshufb xmm8/xmm9 implement sigma1). Each pass advances rbp by
 * 128 bytes of K256 and re-stores W+K at 0..48(%rsp) for the following pass.
 * Scalar round registers: r13d carries Sigma1(e) work, r14d Sigma0(a) work,
 * r12d the Ch term, r15d/edi alternate as the Maj accumulator. */
.Lavx_00_47:
subq $-128,%rbp
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm3,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm0,%xmm0
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpshufd $80,%xmm0,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm0,%xmm0
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
/* xmm0 now holds the next W[0..3]; pre-add K and park at 0(%rsp). */
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
/* Second group of 4 rounds; schedules xmm1 (next W[4..7]). */
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm0,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm1,%xmm1
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpshufd $80,%xmm1,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm1,%xmm1
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
/* Third group of 4 rounds; schedules xmm2 (next W[8..11]). */
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm1,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm2,%xmm2
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpshufd $80,%xmm2,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm2,%xmm2
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
/* Fourth group of 4 rounds; schedules xmm3 (next W[12..15]). */
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm2,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm3,%xmm3
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpshufd $80,%xmm3,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm3,%xmm3
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
/* Loop while the marker byte at 131(%rbp) in the K256 table is non-zero
 * (rbp advanced 128 per pass); presumably three passes total, matching the
 * .Lavx_00_47 label name — confirm against the K256 layout. */
cmpb $0,131(%rbp)
jne .Lavx_00_47
/* Final 16 scalar rounds: consume the last pre-added W+K values from
 * 0..60(%rsp); no further message expansion needed. */
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
/* Add the block's working variables into the saved state (Davies-Meyer
 * feed-forward), write the state back, advance rsi by 64, and loop until
 * rsi reaches the end pointer saved at 64+16(%rsp). */
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_avx
/* Epilogue: clear AVX upper state, restore callee-saved registers from the
 * caller's frame (saved rsp at 88(%rsp)), and return. */
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
ret
.cfi_endproc
.size sha256_block_data_order_avx,.-sha256_block_data_order_avx
#endif
|
Prom11111/plumbingtech | 4,266 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghashv8-armx-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,%function
.align 4
// gcm_init_clmul(Htable, H) — expand the 128-bit GHASH hash key at [x1]
// into precomputed powers at [x0], using the ARMv8 Crypto Extension
// polynomial multipliers (pmull/pmull2 over GF(2)[x]).
// Stores six 16-byte entries, in this order (see the st1 comments below):
//   Htable[0] = "twisted" H   (H<<1 with the 0xc2... reduction constant folded in)
//   Htable[1] = packed Karatsuba pre-products for H/H^2
//   Htable[2] = H^2
//   Htable[3] = H^3
//   Htable[4] = packed Karatsuba pre-products for H^3/H^4
//   Htable[5] = H^4
// v19 holds the reduction polynomial constant throughout.
gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.size gcm_init_clmul,.-gcm_init_clmul
.globl gcm_gmult_clmul
.hidden gcm_gmult_clmul
.type gcm_gmult_clmul,%function
.align 4
// gcm_gmult_clmul(Xi, Htable) — one GHASH multiplication: Xi = Xi * H over
// GF(2^128), using the twisted H and the packed Karatsuba pre-product stored
// by gcm_init_clmul.
//   x0 = 16-byte Xi block (in/out); byte-reversed on load/store on
//        little-endian targets (see the rev64 under !__AARCH64EB__).
//   x1 = Htable (twisted H in v20, Karatsuba pre-product in v21).
// Karatsuba 128x128-bit carry-less multiply followed by the standard
// two-phase reduction by the 0xc2... polynomial constant (v19).
gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_gmult_clmul,.-gcm_gmult_clmul
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
Prom11111/plumbingtech | 4,229 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/x86-mont-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl bn_mul_mont
.hidden bn_mul_mont
.type bn_mul_mont,@function
.align 16
# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
#                 const BN_ULONG *np, const BN_ULONG *n0, int num)
# Montgomery multiplication for x86-32 using MMX/SSE2 (pmuludq) 32x32->64
# multiplies.  Always returns 1 in %eax.
# NOTE(review): the rp/ap/bp/np/n0/num slot assignment above is inferred from
# the loads below (20..40(%esp) after the four pushes, with num at 40(%esp)
# and the fifth argument dereferenced once) -- confirm against the C prototype.
# Local frame after setup:
#   4..16(%esp) = rp, ap, bp, np;  20(%esp) = n0[0];  24(%esp) = saved %esp;
#   32(%esp)... = tp[], the num+1-word scratch product.
bn_mul_mont:
.L_bn_mul_mont_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
# Carve a 64-byte-aligned scratch frame of num+2 words below the current
# stack; the 2047/2048 arithmetic nudges its placement relative to the
# argument block (apparently an aliasing/bank-conflict tweak -- see upstream
# x86-mont.pl for the rationale).
movl 40(%esp),%edi
leal 20(%esp),%esi
leal 24(%esp),%edx
addl $2,%edi
negl %edi
leal -32(%esp,%edi,4),%ebp
negl %edi
movl %ebp,%eax
subl %edx,%eax
andl $2047,%eax
subl %eax,%ebp
xorl %ebp,%edx
andl $2048,%edx
xorl $2048,%edx
subl %edx,%ebp
andl $-64,%ebp
movl %esp,%eax
subl %ebp,%eax
andl $-4096,%eax
movl %esp,%edx
leal (%ebp,%eax,1),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L000page_walk
jmp .L001page_walk_done
.align 16
# Touch the newly claimed stack one page at a time so the OS guard page is
# grown instead of faulted past.
.L000page_walk:
leal -4096(%esp),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L000page_walk
.L001page_walk_done:
# Copy the arguments into the frame; the fifth argument is dereferenced once
# (n0[0]) and %edx still holds the caller's %esp for the epilogue.
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%ebp
movl 16(%esi),%esi
movl (%esi),%esi
movl %eax,4(%esp)
movl %ebx,8(%esp)
movl %ecx,12(%esp)
movl %ebp,16(%esp)
movl %esi,20(%esp)
leal -3(%edi),%ebx
movl %edx,24(%esp)
# %mm7 = 0x00000000ffffffff low-word mask; %esi=ap, %edi=bp, %ebp=np.
movl $-1,%eax
movd %eax,%mm7
movl 8(%esp),%esi
movl 12(%esp),%edi
movl 16(%esp),%ebp
xorl %edx,%edx
xorl %ecx,%ecx
# First pass (i=0): tp[] = ap[]*bp[0] + m*np[], where
# m = lo(ap[0]*bp[0]) * n0 mod 2^32 (the pmuludq 20(%esp) below).
movd (%edi),%mm4
movd (%esi),%mm5
movd (%ebp),%mm3
pmuludq %mm4,%mm5
movq %mm5,%mm2
movq %mm5,%mm0
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
incl %ecx
.align 16
.L0021st:
# %mm2/%mm3 carry the two running 64-bit accumulators (a*b and m*n chains).
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
leal 1(%ecx),%ecx
cmpl %ebx,%ecx
jl .L0021st
# Peeled last word of the first pass, then fold the two carries together.
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm2,%mm3
movq %mm3,32(%esp,%ebx,4)
incl %edx
# Outer loop over bp[1..num-1]: tp[] = (tp[] + ap[]*bp[i] + m*np[]) / 2^32.
.L003outer:
xorl %ecx,%ecx
movd (%edi,%edx,4),%mm4
movd (%esi),%mm5
movd 32(%esp),%mm6
movd (%ebp),%mm3
pmuludq %mm4,%mm5
paddq %mm6,%mm5
movq %mm5,%mm0
movq %mm5,%mm2
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 36(%esp),%mm6
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm6,%mm2
incl %ecx
decl %ebx
.L004inner:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
movd 36(%esp,%ecx,4),%mm6
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
paddq %mm6,%mm2
decl %ebx
leal 1(%ecx),%ecx
jnz .L004inner
movl %ecx,%ebx
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
movd 36(%esp,%ebx,4),%mm6
paddq %mm2,%mm3
paddq %mm6,%mm3
movq %mm3,32(%esp,%ebx,4)
leal 1(%edx),%edx
cmpl %ebx,%edx
jle .L003outer
emms
jmp .L005common_tail
.align 16
# Final conditional subtraction: write rp[] = tp[] - np[], then select
# branch-free between tp and the difference via complementary masks.
.L005common_tail:
movl 16(%esp),%ebp
movl 4(%esp),%edi
leal 32(%esp),%esi
movl (%esi),%eax
movl %ebx,%ecx
xorl %edx,%edx
.align 16
# CF is clear on entry (xorl %edx,%edx above), so the first sbbl is a
# plain subtract; the borrow then chains through the loop.
.L006sub:
sbbl (%ebp,%edx,4),%eax
movl %eax,(%edi,%edx,4)
decl %ecx
movl 4(%esi,%edx,4),%eax
leal 1(%edx),%edx
jge .L006sub
sbbl $0,%eax
movl $-1,%edx
xorl %eax,%edx
jmp .L007copy
.align 16
# %eax/%edx are all-zeros/all-ones complements derived from the final
# borrow; select tp vs tp-np without branching, and overwrite the tp[]
# scratch word as we go.
.L007copy:
movl 32(%esp,%ebx,4),%esi
movl (%edi,%ebx,4),%ebp
movl %ecx,32(%esp,%ebx,4)
andl %eax,%esi
andl %edx,%ebp
orl %esi,%ebp
movl %ebp,(%edi,%ebx,4)
decl %ebx
jge .L007copy
# Restore the caller's stack pointer saved at 24(%esp) and return 1.
movl 24(%esp),%esp
movl $1,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_mont,.-.L_bn_mul_mont_begin
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
Prom11111/plumbingtech | 42,856 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha512-armv4-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ https://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ See the License for the specific language governing permissions and
@ limitations under the License.
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project.
@ ====================================================================
@ SHA512 block procedure for ARMv4. September 2007.
@ This code is ~4.5 (four and a half) times faster than code generated
@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
@ Xscale PXA250 core].
@
@ July 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 6% improvement on
@ Cortex A8 core and ~40 cycles per processed byte.
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 7%
@ improvement on Cortex A8 core and ~38 cycles per byte.
@ March 2011.
@
@ Add NEON implementation. On Cortex A8 it was measured to process
@ one byte in 23.3 cycles or ~60% faster than integer-only code.
@ August 2012.
@
@ Improve NEON performance by 12% on Snapdragon S4. In absolute
@ terms it's 22.6 cycles per byte, which is disappointing result.
@ Technical writers asserted that 3-way S4 pipeline can sustain
@ multiple NEON instructions per cycle, but dual NEON issue could
@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
@ for further details. On side note Cortex-A15 processes one byte in
@ 16 cycles.
@ Byte order [in]dependence. =========================================
@
@ Originally caller was expected to maintain specific *dword* order in
@ h[0-7], namely with most significant dword at *lower* address, which
@ was reflected in below two parameters as 0 and 4. Now caller is
@ expected to maintain native byte order for whole 64-bit values.
#ifndef __KERNEL__
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
#else
# define __ARM_MAX_ARCH__ 7
# define VFP_ABI_PUSH
# define VFP_ABI_POP
#endif
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
#ifdef __ARMEL__
# define LO 0
# define HI 4
# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
#else
# define HI 0
# define LO 4
# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
#endif
.text
#if defined(__thumb2__)
.syntax unified
.thumb
# define adrl adr
#else
.code 32
#endif
@ K512: the 80 SHA-512 round constants (FIPS 180-4), two 64-bit values per
@ line; the WORD64 macro defined earlier in this file emits each constant in
@ the lo/hi word order matching the target endianness.
.type K512,%object
.align 5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
.globl sha512_block_data_order_nohw
.hidden sha512_block_data_order_nohw
.type sha512_block_data_order_nohw,%function
@ sha512_block_data_order_nohw(ctx, inp, num)
@   r0 = hash state: eight 64-bit words, each handled as a LO/HI 32-bit pair
@   r1 = input data (may be unaligned; bytes are assembled big-endian)
@   r2 = number of 128-byte blocks (converted to an end pointer via lsl#7)
@ Integer-only SHA-512 for ARMv4+: every 64-bit operation is synthesized
@ from 32-bit register pairs (adds/adc, paired eor/shift).
@ Working-variable layout: a lives in r5:r6 (lo:hi) and e in r7:r8 across
@ rounds; b,c,d,f,g,h plus the message schedule X[] live in a stack frame
@ that SLIDES 8 bytes per round (the "sub sp,sp,#8" in the round body), so
@ X[] history is always at fixed sp offsets.  The frame is unwound with
@ "add sp,sp,#640" (80 rounds * 8 bytes) after the last round.
@ r14 doubles as the K512 pointer, with bit 0 borrowed as a loop-phase flag
@ (set by orreq, tested by tst, cleared by bic below).
sha512_block_data_order_nohw:
add r2,r1,r2,lsl#7 @ len to point at the end of inp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
adr r14,K512
sub sp,sp,#9*8
ldr r7,[r0,#32+LO]
ldr r8,[r0,#32+HI]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
@ Per-block loop: spill b..h into the frame, keep a (r5:r6) and e (r7:r8)
@ in registers.
.Loop:
str r9, [sp,#48+0]
str r10, [sp,#48+4]
str r11, [sp,#56+0]
str r12, [sp,#56+4]
ldr r5,[r0,#0+LO]
ldr r6,[r0,#0+HI]
ldr r3,[r0,#8+LO]
ldr r4,[r0,#8+HI]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
str r3,[sp,#8+0]
str r4,[sp,#8+4]
str r9, [sp,#16+0]
str r10, [sp,#16+4]
str r11, [sp,#24+0]
str r12, [sp,#24+4]
ldr r3,[r0,#40+LO]
ldr r4,[r0,#40+HI]
str r3,[sp,#40+0]
str r4,[sp,#40+4]
@ Rounds 0..15: fetch one big-endian 64-bit message word into r3:r4
@ (byte-by-byte on ARMv4/v6, word loads + rev on ARMv7+).
.L00_15:
#if __ARM_ARCH<7
ldrb r3,[r1,#7]
ldrb r9, [r1,#6]
ldrb r10, [r1,#5]
ldrb r11, [r1,#4]
ldrb r4,[r1,#3]
ldrb r12, [r1,#2]
orr r3,r3,r9,lsl#8
ldrb r9, [r1,#1]
orr r3,r3,r10,lsl#16
ldrb r10, [r1],#8
orr r3,r3,r11,lsl#24
orr r4,r4,r12,lsl#8
orr r4,r4,r9,lsl#16
orr r4,r4,r10,lsl#24
#else
ldr r3,[r1,#4]
ldr r4,[r1],#8
#ifdef __ARMEL__
rev r3,r3
rev r4,r4
#endif
#endif
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
@ 148 = low byte of K[15].lo (0xcf692694): flags the end of rounds 0..15
@ by setting bit 0 of the K pointer in r14.
teq r9,#148
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
tst r14,#1
beq .L00_15
ldr r9,[sp,#184+0]
ldr r10,[sp,#184+4]
bic r14,r14,#1
@ Rounds 16..79: extend the message schedule -- combine four earlier X[]
@ words (at fixed sp offsets thanks to the sliding frame) with the sigma0
@ and sigma1 functions, then run the same round body as above.
.L16_79:
@ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
@ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
@ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
mov r3,r9,lsr#1
ldr r11,[sp,#80+0]
mov r4,r10,lsr#1
ldr r12,[sp,#80+4]
eor r3,r3,r10,lsl#31
eor r4,r4,r9,lsl#31
eor r3,r3,r9,lsr#8
eor r4,r4,r10,lsr#8
eor r3,r3,r10,lsl#24
eor r4,r4,r9,lsl#24
eor r3,r3,r9,lsr#7
eor r4,r4,r10,lsr#7
eor r3,r3,r10,lsl#25
@ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
@ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
@ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
mov r9,r11,lsr#19
mov r10,r12,lsr#19
eor r9,r9,r12,lsl#13
eor r10,r10,r11,lsl#13
eor r9,r9,r12,lsr#29
eor r10,r10,r11,lsr#29
eor r9,r9,r11,lsl#3
eor r10,r10,r12,lsl#3
eor r9,r9,r11,lsr#6
eor r10,r10,r12,lsr#6
ldr r11,[sp,#120+0]
eor r9,r9,r12,lsl#26
ldr r12,[sp,#120+4]
adds r3,r3,r9
ldr r9,[sp,#192+0]
adc r4,r4,r10
ldr r10,[sp,#192+4]
adds r3,r3,r11
adc r4,r4,r12
adds r3,r3,r9
adc r4,r4,r10
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
@ 23 = low byte of K[79].lo (0x4a475817): flags the final round.
teq r9,#23
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
#if __ARM_ARCH>=7
ittt eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r9,[sp,#184+0]
ldreq r10,[sp,#184+4]
beq .L16_79
bic r14,r14,#1
@ Accumulate the working variables back into the state at [r0],
@ one 64-bit lane (adds/adc pair) at a time.
ldr r3,[sp,#8+0]
ldr r4,[sp,#8+4]
ldr r9, [r0,#0+LO]
ldr r10, [r0,#0+HI]
ldr r11, [r0,#8+LO]
ldr r12, [r0,#8+HI]
adds r9,r5,r9
str r9, [r0,#0+LO]
adc r10,r6,r10
str r10, [r0,#0+HI]
adds r11,r3,r11
str r11, [r0,#8+LO]
adc r12,r4,r12
str r12, [r0,#8+HI]
ldr r5,[sp,#16+0]
ldr r6,[sp,#16+4]
ldr r3,[sp,#24+0]
ldr r4,[sp,#24+4]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
adds r9,r5,r9
str r9, [r0,#16+LO]
adc r10,r6,r10
str r10, [r0,#16+HI]
adds r11,r3,r11
str r11, [r0,#24+LO]
adc r12,r4,r12
str r12, [r0,#24+HI]
ldr r3,[sp,#40+0]
ldr r4,[sp,#40+4]
ldr r9, [r0,#32+LO]
ldr r10, [r0,#32+HI]
ldr r11, [r0,#40+LO]
ldr r12, [r0,#40+HI]
adds r7,r7,r9
str r7,[r0,#32+LO]
adc r8,r8,r10
str r8,[r0,#32+HI]
adds r11,r3,r11
str r11, [r0,#40+LO]
adc r12,r4,r12
str r12, [r0,#40+HI]
ldr r5,[sp,#48+0]
ldr r6,[sp,#48+4]
ldr r3,[sp,#56+0]
ldr r4,[sp,#56+4]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
adds r9,r5,r9
str r9, [r0,#48+LO]
adc r10,r6,r10
str r10, [r0,#48+HI]
adds r11,r3,r11
str r11, [r0,#56+LO]
adc r12,r4,r12
str r12, [r0,#56+HI]
@ Pop the 640-byte sliding frame, rewind r14 to K512[0], and loop while
@ the input pointer has not reached the precomputed end (r2).
add sp,sp,#640
sub r14,r14,#640
teq r1,r2
bne .Loop
add sp,sp,#8*9 @ destroy frame
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl sha512_block_data_order_neon
.hidden sha512_block_data_order_neon
.type sha512_block_data_order_neon,%function
.align 4
sha512_block_data_order_neon:
dmb @ errata #451034 on early Cortex A8
add r2,r1,r2,lsl#7 @ len to point at the end of inp
adr r3,K512
VFP_ABI_PUSH
vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context
.Loop_neon:
vshr.u64 d24,d20,#14 @ 0
#if 0<16
vld1.64 {d0},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 0>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 0<16 && defined(__ARMEL__)
vrev64.8 d0,d0
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 1
#if 1<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 1>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 1<16 && defined(__ARMEL__)
vrev64.8 d1,d1
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 2
#if 2<16
vld1.64 {d2},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 2>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 2<16 && defined(__ARMEL__)
vrev64.8 d2,d2
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 3
#if 3<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 3>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 3<16 && defined(__ARMEL__)
vrev64.8 d3,d3
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 4
#if 4<16
vld1.64 {d4},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 4>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 4<16 && defined(__ARMEL__)
vrev64.8 d4,d4
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 5
#if 5<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 5>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 5<16 && defined(__ARMEL__)
vrev64.8 d5,d5
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 6
#if 6<16
vld1.64 {d6},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 6>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 6<16 && defined(__ARMEL__)
vrev64.8 d6,d6
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 7
#if 7<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 7>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 7<16 && defined(__ARMEL__)
vrev64.8 d7,d7
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 d24,d20,#14 @ 8
#if 8<16
vld1.64 {d8},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 8>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 8<16 && defined(__ARMEL__)
vrev64.8 d8,d8
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 9
#if 9<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 9>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 9<16 && defined(__ARMEL__)
vrev64.8 d9,d9
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 10
#if 10<16
vld1.64 {d10},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 10>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 10<16 && defined(__ARMEL__)
vrev64.8 d10,d10
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 11
#if 11<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 11>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 11<16 && defined(__ARMEL__)
vrev64.8 d11,d11
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 12
#if 12<16
vld1.64 {d12},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 12>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 12<16 && defined(__ARMEL__)
vrev64.8 d12,d12
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 13
#if 13<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 13>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 13<16 && defined(__ARMEL__)
vrev64.8 d13,d13
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 14
#if 14<16
vld1.64 {d14},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 14>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 14<16 && defined(__ARMEL__)
vrev64.8 d14,d14
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 15
#if 15<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 15>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 15<16 && defined(__ARMEL__)
vrev64.8 d15,d15
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
mov r12,#4
.L16_79_neon:
subs r12,#1
vshr.u64 q12,q7,#19
vshr.u64 q13,q7,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q7,#6
vsli.64 q12,q7,#45
vext.8 q14,q0,q1,#8 @ X[i+1]
vsli.64 q13,q7,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q0,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q4,q5,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q0,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q0,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 16<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 17
#if 17<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 17>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 17<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q0,#19
vshr.u64 q13,q0,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q0,#6
vsli.64 q12,q0,#45
vext.8 q14,q1,q2,#8 @ X[i+1]
vsli.64 q13,q0,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q1,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q5,q6,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q1,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q1,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 18<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 19
#if 19<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 19>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 19<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q1,#19
vshr.u64 q13,q1,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q1,#6
vsli.64 q12,q1,#45
vext.8 q14,q2,q3,#8 @ X[i+1]
vsli.64 q13,q1,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q2,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q6,q7,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q2,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q2,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 20<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 21
#if 21<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 21>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 21<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q2,#19
vshr.u64 q13,q2,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q2,#6
vsli.64 q12,q2,#45
vext.8 q14,q3,q4,#8 @ X[i+1]
vsli.64 q13,q2,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q3,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q7,q0,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q3,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q3,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 22<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 23
#if 23<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 23>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 23<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 q12,q3,#19
vshr.u64 q13,q3,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q3,#6
vsli.64 q12,q3,#45
vext.8 q14,q4,q5,#8 @ X[i+1]
vsli.64 q13,q3,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q4,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q0,q1,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q4,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q4,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 24<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 25
#if 25<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 25>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 25<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q4,#19
vshr.u64 q13,q4,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q4,#6
vsli.64 q12,q4,#45
vext.8 q14,q5,q6,#8 @ X[i+1]
vsli.64 q13,q4,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q5,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q1,q2,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q5,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q5,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 26<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 27
#if 27<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 27>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 27<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q5,#19
vshr.u64 q13,q5,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q5,#6
vsli.64 q12,q5,#45
vext.8 q14,q6,q7,#8 @ X[i+1]
vsli.64 q13,q5,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q6,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q2,q3,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q6,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q6,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 28<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 29
#if 29<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 29>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 29<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q6,#19
vshr.u64 q13,q6,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q6,#6
vsli.64 q12,q6,#45
vext.8 q14,q7,q0,#8 @ X[i+1]
vsli.64 q13,q6,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q7,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q3,q4,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q7,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q7,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 30<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 31
#if 31<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 31>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 31<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
bne .L16_79_neon
vadd.i64 d16,d30 @ h+=Maj from the past
vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
vadd.i64 q10,q14
vadd.i64 q11,q15
vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context
teq r1,r2
sub r3,#640 @ rewind K512
bne .Loop_neon
VFP_ABI_POP
bx lr @ .word 0xe12fff1e
.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
#endif
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
Prom11111/plumbingtech | 24,306 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aes-gcm-avx2-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
// Constant data for the AES-GCM (VAES/VPCLMULQDQ + AVX2) routines below.
.section __DATA,__const
.p2align 4
// Shuffle mask that byte-reverses each 128-bit lane; used to convert
// between the little-endian AES counter view and GHASH byte order.
L$bswap_mask:
.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
// GHASH reduction polynomial constant (the 0xc2... form of the
// bit-reflected GCM polynomial), one copy per 128-bit lane.
L$gfpoly:
.quad 1, 0xc200000000000000
// Same constant with the low "internal carry" bit also set; used during
// key setup when the hash subkey is multiplied by x (shifted left by 1).
L$gfpoly_and_internal_carrybit:
.quad 1, 0xc200000000000001
.p2align 5
// Added to a broadcast counter block to form the pair {ctr+0, ctr+1}
// in the two 128-bit lanes of a ymm register.
L$ctr_pattern:
.quad 0, 0
.quad 1, 0
// Advances both counter blocks of a ymm register by 2.
L$inc_2blocks:
.quad 2, 0
.quad 2, 0
.text
//-----------------------------------------------------------------------
// gcm_init_vpclmulqdq_avx2(Htable, H)
// ABI:   SysV AMD64.  In: rdi = Htable (output table), rsi = H[2] (raw
//        GHASH subkey).  No return value.  Clobbers xmm/ymm0-6, flags.
// Precomputes powers of the hash subkey H for the AVX2+VPCLMULQDQ GCM
// code: pairs of powers are stored at offsets 96, 64, 32 and 0 of
// Htable, and XOR-folded halves (for the Karatsuba middle term) at
// offsets 128 and 128+32.  Generated code — do not edit the
// instruction sequence by hand.
//-----------------------------------------------------------------------
.globl _gcm_init_vpclmulqdq_avx2
.private_extern _gcm_init_vpclmulqdq_avx2
.p2align 5
_gcm_init_vpclmulqdq_avx2:
_CET_ENDBR
// Load H with its 64-bit halves swapped, then multiply it by x
// (shift left by one bit modulo the GHASH polynomial): the arithmetic
// shift broadcasts each lane's carry-out bit, and the masked XOR folds
// the carries plus the polynomial term back in.
vpshufd $0x4e,(%rsi),%xmm3
vpshufd $0xd3,%xmm3,%xmm0
vpsrad $31,%xmm0,%xmm0
vpaddq %xmm3,%xmm3,%xmm3
vpand L$gfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
vpxor %xmm0,%xmm3,%xmm3
// ymm6 = reduction polynomial constant, one copy per lane.
vbroadcasti128 L$gfpoly(%rip),%ymm6
// Square H (three carryless multiplies: lo*lo, lo*hi, hi*lo) and
// reduce in two folding steps -> xmm5 = next power of H.
vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1
vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2
vpxor %xmm2,%xmm1,%xmm1
vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2
vpshufd $0x4e,%xmm0,%xmm0
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
vpshufd $0x4e,%xmm1,%xmm1
vpxor %xmm1,%xmm5,%xmm5
vpxor %xmm0,%xmm5,%xmm5
// ymm3 = pair of the two powers computed so far; ymm5 = the higher
// power broadcast to both lanes (the per-step multiplier below).
vinserti128 $1,%xmm3,%ymm5,%ymm3
vinserti128 $1,%xmm5,%ymm5,%ymm5
// Multiply the pair by the broadcast power (same mul+reduce pattern,
// now on full ymm vectors) -> ymm4 = the next pair of powers.
vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm4,%ymm4
vpxor %ymm0,%ymm4,%ymm4
// Store the first two power-pairs, plus their XOR-folded 64-bit halves
// (unpack low/high then XOR) at 128+32 for the Karatsuba middle term.
vmovdqu %ymm3,96(%rdi)
vmovdqu %ymm4,64(%rdi)
vpunpcklqdq %ymm3,%ymm4,%ymm0
vpunpckhqdq %ymm3,%ymm4,%ymm1
vpxor %ymm1,%ymm0,%ymm0
vmovdqu %ymm0,128+32(%rdi)
// Two more multiply+reduce rounds produce the higher power-pairs ...
vpclmulqdq $0x00,%ymm5,%ymm4,%ymm0
vpclmulqdq $0x01,%ymm5,%ymm4,%ymm1
vpclmulqdq $0x10,%ymm5,%ymm4,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vpclmulqdq $0x11,%ymm5,%ymm4,%ymm3
vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm3,%ymm3
vpxor %ymm0,%ymm3,%ymm3
vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm4,%ymm4
vpxor %ymm0,%ymm4,%ymm4
// ... stored at 32 and 0, with their folded halves at 128.
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,0(%rdi)
vpunpcklqdq %ymm3,%ymm4,%ymm0
vpunpckhqdq %ymm3,%ymm4,%ymm1
vpxor %ymm1,%ymm0,%ymm0
vmovdqu %ymm0,128(%rdi)
// Clear upper ymm state before returning to SSE/C code (SysV ABI).
vzeroupper
ret
//-----------------------------------------------------------------------
// gcm_ghash_vpclmulqdq_avx2_1(Xi, Htable, in)
// ABI:   SysV AMD64.  In: rdi = Xi (16-byte GHASH accumulator, in/out),
//        rsi = Htable (key-power table from gcm_init above),
//        rdx = input block.  Clobbers xmm0-3, xmm5, ymm6-7, flags.
// Single-block GHASH update ("_1" variant): folds exactly one 16-byte
// block into the accumulator, i.e. Xi = (Xi ^ block) * H reduced
// modulo the GHASH polynomial.  H is read from offset 128-16 of
// Htable (the last entry of the first table half).
//-----------------------------------------------------------------------
.globl _gcm_ghash_vpclmulqdq_avx2_1
.private_extern _gcm_ghash_vpclmulqdq_avx2_1
.p2align 5
_gcm_ghash_vpclmulqdq_avx2_1:
_CET_ENDBR
// Load the accumulator, byte-reversed into GHASH bit order.
vbroadcasti128 L$bswap_mask(%rip),%ymm6
vmovdqu (%rdi),%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vbroadcasti128 L$gfpoly(%rip),%ymm7
L$ghash_lastblock:
// XOR the (byte-reversed) input block into the accumulator ...
vmovdqu (%rdx),%xmm0
vpshufb %xmm6,%xmm0,%xmm0
vpxor %xmm0,%xmm5,%xmm5
// ... then multiply by H: three carryless products (lo*lo, lo*hi,
// hi*lo) plus hi*hi, folded together with a two-step reduction
// against the polynomial constant in xmm7.
vmovdqu 128-16(%rsi),%xmm0
vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
vpxor %xmm3,%xmm2,%xmm2
vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
vpshufd $0x4e,%xmm1,%xmm1
vpxor %xmm1,%xmm2,%xmm2
vpxor %xmm3,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
vpshufd $0x4e,%xmm2,%xmm2
vpxor %xmm2,%xmm5,%xmm5
vpxor %xmm1,%xmm5,%xmm5
L$ghash_done:
// Convert back to memory byte order and store the updated Xi.
vpshufb %xmm6,%xmm5,%xmm5
vmovdqu %xmm5,(%rdi)
ret
//-----------------------------------------------------------------------
// aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi)
// ABI:   SysV AMD64.  In: rdi = in (plaintext), rsi = out (ciphertext),
//        rdx = len (bytes), rcx = AES key schedule, r8 = counter block,
//        r9 = Htable (GHASH key powers), and the seventh argument
//        (Xi, the 16-byte GHASH accumulator, in/out) is taken from the
//        stack — 16(%rsp) after the push below.  Signature inferred
//        from register usage; confirm against the BoringSSL prototype.
// AES-CTR-encrypts len bytes (two blocks per ymm register, four ymm
// vectors = 128 bytes per main-loop pass, AES rounds interleaved with
// the GHASH of the previously produced ciphertext) and folds all
// ciphertext into Xi.  Generated code — do not edit by hand.
//-----------------------------------------------------------------------
.globl _aes_gcm_enc_update_vaes_avx2
.private_extern _aes_gcm_enc_update_vaes_avx2
.p2align 5
_aes_gcm_enc_update_vaes_avx2:
_CET_ENDBR
pushq %r12
movq 16(%rsp),%r12            // r12 = Xi (7th argument, from the stack)
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+8(%rip)
#endif
// ymm0 = byte-swap mask; xmm1 = Xi in GHASH bit order;
// ymm11 = counter block broadcast to both lanes, big-endian view.
vbroadcasti128 L$bswap_mask(%rip),%ymm0
vmovdqu (%r12),%xmm1
vpshufb %xmm0,%xmm1,%xmm1
vbroadcasti128 (%r8),%ymm11
vpshufb %ymm0,%ymm11,%ymm11
// r10d = AES round count (offset 240 of the key schedule);
// r11 -> last round key; ymm9/ymm10 = first/last round keys broadcast.
movl 240(%rcx),%r10d
leal -20(,%r10,4),%r10d
leaq 96(%rcx,%r10,4),%r11
vbroadcasti128 (%rcx),%ymm9
vbroadcasti128 (%r11),%ymm10
vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11   // lanes become {ctr, ctr+1}
cmpq $127,%rdx
jbe L$crypt_loop_4x_done__func1            // <128 bytes: tail path only
// ymm7/ymm8 = XOR-folded key-power halves (Karatsuba middle terms).
vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
// First 128-byte batch: generate 8 keystream blocks (4 ymm vectors).
// GHASH of this batch's ciphertext is deferred to the next loop pass.
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
// XOR in the first round key (AddRoundKey).
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
leaq 16(%rcx),%rax
// One AES round for all four vectors per iteration, until rax reaches
// the last round key (r11).
L$vaesenc_loop_first_4_vecs__func1:
vbroadcasti128 (%rax),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_first_4_vecs__func1
// Final round: vaesenclast with (last round key ^ plaintext), which
// both finishes AES and XORs the keystream into the data.
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
vaesenclast %ymm2,%ymm12,%ymm12
vaesenclast %ymm3,%ymm13,%ymm13
vaesenclast %ymm5,%ymm14,%ymm14
vaesenclast %ymm6,%ymm15,%ymm15
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi               // in += 128 (short-immediate form)
addq $-128,%rdx               // len -= 128
cmpq $127,%rdx
jbe L$ghash_last_ciphertext_4x__func1
.p2align 4
// Main loop: encrypt the next 128 bytes while folding the previous
// 128 bytes of ciphertext (still at 0..96(%rsi)) into GHASH.  AES
// rounds and carryless multiplies are interleaved for throughput.
L$crypt_loop_4x__func1:
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
// Dispatch on key size: >24 "rounds value" = AES-256 (2 extra rounds),
// ==24 = AES-192, else fall through to the common AES-128 tail.
cmpl $24,%r10d
jl L$aes128__func1
je L$aes192__func1
vbroadcasti128 -208(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vbroadcasti128 -192(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
L$aes192__func1:
vbroadcasti128 -176(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vbroadcasti128 -160(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
L$aes128__func1:
prefetcht0 512(%rdi)
prefetcht0 512+64(%rdi)
// GHASH vector 0 of the previous ciphertext batch (accumulator Xi in
// ymm1 is folded into the first block); ymm5/ymm1/ymm6 accumulate the
// lo/hi/middle Karatsuba products.
vmovdqu 0(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
vbroadcasti128 -144(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vbroadcasti128 -128(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
// GHASH vector 1.
vmovdqu 32(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -112(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
// GHASH vector 2.
vmovdqu 64(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vbroadcasti128 -96(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -80(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
// GHASH vector 3.
vmovdqu 96(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vbroadcasti128 -64(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vmovdqu 96(%r9),%ymm4
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -48(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
// Combine the Karatsuba partial products and reduce modulo the GHASH
// polynomial (two folding steps), still interleaved with AES rounds.
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -32(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -16(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
// Collapse the two 128-bit lanes of the GHASH result into xmm1.
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
subq $-128,%rsi               // out += 128; now write the new batch
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
vaesenclast %ymm2,%ymm12,%ymm12
vaesenclast %ymm3,%ymm13,%ymm13
vaesenclast %ymm5,%ymm14,%ymm14
vaesenclast %ymm6,%ymm15,%ymm15
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
addq $-128,%rdx
cmpq $127,%rdx
ja L$crypt_loop_4x__func1
// Fold the final full 128-byte ciphertext batch into GHASH (same
// multiply/accumulate/reduce sequence as in the main loop, without
// interleaved AES rounds).
L$ghash_last_ciphertext_4x__func1:
vmovdqu 0(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
vmovdqu 32(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 64(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 96(%r9),%ymm4
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
subq $-128,%rsi
// Tail: handle the remaining 0..127 bytes.
L$crypt_loop_4x_done__func1:
testq %rdx,%rdx
jz L$done__func1
// r8 -> key power for the first remaining block: end of the table
// minus the remaining length, so the powers line up with Xi's age.
leaq 128(%r9),%r8
subq %rdx,%r8
// Clear the unreduced GHASH accumulators (lo / mid / hi).
vpxor %xmm5,%xmm5,%xmm5
vpxor %xmm6,%xmm6,%xmm6
vpxor %xmm7,%xmm7,%xmm7
cmpq $64,%rdx
jb L$lessthan64bytes__func1
// >= 64 bytes remain: encrypt and GHASH one full 64-byte chunk.
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_1__func1:
vbroadcasti128 (%rax),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_1__func1
vaesenclast %ymm10,%ymm12,%ymm12
vaesenclast %ymm10,%ymm13,%ymm13
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%ymm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %ymm3,%ymm13,%ymm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
// GHASH the ciphertext just written (unreduced accumulation).
vpshufb %ymm0,%ymm12,%ymm12
vpshufb %ymm0,%ymm13,%ymm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%ymm3
vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
vpxor %ymm4,%ymm7,%ymm7
addq $64,%r8
addq $64,%rdi
addq $64,%rsi
subq $64,%rdx
jz L$reduce__func1
vpxor %xmm1,%xmm1,%xmm1       // Xi already folded in; clear for next chunk
// < 64 bytes remain: encrypt up to 64 bytes of keystream, consume
// 1-3 ciphertext blocks of it.
L$lessthan64bytes__func1:
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_2__func1:
vbroadcasti128 (%rax),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_2__func1
vaesenclast %ymm10,%ymm12,%ymm12
vaesenclast %ymm10,%ymm13,%ymm13
// Note: len is a whole number of blocks here (16/32/48 bytes).
cmpq $32,%rdx
jb L$xor_one_block__func1
je L$xor_two_blocks__func1
// 48 bytes: full ymm plus one xmm block.
L$xor_three_blocks__func1:
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%xmm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %xmm3,%xmm13,%xmm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %xmm13,32(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpshufb %xmm0,%xmm13,%xmm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%xmm3
vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm7,%ymm7
jmp L$ghash_mul_one_vec_unreduced__func1
// 32 bytes: one full ymm block pair.
L$xor_two_blocks__func1:
vmovdqu (%rdi),%ymm2
vpxor %ymm2,%ymm12,%ymm12
vmovdqu %ymm12,(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
jmp L$ghash_mul_one_vec_unreduced__func1
// 16 bytes: single block.
L$xor_one_block__func1:
vmovdqu (%rdi),%xmm2
vpxor %xmm2,%xmm12,%xmm12
vmovdqu %xmm12,(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm1,%xmm12,%xmm12
vmovdqu (%r8),%xmm2
// Accumulate (ciphertext ^ Xi) * key-power without reducing yet.
L$ghash_mul_one_vec_unreduced__func1:
vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm7,%ymm7
// Final two-step reduction of the accumulated products; result -> xmm1.
L$reduce__func1:
vbroadcasti128 L$gfpoly(%rip),%ymm2
vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm7,%ymm7
vpxor %ymm3,%ymm7,%ymm7
vextracti128 $1,%ymm7,%xmm1
vpxor %xmm7,%xmm1,%xmm1
L$done__func1:
// Store the updated GHASH accumulator back (memory byte order).
vpshufb %xmm0,%xmm1,%xmm1
vmovdqu %xmm1,(%r12)
vzeroupper               // clear upper ymm state before returning (SysV ABI)
popq %r12
ret
.globl _aes_gcm_dec_update_vaes_avx2
.private_extern _aes_gcm_dec_update_vaes_avx2
.p2align 5
_aes_gcm_dec_update_vaes_avx2:
_CET_ENDBR
pushq %r12
movq 16(%rsp),%r12
vbroadcasti128 L$bswap_mask(%rip),%ymm0
vmovdqu (%r12),%xmm1
vpshufb %xmm0,%xmm1,%xmm1
vbroadcasti128 (%r8),%ymm11
vpshufb %ymm0,%ymm11,%ymm11
movl 240(%rcx),%r10d
leal -20(,%r10,4),%r10d
leaq 96(%rcx,%r10,4),%r11
vbroadcasti128 (%rcx),%ymm9
vbroadcasti128 (%r11),%ymm10
vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11
cmpq $127,%rdx
jbe L$crypt_loop_4x_done__func2
vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
.p2align 4
L$crypt_loop_4x__func2:
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
cmpl $24,%r10d
jl L$aes128__func2
je L$aes192__func2
vbroadcasti128 -208(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vbroadcasti128 -192(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
L$aes192__func2:
vbroadcasti128 -176(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vbroadcasti128 -160(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
L$aes128__func2:
prefetcht0 512(%rdi)
prefetcht0 512+64(%rdi)
vmovdqu 0(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
vbroadcasti128 -144(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vbroadcasti128 -128(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vmovdqu 32(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -112(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vmovdqu 64(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vbroadcasti128 -96(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -80(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vbroadcasti128 -64(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vmovdqu 96(%r9),%ymm4
vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm5,%ymm5
vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -48(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -32(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -16(%r11),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
vaesenc %ymm2,%ymm14,%ymm14
vaesenc %ymm2,%ymm15,%ymm15
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
vaesenclast %ymm2,%ymm12,%ymm12
vaesenclast %ymm3,%ymm13,%ymm13
vaesenclast %ymm5,%ymm14,%ymm14
vaesenclast %ymm6,%ymm15,%ymm15
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
subq $-128,%rsi
addq $-128,%rdx
cmpq $127,%rdx
ja L$crypt_loop_4x__func2
L$crypt_loop_4x_done__func2:
testq %rdx,%rdx
jz L$done__func2
leaq 128(%r9),%r8
subq %rdx,%r8
vpxor %xmm5,%xmm5,%xmm5
vpxor %xmm6,%xmm6,%xmm6
vpxor %xmm7,%xmm7,%xmm7
cmpq $64,%rdx
jb L$lessthan64bytes__func2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_1__func2:
vbroadcasti128 (%rax),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_1__func2
vaesenclast %ymm10,%ymm12,%ymm12
vaesenclast %ymm10,%ymm13,%ymm13
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%ymm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %ymm3,%ymm13,%ymm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpshufb %ymm0,%ymm3,%ymm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%ymm3
vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
vpxor %ymm4,%ymm7,%ymm7
addq $64,%r8
addq $64,%rdi
addq $64,%rsi
subq $64,%rdx
jz L$reduce__func2
vpxor %xmm1,%xmm1,%xmm1
L$lessthan64bytes__func2:
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_2__func2:
vbroadcasti128 (%rax),%ymm2
vaesenc %ymm2,%ymm12,%ymm12
vaesenc %ymm2,%ymm13,%ymm13
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_2__func2
vaesenclast %ymm10,%ymm12,%ymm12
vaesenclast %ymm10,%ymm13,%ymm13
cmpq $32,%rdx
jb L$xor_one_block__func2
je L$xor_two_blocks__func2
L$xor_three_blocks__func2:
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%xmm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %xmm3,%xmm13,%xmm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %xmm13,32(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpshufb %xmm0,%xmm3,%xmm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%xmm3
vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm7,%ymm7
jmp L$ghash_mul_one_vec_unreduced__func2
L$xor_two_blocks__func2:
vmovdqu (%rdi),%ymm2
vpxor %ymm2,%ymm12,%ymm12
vmovdqu %ymm12,(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
jmp L$ghash_mul_one_vec_unreduced__func2
L$xor_one_block__func2:
vmovdqu (%rdi),%xmm2
vpxor %xmm2,%xmm12,%xmm12
vmovdqu %xmm12,(%rsi)
vpshufb %xmm0,%xmm2,%xmm12
vpxor %xmm1,%xmm12,%xmm12
vmovdqu (%r8),%xmm2
L$ghash_mul_one_vec_unreduced__func2:
vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
vpxor %ymm4,%ymm7,%ymm7
L$reduce__func2:
vbroadcasti128 L$gfpoly(%rip),%ymm2
vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm7,%ymm7
vpxor %ymm3,%ymm7,%ymm7
vextracti128 $1,%ymm7,%xmm1
vpxor %xmm7,%xmm1,%xmm1
L$done__func2:
vpshufb %xmm0,%xmm1,%xmm1
vmovdqu %xmm1,(%r12)
vzeroupper
popq %r12
ret
#endif
|
Prom11111/plumbingtech | 20,965 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesni-x86_64-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.type _aesni_encrypt2,@function
.align 16
_aesni_encrypt2:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
.Lenc_loop2:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop2
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.cfi_endproc
.size _aesni_encrypt2,.-_aesni_encrypt2
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
.Lenc_loop3:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop3
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.cfi_endproc
.size _aesni_encrypt3,.-_aesni_encrypt3
.type _aesni_encrypt4,@function
.align 16
_aesni_encrypt4:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 0x0f,0x1f,0x00
addq $16,%rax
.Lenc_loop4:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop4
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.cfi_endproc
.size _aesni_encrypt4,.-_aesni_encrypt4
.type _aesni_encrypt6,@function
.align 16
_aesni_encrypt6:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,217
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp .Lenc_loop6_enter
.align 16
.Lenc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.Lenc_loop6_enter:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop6
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.cfi_endproc
.size _aesni_encrypt6,.-_aesni_encrypt6
.type _aesni_encrypt8,@function
.align 16
_aesni_encrypt8:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,209
pxor %xmm0,%xmm7
pxor %xmm0,%xmm8
.byte 102,15,56,220,217
pxor %xmm0,%xmm9
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp .Lenc_loop8_inner
.align 16
.Lenc_loop8:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.Lenc_loop8_inner:
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.Lenc_loop8_enter:
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop8
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
.byte 102,68,15,56,221,192
.byte 102,68,15,56,221,200
ret
.cfi_endproc
.size _aesni_encrypt8,.-_aesni_encrypt8
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
aes_hw_ctr32_encrypt_blocks:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit(%rip)
#endif
cmpq $1,%rdx
jne .Lctr32_bulk
movups (%r8),%xmm2
movups (%rdi),%xmm3
movl 240(%rcx),%edx
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
.Loop_enc1_1:
.byte 102,15,56,220,209
decl %edx
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
jnz .Loop_enc1_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
xorps %xmm3,%xmm2
pxor %xmm3,%xmm3
movups %xmm2,(%rsi)
xorps %xmm2,%xmm2
jmp .Lctr32_epilogue
.align 16
.Lctr32_bulk:
leaq (%rsp),%r11
.cfi_def_cfa_register %r11
pushq %rbp
.cfi_offset %rbp,-16
subq $128,%rsp
andq $-16,%rsp
movdqu (%r8),%xmm2
movdqu (%rcx),%xmm0
movl 12(%r8),%r8d
pxor %xmm0,%xmm2
movl 12(%rcx),%ebp
movdqa %xmm2,0(%rsp)
bswapl %r8d
movdqa %xmm2,%xmm3
movdqa %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm2,64(%rsp)
movdqa %xmm2,80(%rsp)
movdqa %xmm2,96(%rsp)
movq %rdx,%r10
movdqa %xmm2,112(%rsp)
leaq 1(%r8),%rax
leaq 2(%r8),%rdx
bswapl %eax
bswapl %edx
xorl %ebp,%eax
xorl %ebp,%edx
.byte 102,15,58,34,216,3
leaq 3(%r8),%rax
movdqa %xmm3,16(%rsp)
.byte 102,15,58,34,226,3
bswapl %eax
movq %r10,%rdx
leaq 4(%r8),%r10
movdqa %xmm4,32(%rsp)
xorl %ebp,%eax
bswapl %r10d
.byte 102,15,58,34,232,3
xorl %ebp,%r10d
movdqa %xmm5,48(%rsp)
leaq 5(%r8),%r9
movl %r10d,64+12(%rsp)
bswapl %r9d
leaq 6(%r8),%r10
movl 240(%rcx),%eax
xorl %ebp,%r9d
bswapl %r10d
movl %r9d,80+12(%rsp)
xorl %ebp,%r10d
leaq 7(%r8),%r9
movl %r10d,96+12(%rsp)
bswapl %r9d
xorl %ebp,%r9d
movl %r9d,112+12(%rsp)
movups 16(%rcx),%xmm1
movdqa 64(%rsp),%xmm6
movdqa 80(%rsp),%xmm7
cmpq $8,%rdx
jb .Lctr32_tail
leaq 128(%rcx),%rcx
subq $8,%rdx
jmp .Lctr32_loop8
.align 32
.Lctr32_loop8:
addl $8,%r8d
movdqa 96(%rsp),%xmm8
.byte 102,15,56,220,209
movl %r8d,%r9d
movdqa 112(%rsp),%xmm9
.byte 102,15,56,220,217
bswapl %r9d
movups 32-128(%rcx),%xmm0
.byte 102,15,56,220,225
xorl %ebp,%r9d
nop
.byte 102,15,56,220,233
movl %r9d,0+12(%rsp)
leaq 1(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 48-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,16+12(%rsp)
leaq 2(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 64-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,32+12(%rsp)
leaq 3(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 80-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,48+12(%rsp)
leaq 4(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 96-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,64+12(%rsp)
leaq 5(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 112-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,80+12(%rsp)
leaq 6(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 128-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,96+12(%rsp)
leaq 7(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 144-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
xorl %ebp,%r9d
movdqu 0(%rdi),%xmm10
.byte 102,15,56,220,232
movl %r9d,112+12(%rsp)
cmpl $11,%eax
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 160-128(%rcx),%xmm0
jb .Lctr32_enc_done
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 176-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 192-128(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 208-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 224-128(%rcx),%xmm0
jmp .Lctr32_enc_done
.align 16
.Lctr32_enc_done:
movdqu 16(%rdi),%xmm11
pxor %xmm0,%xmm10
movdqu 32(%rdi),%xmm12
pxor %xmm0,%xmm11
movdqu 48(%rdi),%xmm13
pxor %xmm0,%xmm12
movdqu 64(%rdi),%xmm14
pxor %xmm0,%xmm13
movdqu 80(%rdi),%xmm15
pxor %xmm0,%xmm14
prefetcht0 448(%rdi)
prefetcht0 512(%rdi)
pxor %xmm0,%xmm15
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movdqu 96(%rdi),%xmm1
leaq 128(%rdi),%rdi
.byte 102,65,15,56,221,210
pxor %xmm0,%xmm1
movdqu 112-128(%rdi),%xmm10
.byte 102,65,15,56,221,219
pxor %xmm0,%xmm10
movdqa 0(%rsp),%xmm11
.byte 102,65,15,56,221,228
.byte 102,65,15,56,221,237
movdqa 16(%rsp),%xmm12
movdqa 32(%rsp),%xmm13
.byte 102,65,15,56,221,246
.byte 102,65,15,56,221,255
movdqa 48(%rsp),%xmm14
movdqa 64(%rsp),%xmm15
.byte 102,68,15,56,221,193
movdqa 80(%rsp),%xmm0
movups 16-128(%rcx),%xmm1
.byte 102,69,15,56,221,202
movups %xmm2,(%rsi)
movdqa %xmm11,%xmm2
movups %xmm3,16(%rsi)
movdqa %xmm12,%xmm3
movups %xmm4,32(%rsi)
movdqa %xmm13,%xmm4
movups %xmm5,48(%rsi)
movdqa %xmm14,%xmm5
movups %xmm6,64(%rsi)
movdqa %xmm15,%xmm6
movups %xmm7,80(%rsi)
movdqa %xmm0,%xmm7
movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi
subq $8,%rdx
jnc .Lctr32_loop8
addq $8,%rdx
jz .Lctr32_done
leaq -128(%rcx),%rcx
.Lctr32_tail:
leaq 16(%rcx),%rcx
cmpq $4,%rdx
jb .Lctr32_loop3
je .Lctr32_loop4
shll $4,%eax
movdqa 96(%rsp),%xmm8
pxor %xmm9,%xmm9
movups 16(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
leaq 32-16(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,225
addq $16,%rax
movups (%rdi),%xmm10
.byte 102,15,56,220,233
.byte 102,15,56,220,241
movups 16(%rdi),%xmm11
movups 32(%rdi),%xmm12
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
call .Lenc_loop8_enter
movdqu 48(%rdi),%xmm13
pxor %xmm10,%xmm2
movdqu 64(%rdi),%xmm10
pxor %xmm11,%xmm3
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
pxor %xmm10,%xmm6
movdqu %xmm5,48(%rsi)
movdqu %xmm6,64(%rsi)
cmpq $6,%rdx
jb .Lctr32_done
movups 80(%rdi),%xmm11
xorps %xmm11,%xmm7
movups %xmm7,80(%rsi)
je .Lctr32_done
movups 96(%rdi),%xmm12
xorps %xmm12,%xmm8
movups %xmm8,96(%rsi)
jmp .Lctr32_done
.align 32
.Lctr32_loop4:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx),%xmm1
jnz .Lctr32_loop4
.byte 102,15,56,221,209
.byte 102,15,56,221,217
movups (%rdi),%xmm10
movups 16(%rdi),%xmm11
.byte 102,15,56,221,225
.byte 102,15,56,221,233
movups 32(%rdi),%xmm12
movups 48(%rdi),%xmm13
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm4,32(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm5,48(%rsi)
jmp .Lctr32_done
.align 32
.Lctr32_loop3:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx),%xmm1
jnz .Lctr32_loop3
.byte 102,15,56,221,209
.byte 102,15,56,221,217
.byte 102,15,56,221,225
movups (%rdi),%xmm10
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
cmpq $2,%rdx
jb .Lctr32_done
movups 16(%rdi),%xmm11
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
je .Lctr32_done
movups 32(%rdi),%xmm12
xorps %xmm12,%xmm4
movups %xmm4,32(%rsi)
.Lctr32_done:
xorps %xmm0,%xmm0
xorl %ebp,%ebp
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movaps %xmm0,0(%rsp)
pxor %xmm8,%xmm8
movaps %xmm0,16(%rsp)
pxor %xmm9,%xmm9
movaps %xmm0,32(%rsp)
pxor %xmm10,%xmm10
movaps %xmm0,48(%rsp)
pxor %xmm11,%xmm11
movaps %xmm0,64(%rsp)
pxor %xmm12,%xmm12
movaps %xmm0,80(%rsp)
pxor %xmm13,%xmm13
movaps %xmm0,96(%rsp)
pxor %xmm14,%xmm14
movaps %xmm0,112(%rsp)
pxor %xmm15,%xmm15
movq -8(%r11),%rbp
.cfi_restore %rbp
leaq (%r11),%rsp
.cfi_def_cfa_register %rsp
.Lctr32_epilogue:
ret
.cfi_endproc
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
.globl aes_hw_set_encrypt_key_base
.hidden aes_hw_set_encrypt_key_base
.type aes_hw_set_encrypt_key_base,@function
.align 16
aes_hw_set_encrypt_key_base:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
.cfi_adjust_cfa_offset 8
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds
cmpl $128,%esi
jne .Lbad_keybits
.L10rounds:
movl $9,%esi
movups %xmm0,(%rdx)
.byte 102,15,58,223,200,1
call .Lkey_expansion_128_cold
.byte 102,15,58,223,200,2
call .Lkey_expansion_128
.byte 102,15,58,223,200,4
call .Lkey_expansion_128
.byte 102,15,58,223,200,8
call .Lkey_expansion_128
.byte 102,15,58,223,200,16
call .Lkey_expansion_128
.byte 102,15,58,223,200,32
call .Lkey_expansion_128
.byte 102,15,58,223,200,64
call .Lkey_expansion_128
.byte 102,15,58,223,200,128
call .Lkey_expansion_128
.byte 102,15,58,223,200,27
call .Lkey_expansion_128
.byte 102,15,58,223,200,54
call .Lkey_expansion_128
movups %xmm0,(%rax)
movl %esi,80(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret
.align 16
.L14rounds:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movups %xmm0,(%rdx)
movups %xmm2,16(%rdx)
.byte 102,15,58,223,202,1
call .Lkey_expansion_256a_cold
.byte 102,15,58,223,200,1
call .Lkey_expansion_256b
.byte 102,15,58,223,202,2
call .Lkey_expansion_256a
.byte 102,15,58,223,200,2
call .Lkey_expansion_256b
.byte 102,15,58,223,202,4
call .Lkey_expansion_256a
.byte 102,15,58,223,200,4
call .Lkey_expansion_256b
.byte 102,15,58,223,202,8
call .Lkey_expansion_256a
.byte 102,15,58,223,200,8
call .Lkey_expansion_256b
.byte 102,15,58,223,202,16
call .Lkey_expansion_256a
.byte 102,15,58,223,200,16
call .Lkey_expansion_256b
.byte 102,15,58,223,202,32
call .Lkey_expansion_256a
.byte 102,15,58,223,200,32
call .Lkey_expansion_256b
.byte 102,15,58,223,202,64
call .Lkey_expansion_256a
movups %xmm0,(%rax)
movl %esi,16(%rax)
xorq %rax,%rax
jmp .Lenc_key_ret
.align 16
.Lbad_keybits:
movq $-2,%rax
.Lenc_key_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.align 16
.Lkey_expansion_128:
.cfi_startproc
movups %xmm0,(%rax)
leaq 16(%rax),%rax
.Lkey_expansion_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.cfi_endproc
.align 16
.Lkey_expansion_256a:
.cfi_startproc
movups %xmm2,(%rax)
leaq 16(%rax),%rax
.Lkey_expansion_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.cfi_endproc
.align 16
.Lkey_expansion_256b:
.cfi_startproc
movups %xmm0,(%rax)
leaq 16(%rax),%rax
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.cfi_endproc
.size aes_hw_set_encrypt_key_base,.-aes_hw_set_encrypt_key_base
.globl aes_hw_set_encrypt_key_alt
.hidden aes_hw_set_encrypt_key_alt
.type aes_hw_set_encrypt_key_alt,@function
.align 16
aes_hw_set_encrypt_key_alt:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
.cfi_adjust_cfa_offset 8
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds_alt
cmpl $128,%esi
jne .Lbad_keybits_alt
movl $9,%esi
movdqa .Lkey_rotate(%rip),%xmm5
movl $8,%r10d
movdqa .Lkey_rcon1(%rip),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,(%rdx)
jmp .Loop_key128
.align 16
.Loop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leaq 16(%rax),%rax
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%rax)
movdqa %xmm0,%xmm2
decl %r10d
jnz .Loop_key128
movdqa .Lkey_rcon1b(%rip),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%rax)
movl %esi,96(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret_alt
.align 16
.L14rounds_alt:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movdqa .Lkey_rotate(%rip),%xmm5
movdqa .Lkey_rcon1(%rip),%xmm4
movl $7,%r10d
movdqu %xmm0,0(%rdx)
movdqa %xmm2,%xmm1
movdqu %xmm2,16(%rdx)
jmp .Loop_key256
.align 16
.Loop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
decl %r10d
jz .Ldone_key256
pshufd $0xff,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%rax)
leaq 32(%rax),%rax
movdqa %xmm2,%xmm1
jmp .Loop_key256
.Ldone_key256:
movl %esi,16(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret_alt
.align 16
.Lbad_keybits_alt:
movq $-2,%rax
.Lenc_key_ret_alt:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.size aes_hw_set_encrypt_key_alt,.-aes_hw_set_encrypt_key_alt
.section .rodata
.align 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lincrement32:
.long 6,6,6,0
.Lincrement64:
.long 1,0,0,0
.Lincrement1:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Lkey_rotate:
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
.Lkey_rotate192:
.long 0x04070605,0x04070605,0x04070605,0x04070605
.Lkey_rcon1:
.long 1,1,1,1
.Lkey_rcon1b:
.long 0x1b,0x1b,0x1b,0x1b
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
.text
#endif
|
Prom11111/plumbingtech | 11,047 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/vpaes-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.p2align 4
//-----------------------------------------------------------------------
// _vpaes_encrypt_core: encrypt one AES block with SSSE3 vector permutes
// (constant-time, no table lookups).
// In:   %xmm0 = plaintext block, %rdx = expanded key schedule
//       %xmm9..%xmm15 preloaded by _vpaes_preheat (masks / sbox tables)
// Out:  %xmm0 = ciphertext block
// Clobbers: %rax, %r9, %r10, %r11, %xmm1-%xmm5, flags
// The ".byte 102,15,56,0,x" sequences are pshufb instructions encoded by
// hand for old assemblers (66 0F 38 00 /r).
//-----------------------------------------------------------------------
_vpaes_encrypt_core:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
// Split the state into low/high nibbles (%xmm9 = 0x0F mask) and apply
// the input transform L$k_ipt, XORed with round key 0.
movdqa %xmm9,%xmm1
movdqa L$k_ipt(%rip),%xmm2
pandn %xmm0,%xmm1
movdqu (%r9),%xmm5
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa L$k_ipt+16(%rip),%xmm0
.byte 102,15,56,0,193
pxor %xmm5,%xmm2
addq $16,%r9
pxor %xmm2,%xmm0
leaq L$k_mc_backward(%rip),%r10
jmp L$enc_entry
.p2align 4
// Middle rounds: sbox output (sb1/sb2 halves), MixColumns via the
// k_mc_forward/k_mc_backward rotating shuffle tables indexed by %r11
// (which cycles through 0x00/0x10/0x20/0x30), then add round key.
L$enc_loop:
movdqa %xmm13,%xmm4
movdqa %xmm12,%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa %xmm15,%xmm5
pxor %xmm4,%xmm0
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
movdqa (%r11,%r10,1),%xmm4
movdqa %xmm14,%xmm2
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addq $16,%r9
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
// Top of each round: GF(2^4) inversion by nibble, using the k_inv
// tables in %xmm10/%xmm11; also reloads the next round key into %xmm5.
L$enc_entry:
movdqa %xmm9,%xmm1
movdqa %xmm11,%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,232
movdqa %xmm10,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm10,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm10,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm10,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
jnz L$enc_loop
// Last round: output sbox (k_sbo at %r10-96/-80), final ShiftRows via
// the k_sr table at 64(%r11,%r10).
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.p2align 4
//-----------------------------------------------------------------------
// _vpaes_encrypt_core_2x: encrypt TWO AES blocks in parallel (used by
// the CTR loop). Interleaves two copies of _vpaes_encrypt_core; the
// second block lives in %xmm6 and scratch in %xmm7/%xmm8/%xmm11-%xmm13,
// so sbox constants are reloaded from memory each round instead of
// being kept in registers.
// In:   %xmm0, %xmm6 = plaintext blocks, %rdx = key schedule
//       %xmm9/%xmm10 preloaded by _vpaes_preheat
// Out:  %xmm0, %xmm6 = ciphertext blocks
// Clobbers: %rax, %r9, %r10, %r11, %xmm1-%xmm5, %xmm7, %xmm8,
//           %xmm11-%xmm13, flags
//-----------------------------------------------------------------------
_vpaes_encrypt_core_2x:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
// Input transform for both lanes, XOR round key 0.
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_ipt(%rip),%xmm2
movdqa %xmm2,%xmm8
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
movdqu (%r9),%xmm5
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,208
.byte 102,68,15,56,0,198
movdqa L$k_ipt+16(%rip),%xmm0
movdqa %xmm0,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,247
pxor %xmm5,%xmm2
pxor %xmm5,%xmm8
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
leaq L$k_mc_backward(%rip),%r10
jmp L$enc2x_entry
.p2align 4
// Middle rounds, two lanes interleaved (see _vpaes_encrypt_core).
L$enc2x_loop:
movdqa L$k_sb1(%rip),%xmm4
movdqa L$k_sb1+16(%rip),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
movdqa L$k_sb2(%rip),%xmm5
movdqa %xmm5,%xmm13
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
.byte 102,69,15,56,0,232
movdqa (%r11,%r10,1),%xmm4
movdqa L$k_sb2+16(%rip),%xmm2
movdqa %xmm2,%xmm8
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm0,%xmm3
movdqa %xmm6,%xmm11
pxor %xmm5,%xmm2
pxor %xmm13,%xmm8
.byte 102,15,56,0,193
.byte 102,15,56,0,241
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
.byte 102,15,56,0,220
.byte 102,68,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
pxor %xmm6,%xmm11
.byte 102,15,56,0,193
.byte 102,15,56,0,241
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
pxor %xmm11,%xmm6
// Per-round nibble inversion for both lanes; reloads next round key.
L$enc2x_entry:
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_inv+16(%rip),%xmm5
movdqa %xmm5,%xmm13
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,232
.byte 102,68,15,56,0,238
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm1,%xmm0
pxor %xmm7,%xmm6
.byte 102,15,56,0,217
.byte 102,68,15,56,0,223
movdqa %xmm10,%xmm4
movdqa %xmm10,%xmm12
pxor %xmm5,%xmm3
pxor %xmm13,%xmm11
.byte 102,15,56,0,224
.byte 102,68,15,56,0,230
movdqa %xmm10,%xmm2
movdqa %xmm10,%xmm8
pxor %xmm5,%xmm4
pxor %xmm13,%xmm12
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm0,%xmm2
pxor %xmm6,%xmm8
.byte 102,15,56,0,220
.byte 102,69,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
pxor %xmm7,%xmm11
jnz L$enc2x_loop
// Last round for both lanes: output sbox + final ShiftRows.
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
ret
.p2align 4
//-----------------------------------------------------------------------
// _vpaes_schedule_core: expand a user key into the vpaes key schedule.
// In:   %rdi = user key, %rdx = output schedule, %esi = key bits,
//       %ecx = 0 (direction), %r8 = 0x30 (ShiftRows rotation state)
// Out:  schedule written at (%rdx); all XMM scratch cleared on exit.
// NOTE(review): only the 128- and 256-bit paths are present here —
// this generated variant appears to omit 192-bit support; a 192-bit
// %esi would fall through to L$schedule_128. Confirm callers never
// pass 192.
//-----------------------------------------------------------------------
_vpaes_schedule_core:
call _vpaes_preheat
movdqa L$k_rcon(%rip),%xmm8
movdqu (%rdi),%xmm0
// Transform the raw key into the vpaes basis and emit round key 0.
movdqa %xmm0,%xmm3
leaq L$k_ipt(%rip),%r11
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
leaq L$k_sr(%rip),%r10
movdqu %xmm0,(%rdx)
L$schedule_go:
cmpl $192,%esi
ja L$schedule_256
// AES-128: 10 rounds, one schedule_round + mangle per round.
L$schedule_128:
movl $10,%esi
L$oop_schedule_128:
call _vpaes_schedule_round
decq %rsi
jz L$schedule_mangle_last
call _vpaes_schedule_mangle
jmp L$oop_schedule_128
.p2align 4
// AES-256: alternates a full schedule_round with a "low" round on the
// other half of the 256-bit key (%xmm6/%xmm7 swap dance below).
L$schedule_256:
movdqu 16(%rdi),%xmm0
call _vpaes_schedule_transform
movl $7,%esi
L$oop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decq %rsi
jz L$schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7
call _vpaes_schedule_low_round
movdqa %xmm5,%xmm7
jmp L$oop_schedule_256
.p2align 4
// Final round key: undo the pending ShiftRows, apply the output
// transform (k_opt), then wipe every scratch register.
L$schedule_mangle_last:
leaq L$k_deskew(%rip),%r11
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,193
leaq L$k_opt(%rip),%r11
addq $32,%rdx
L$schedule_mangle_last_dec:
addq $-16,%rdx
pxor L$k_s63(%rip),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%rdx)
// Zeroize key material left in registers before returning.
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.p2align 4
//-----------------------------------------------------------------------
// _vpaes_schedule_round: one full AES key-schedule round.
// %xmm0 = next key word input, %xmm7 = current low key, %xmm8 = rcon.
// The leading .byte sequences are palignr (rcon rotation) and the
// RotWord/pshufd step; _vpaes_schedule_low_round is the tail entry used
// directly by the 256-bit schedule (skips rcon + RotWord).
// Out: %xmm0 = %xmm7 = new round key. Clobbers %xmm1-%xmm4, flags.
//-----------------------------------------------------------------------
_vpaes_schedule_round:
// Extract and rotate the round constant (palignr $15 on %xmm8).
pxor %xmm1,%xmm1
.byte 102,65,15,58,15,200,15
.byte 102,69,15,58,15,192,15
pxor %xmm1,%xmm7
// RotWord: broadcast the high word, then rotate bytes by 1.
pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1
_vpaes_schedule_low_round:
// "Smear" %xmm7: xor in copies shifted left by 4 and 8 bytes, then
// add the 0x5B bias (k_s63).
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor L$k_s63(%rip),%xmm7
// Substitute %xmm0 through the vector-permute sbox (same nibble
// inversion network as the encrypt core).
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa %xmm11,%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm10,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa %xmm13,%xmm4
.byte 102,15,56,0,226
movdqa %xmm12,%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
// Combine sbox output with the smeared key.
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.p2align 4
//-----------------------------------------------------------------------
// _vpaes_schedule_transform: linear transform of %xmm0 using the pair
// of 16-entry nibble tables at (%r11) and 16(%r11) (e.g. k_ipt, k_opt,
// k_deskew): out = tbl_lo[lo nibble] ^ tbl_hi[hi nibble].
// In: %xmm0, %r11 = table base, %xmm9 = 0x0F mask. Out: %xmm0.
// Clobbers %xmm1, %xmm2.
//-----------------------------------------------------------------------
_vpaes_schedule_transform:
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa (%r11),%xmm2
.byte 102,15,56,0,208
movdqa 16(%r11),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.p2align 4
//-----------------------------------------------------------------------
// _vpaes_schedule_mangle: convert a raw schedule word in %xmm0 into the
// stored (encryption) round-key form and write it out.
// Adds the 0x5B bias (k_s63), applies k_mc_forward three times, then
// the pending ShiftRows rotation selected by %r8 (which cycles
// 0x30 -> 0x20 -> 0x10 -> 0x00 below). Advances %rdx by 16.
// Preserves %xmm0; output written through %xmm3.
//-----------------------------------------------------------------------
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa L$k_mc_forward(%rip),%xmm5
addq $16,%rdx
pxor L$k_s63(%rip),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
L$schedule_mangle_both:
// Apply the k_sr rotation for the current round and step %r8.
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217
addq $-16,%r8
andq $0x30,%r8
movdqu %xmm3,(%rdx)
ret
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
.p2align 4
//-----------------------------------------------------------------------
// int vpaes_set_encrypt_key(const uint8_t *key, unsigned bits,
//                           AES_KEY *out)   — SysV AMD64 ABI.
// Computes rounds = bits/32 + 5 (10 for 128, 14 for 256), stores it at
// out->rounds (offset 240), then runs _vpaes_schedule_core.
// Always returns 0.
//-----------------------------------------------------------------------
_vpaes_set_encrypt_key:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+5(%rip)
#endif
movl %esi,%eax
shrl $5,%eax
addl $5,%eax
// AES rounds count lives at offset 240 of the key structure.
movl %eax,240(%rdx)
movl $0,%ecx
movl $0x30,%r8d
call _vpaes_schedule_core
xorl %eax,%eax
ret
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
.p2align 4
//-----------------------------------------------------------------------
// void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
//                                 size_t blocks, const AES_KEY *key,
//                                 const uint8_t ivec[16])
// CTR mode with a 32-bit big-endian counter in the last IV word.
// Keeps out-in in %rsi so one index (%rdi) walks both buffers; the
// counter is byte-swapped to little-endian (L$rev_ctr) so it can be
// incremented with paddd, and swapped back before each encryption.
// Handles one odd block first, then two blocks per iteration via
// _vpaes_encrypt_core_2x.
//-----------------------------------------------------------------------
_vpaes_ctr32_encrypt_blocks:
_CET_ENDBR
// Put the key into %rdx where the encrypt cores expect it.
xchgq %rcx,%rdx
testq %rcx,%rcx
jz L$ctr32_abort
movdqu (%r8),%xmm0
movdqa L$ctr_add_one(%rip),%xmm8
// %rsi = out - in; stores below use (%rsi,%rdi,1).
subq %rdi,%rsi
call _vpaes_preheat
movdqa %xmm0,%xmm6
pshufb L$rev_ctr(%rip),%xmm6
testq $1,%rcx
jz L$ctr32_prep_loop
// Odd block count: encrypt one block with the single-lane core.
movdqu (%rdi),%xmm7
call _vpaes_encrypt_core
pxor %xmm7,%xmm0
paddd %xmm8,%xmm6
movdqu %xmm0,(%rsi,%rdi,1)
subq $1,%rcx
leaq 16(%rdi),%rdi
jz L$ctr32_done
L$ctr32_prep_loop:
// %xmm14/%xmm15 hold the two counters (n, n+1) for the 2x loop.
movdqa %xmm6,%xmm14
movdqa %xmm6,%xmm15
paddd %xmm8,%xmm15
L$ctr32_loop:
movdqa L$rev_ctr(%rip),%xmm1
movdqa %xmm14,%xmm0
movdqa %xmm15,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
call _vpaes_encrypt_core_2x
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa L$ctr_add_two(%rip),%xmm3
pxor %xmm1,%xmm0
pxor %xmm2,%xmm6
paddd %xmm3,%xmm14
paddd %xmm3,%xmm15
movdqu %xmm0,(%rsi,%rdi,1)
movdqu %xmm6,16(%rsi,%rdi,1)
subq $2,%rcx
leaq 32(%rdi),%rdi
jnz L$ctr32_loop
L$ctr32_done:
L$ctr32_abort:
ret
.p2align 4
//-----------------------------------------------------------------------
// _vpaes_preheat: load the constant tables the vpaes cores keep in
// registers: %xmm9 = 0x0F nibble mask (k_s0F), %xmm10/%xmm11 = k_inv,
// %xmm12/%xmm13 = k_sb1, %xmm14/%xmm15 = k_sb2. %r10 is left pointing
// at k_s0F. Clobbers %r10 and the registers listed.
//-----------------------------------------------------------------------
_vpaes_preheat:
leaq L$k_s0F(%rip),%r10
movdqa -32(%r10),%xmm10
movdqa -16(%r10),%xmm11
movdqa 0(%r10),%xmm9
movdqa 48(%r10),%xmm13
movdqa 64(%r10),%xmm12
movdqa 80(%r10),%xmm15
movdqa 96(%r10),%xmm14
ret
.section __DATA,__const
.p2align 6
// Constant tables for the vector-permute AES implementation
// (Hamburg-style SSSE3 AES). All are 16-entry nibble lookup tables or
// pshufb masks; 64-byte aligned so movdqa loads are safe.
_vpaes_consts:
// k_inv: GF(2^4) inversion tables (the "i" and "1/x" halves).
L$k_inv:
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
// k_s0F: 0x0F in every byte — low-nibble mask.
L$k_s0F:
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
// k_ipt: input transform (AES basis -> vpaes basis).
L$k_ipt:
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
// k_sb1 / k_sb2: the two halves of the encryption sbox output.
L$k_sb1:
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
L$k_sb2:
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
// k_sbo: sbox output transform for the last round.
L$k_sbo:
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
// k_mc_forward / k_mc_backward: MixColumns byte-rotation shuffles,
// four variants indexed by the round counter (%r11 & 0x30).
L$k_mc_forward:
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
L$k_mc_backward:
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
// k_sr: ShiftRows permutations (identity plus the three rotations).
L$k_sr:
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
// k_rcon: packed round-constant seed for the key schedule.
L$k_rcon:
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
// k_s63: 0x5B bias added to stored round keys.
L$k_s63:
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
// k_opt: output transform (vpaes basis -> AES basis).
L$k_opt:
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
// k_deskew: undo the schedule transform on the last round key.
L$k_deskew:
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
// CTR-mode helpers: byte order for the 32-bit counter word and
// little-endian +1 / +2 increments.
L$rev_ctr:
.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
L$ctr_add_one:
.quad 0x0000000000000000, 0x0000000100000000
L$ctr_add_two:
.quad 0x0000000000000000, 0x0000000200000000
// ASCII attribution: "Vector Permutation AES for x86_64/SSSE3, Mike
// Hamburg (Stanford University)".
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.p2align 6
.text
#endif
|
Prom11111/plumbingtech | 78,605 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/p256-x86_64-asm-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.section .rodata
.align 64
// P-256 (secp256r1) constants, little-endian 64-bit limbs.
// .Lpoly: the field prime p = 2^256 - 2^224 + 2^192 + 2^96 - 1.
.Lpoly:
.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
// Broadcast constants for SIMD table-selection code.
.LOne:
.long 1,1,1,1,1,1,1,1
.LTwo:
.long 2,2,2,2,2,2,2,2
.LThree:
.long 3,3,3,3,3,3,3,3
// 1 in Montgomery form (R mod p).
.LONE_mont:
.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
// .Lord: the group order n; .LordK: -n^-1 mod 2^64 (Montgomery factor).
.Lord:
.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
.LordK:
.quad 0xccd1c8aaee00bc4f
.text
.globl ecp_nistz256_neg
.hidden ecp_nistz256_neg
.type ecp_nistz256_neg,@function
.align 32
//-----------------------------------------------------------------------
// void ecp_nistz256_neg(uint64_t res[4], const uint64_t a[4])
// res = -a mod p, constant time: computes 0 - a (borrow in %r13), adds
// p back, then cmovz keeps the un-reduced value when a was 0 so the
// result for a == 0 is 0, not p.
//-----------------------------------------------------------------------
ecp_nistz256_neg:
.cfi_startproc
_CET_ENDBR
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
.Lneg_body:
// r8:r11 = 0 - a, r13 = -borrow.
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
xorq %r13,%r13
subq 0(%rsi),%r8
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r8,%rax
sbbq 24(%rsi),%r11
leaq .Lpoly(%rip),%rsi
movq %r9,%rdx
sbbq $0,%r13
// Add p back; select the pre-addition value when there was no borrow
// (i.e. a == 0).
addq 0(%rsi),%r8
movq %r10,%rcx
adcq 8(%rsi),%r9
adcq 16(%rsi),%r10
movq %r11,%r12
adcq 24(%rsi),%r11
testq %r13,%r13
cmovzq %rax,%r8
cmovzq %rdx,%r9
movq %r8,0(%rdi)
cmovzq %rcx,%r10
movq %r9,8(%rdi)
cmovzq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq 0(%rsp),%r13
.cfi_restore %r13
movq 8(%rsp),%r12
.cfi_restore %r12
leaq 16(%rsp),%rsp
.cfi_adjust_cfa_offset -16
.Lneg_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_neg,.-ecp_nistz256_neg
.globl ecp_nistz256_ord_mul_mont_nohw
.hidden ecp_nistz256_ord_mul_mont_nohw
.type ecp_nistz256_ord_mul_mont_nohw,@function
.align 32
//-----------------------------------------------------------------------
// void ecp_nistz256_ord_mul_mont_nohw(uint64_t res[4],
//                                     const uint64_t a[4],
//                                     const uint64_t b[4])
// Montgomery multiplication modulo the P-256 group order .Lord, using
// mul/adc only (no BMI2/ADX). res = a*b*R^-1 mod n. Constant time.
// Structure: four interleaved multiply-and-reduce passes, one per limb
// of b, each folding in q = limb * .LordK and subtracting q*n; a final
// conditional subtraction of n selects via cmovc.
//-----------------------------------------------------------------------
ecp_nistz256_ord_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_mul_body:
// %rbx = b, %r14 = &n (.Lord), %r15 = n0' (.LordK).
movq 0(%rdx),%rax
movq %rdx,%rbx
leaq .Lord(%rip),%r14
movq .LordK(%rip),%r15
// Pass 0: acc = a * b[0]; then Montgomery-reduce one limb.
movq %rax,%rcx
mulq 0(%rsi)
movq %rax,%r8
movq %rcx,%rax
movq %rdx,%r9
mulq 8(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq 16(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %r8,%r13
imulq %r15,%r8
movq %rdx,%r11
mulq 24(%rsi)
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r12
// Subtract q*n; the n[2]=~0 / n[3] special forms use shl/shr $32.
mulq 0(%r14)
movq %r8,%rbp
addq %rax,%r13
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rcx
subq %r8,%r10
sbbq $0,%r8
mulq 8(%r14)
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %rbp,%rax
adcq %rdx,%r10
movq %rbp,%rdx
adcq $0,%r8
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 8(%rbx),%rax
sbbq %rdx,%rbp
addq %r8,%r11
adcq %rbp,%r12
adcq $0,%r13
// Pass 1: fold in a * b[1], reduce.
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %r9,%rcx
imulq %r15,%r9
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r12
adcq $0,%rdx
xorq %r8,%r8
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
mulq 0(%r14)
movq %r9,%rbp
addq %rax,%rcx
movq %r9,%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%r9
mulq 8(%r14)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq %rdx,%r11
movq %rbp,%rdx
adcq $0,%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r12
movq 16(%rbx),%rax
sbbq %rdx,%rbp
addq %r9,%r12
adcq %rbp,%r13
adcq $0,%r8
// Pass 2: fold in a * b[2], reduce.
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %r10,%rcx
imulq %r15,%r10
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r13
adcq $0,%rdx
xorq %r9,%r9
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
mulq 0(%r14)
movq %r10,%rbp
addq %rax,%rcx
movq %r10,%rax
adcq %rdx,%rcx
subq %r10,%r12
sbbq $0,%r10
mulq 8(%r14)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq %rdx,%r12
movq %rbp,%rdx
adcq $0,%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r13
movq 24(%rbx),%rax
sbbq %rdx,%rbp
addq %r10,%r13
adcq %rbp,%r8
adcq $0,%r9
// Pass 3: fold in a * b[3], reduce.
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rcx,%rax
adcq $0,%rdx
movq %r11,%rcx
imulq %r15,%r11
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r8
adcq $0,%rdx
xorq %r10,%r10
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
mulq 0(%r14)
movq %r11,%rbp
addq %rax,%rcx
movq %r11,%rax
adcq %rdx,%rcx
subq %r11,%r13
sbbq $0,%r11
mulq 8(%r14)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq %rdx,%r13
movq %rbp,%rdx
adcq $0,%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
sbbq %rdx,%rbp
addq %r11,%r8
adcq %rbp,%r9
adcq $0,%r10
// Final conditional subtraction of n (constant-time via cmovc).
movq %r12,%rsi
subq 0(%r14),%r12
movq %r13,%r11
sbbq 8(%r14),%r13
movq %r8,%rcx
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rsi,%r12
cmovcq %r11,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_mul_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_mul_mont_nohw,.-ecp_nistz256_ord_mul_mont_nohw
.globl ecp_nistz256_ord_sqr_mont_nohw
.hidden ecp_nistz256_ord_sqr_mont_nohw
.type ecp_nistz256_ord_sqr_mont_nohw,@function
.align 32
//-----------------------------------------------------------------------
// void ecp_nistz256_ord_sqr_mont_nohw(uint64_t res[4],
//                                     const uint64_t a[4],
//                                     uint64_t rep)
// Repeated Montgomery squaring mod the group order: res = a^(2^rep) * R
// adjustments, looping rep times (%rbx). mul/adc only, constant time.
// The ".byte 102,7x,15,110/126,..." sequences are hand-encoded movq
// between GPRs and XMM (xmm1-3 cache a's limbs across iterations).
//-----------------------------------------------------------------------
ecp_nistz256_ord_sqr_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_sqr_body:
movq 0(%rsi),%r8
movq 8(%rsi),%rax
movq 16(%rsi),%r14
movq 24(%rsi),%r15
leaq .Lord(%rip),%rsi
movq %rdx,%rbx
jmp .Loop_ord_sqr
.align 32
.Loop_ord_sqr:
// Off-diagonal products a[i]*a[j], i<j; limbs parked in xmm1-3.
movq %rax,%rbp
mulq %r8
movq %rax,%r9
.byte 102,72,15,110,205
movq %r14,%rax
movq %rdx,%r10
mulq %r8
addq %rax,%r10
movq %r15,%rax
.byte 102,73,15,110,214
adcq $0,%rdx
movq %rdx,%r11
mulq %r8
addq %rax,%r11
movq %r15,%rax
.byte 102,73,15,110,223
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
movq %rax,%r13
movq %r14,%rax
movq %rdx,%r14
mulq %rbp
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r15
mulq %rbp
addq %rax,%r12
adcq $0,%rdx
addq %r15,%r12
adcq %rdx,%r13
adcq $0,%r14
// Double the cross products, then add the diagonal squares.
xorq %r15,%r15
movq %r8,%rax
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
.byte 102,72,15,126,200
movq %rdx,%rbp
mulq %rax
addq %rbp,%r9
adcq %rax,%r10
.byte 102,72,15,126,208
adcq $0,%rdx
movq %rdx,%rbp
mulq %rax
addq %rbp,%r11
adcq %rax,%r12
.byte 102,72,15,126,216
adcq $0,%rdx
movq %rdx,%rbp
// Four Montgomery reduction steps (q = limb * n0' at 32(%rsi)).
movq %r8,%rcx
imulq 32(%rsi),%r8
mulq %rax
addq %rbp,%r13
adcq %rax,%r14
movq 0(%rsi),%rax
adcq %rdx,%r15
mulq %r8
movq %r8,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r8,%r10
sbbq $0,%rbp
mulq %r8
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %r8,%rax
adcq %rdx,%r10
movq %r8,%rdx
adcq $0,%rbp
movq %r9,%rcx
imulq 32(%rsi),%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 0(%rsi),%rax
sbbq %rdx,%r8
addq %rbp,%r11
adcq $0,%r8
mulq %r9
movq %r9,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%rbp
mulq %r9
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %r9,%rax
adcq %rdx,%r11
movq %r9,%rdx
adcq $0,%rbp
movq %r10,%rcx
imulq 32(%rsi),%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
movq 0(%rsi),%rax
sbbq %rdx,%r9
addq %rbp,%r8
adcq $0,%r9
mulq %r10
movq %r10,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r10,%r8
sbbq $0,%rbp
mulq %r10
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %r10,%rax
adcq %rdx,%r8
movq %r10,%rdx
adcq $0,%rbp
movq %r11,%rcx
imulq 32(%rsi),%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r9
movq 0(%rsi),%rax
sbbq %rdx,%r10
addq %rbp,%r9
adcq $0,%r10
mulq %r11
movq %r11,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r11,%r9
sbbq $0,%rbp
mulq %r11
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
movq %r11,%rdx
adcq $0,%rbp
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r10
sbbq %rdx,%r11
addq %rbp,%r10
adcq $0,%r11
// Add the high half, conditionally subtract n, loop rep times.
xorq %rdx,%rdx
addq %r12,%r8
adcq %r13,%r9
movq %r8,%r12
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%rax
adcq $0,%rdx
subq 0(%rsi),%r8
movq %r10,%r14
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r15
sbbq 24(%rsi),%r11
sbbq $0,%rdx
cmovcq %r12,%r8
cmovncq %r9,%rax
cmovncq %r10,%r14
cmovncq %r11,%r15
decq %rbx
jnz .Loop_ord_sqr
movq %r8,0(%rdi)
movq %rax,8(%rdi)
pxor %xmm1,%xmm1
movq %r14,16(%rdi)
pxor %xmm2,%xmm2
movq %r15,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_sqr_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_sqr_mont_nohw,.-ecp_nistz256_ord_sqr_mont_nohw
.globl ecp_nistz256_ord_mul_mont_adx
.hidden ecp_nistz256_ord_mul_mont_adx
.type ecp_nistz256_ord_mul_mont_adx,@function
.align 32
//-----------------------------------------------------------------------
// void ecp_nistz256_ord_mul_mont_adx(uint64_t res[4],
//                                    const uint64_t a[4],
//                                    const uint64_t b[4])
// BMI2/ADX variant of ord_mul_mont: mulx with the dual adcx/adox carry
// chains. Caller must have verified MULX+ADX support. %rsi and %r14 are
// biased by -128 so the displacement forms stay in signed-8-bit range.
// Constant time; final reduction via cmovc.
//-----------------------------------------------------------------------
ecp_nistz256_ord_mul_mont_adx:
.cfi_startproc
.Lecp_nistz256_ord_mul_mont_adx:
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_mulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
leaq .Lord-128(%rip),%r14
movq .LordK(%rip),%r15
// Pass 0: acc = a * b[0]; q = acc0 * n0'; acc += q*n.
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
mulxq %r11,%rbp,%r11
addq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
mulxq %r15,%rdx,%rax
adcq %rbp,%r10
adcq %rcx,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24+128(%r14),%rcx,%rbp
movq 8(%rbx),%rdx
adcxq %rcx,%r11
adoxq %rbp,%r12
adcxq %r8,%r12
adoxq %r8,%r13
adcq $0,%r13
// Pass 1: fold in a * b[1], reduce.
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%r14),%rcx,%rbp
movq 16(%rbx),%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r9,%r13
adoxq %r9,%r8
adcq $0,%r8
// Pass 2: fold in a * b[2], reduce.
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%r14),%rcx,%rbp
movq 24(%rbx),%rdx
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r10,%r8
adoxq %r10,%r9
adcq $0,%r9
// Pass 3: fold in a * b[3], reduce.
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r8
adoxq %rbp,%r9
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%r14),%rcx,%rbp
leaq 128(%r14),%r14
movq %r12,%rbx
adcxq %rcx,%r8
adoxq %rbp,%r9
movq %r13,%rdx
adcxq %r11,%r9
adoxq %r11,%r10
adcq $0,%r10
// Final conditional subtraction of n.
movq %r8,%rcx
subq 0(%r14),%r12
sbbq 8(%r14),%r13
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_mulx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_mul_mont_adx,.-ecp_nistz256_ord_mul_mont_adx
.globl ecp_nistz256_ord_sqr_mont_adx
.hidden ecp_nistz256_ord_sqr_mont_adx
.type ecp_nistz256_ord_sqr_mont_adx,@function
.align 32
//-----------------------------------------------------------------------
// void ecp_nistz256_ord_sqr_mont_adx(uint64_t res[4],
//                                    const uint64_t a[4],
//                                    uint64_t rep)
// BMI2/ADX variant of repeated Montgomery squaring mod the group order;
// loops rep times (%rbx). Limbs are cached in xmm1-3 across iterations
// (the .byte sequences are hand-encoded movq GPR<->XMM). Constant time.
//-----------------------------------------------------------------------
ecp_nistz256_ord_sqr_mont_adx:
.cfi_startproc
_CET_ENDBR
.Lecp_nistz256_ord_sqr_mont_adx:
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_sqrx_body:
movq %rdx,%rbx
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq .Lord(%rip),%rsi
jmp .Loop_ord_sqrx
.align 32
.Loop_ord_sqrx:
// Off-diagonal products via mulx; park limbs in xmm1-3.
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
movq %rdx,%rax
.byte 102,73,15,110,206
mulxq %r8,%rbp,%r12
movq %r14,%rdx
addq %rcx,%r10
.byte 102,73,15,110,215
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq %rax,%rdx
.byte 102,73,15,110,216
// Double the cross products and interleave the diagonal squares.
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
.byte 102,72,15,126,202
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
.byte 102,72,15,126,210
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
mulxq %rdx,%rcx,%rbp
.byte 0x67
.byte 102,72,15,126,218
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
adoxq %rbp,%r13
mulxq %rdx,%rcx,%rax
adoxq %rcx,%r14
adoxq %rax,%r15
// Four Montgomery reduction steps (q = limb * n0' at 32(%rsi)).
movq %r8,%rdx
mulxq 32(%rsi),%rdx,%rcx
xorq %rax,%rax
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
adcxq %rax,%r8
movq %r9,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
adoxq %rax,%r9
movq %r10,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
adcxq %rax,%r10
movq %r11,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
adoxq %rax,%r11
// Add the high half, conditionally subtract n, loop rep times.
addq %r8,%r12
adcq %r13,%r9
movq %r12,%rdx
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%r14
adcq $0,%rax
subq 0(%rsi),%r12
movq %r10,%r15
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r8
sbbq 24(%rsi),%r11
sbbq $0,%rax
cmovncq %r12,%rdx
cmovncq %r9,%r14
cmovncq %r10,%r15
cmovncq %r11,%r8
decq %rbx
jnz .Loop_ord_sqrx
movq %rdx,0(%rdi)
movq %r14,8(%rdi)
pxor %xmm1,%xmm1
movq %r15,16(%rdi)
pxor %xmm2,%xmm2
movq %r8,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_sqrx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_sqr_mont_adx,.-ecp_nistz256_ord_sqr_mont_adx
.globl ecp_nistz256_mul_mont_nohw
.hidden ecp_nistz256_mul_mont_nohw
.type ecp_nistz256_mul_mont_nohw,@function
.align 32
//-----------------------------------------------------------------------
// void ecp_nistz256_mul_mont_nohw(uint64_t res[4], const uint64_t a[4],
//                                 const uint64_t b[4])
// Montgomery multiplication modulo the field prime p (.Lpoly): thin
// wrapper that saves the callee-saved registers and tail-calls into
// __ecp_nistz256_mul_montq with b[0] preloaded in %rax and a's limbs
// in %r9-%r12.
//-----------------------------------------------------------------------
ecp_nistz256_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lmul_body:
movq %rdx,%rbx
movq 0(%rdx),%rax
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
call __ecp_nistz256_mul_montq
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lmul_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_mul_mont_nohw,.-ecp_nistz256_mul_mont_nohw
.type __ecp_nistz256_mul_montq,@function
.align 32
//-----------------------------------------------------------------------
// __ecp_nistz256_mul_montq: Montgomery multiply mod p (.Lpoly),
// mul/adc variant. Internal helper — non-standard calling convention:
// In:  %rdi = res, %rsi = a, %rbx = b, %rax = b[0], %r9-%r12 = a[0..3]
// Out: res[0..3] = a*b*R^-1 mod p.
// Reduction exploits p's special form: p[1] and p[3] live in %r14/%r15,
// p[0] = ~0 and p[2] = 0 are handled with shl/shr $32 folds.
// Four multiply passes, then a constant-time final subtraction (cmovc).
//-----------------------------------------------------------------------
__ecp_nistz256_mul_montq:
.cfi_startproc
// Pass 0: acc = a * b[0], fold limb 0.
movq %rax,%rbp
mulq %r9
movq .Lpoly+8(%rip),%r14
movq %rax,%r8
movq %rbp,%rax
movq %rdx,%r9
mulq %r10
movq .Lpoly+24(%rip),%r15
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %r11
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r12
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
xorq %r13,%r13
movq %rdx,%r12
movq %r8,%rbp
shlq $32,%r8
mulq %r15
shrq $32,%rbp
addq %r8,%r9
adcq %rbp,%r10
adcq %rax,%r11
movq 8(%rbx),%rax
adcq %rdx,%r12
adcq $0,%r13
xorq %r8,%r8
// Pass 1: fold in a * b[1].
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
movq %r9,%rbp
shlq $32,%r9
mulq %r15
shrq $32,%rbp
addq %r9,%r10
adcq %rbp,%r11
adcq %rax,%r12
movq 16(%rbx),%rax
adcq %rdx,%r13
adcq $0,%r8
xorq %r9,%r9
// Pass 2: fold in a * b[2].
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
movq %r10,%rbp
shlq $32,%r10
mulq %r15
shrq $32,%rbp
addq %r10,%r11
adcq %rbp,%r12
adcq %rax,%r13
movq 24(%rbx),%rax
adcq %rdx,%r8
adcq $0,%r9
xorq %r10,%r10
// Pass 3: fold in a * b[3].
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
movq %r11,%rbp
shlq $32,%r11
mulq %r15
shrq $32,%rbp
addq %r11,%r12
adcq %rbp,%r13
movq %r12,%rcx
adcq %rax,%r8
adcq %rdx,%r9
movq %r13,%rbp
adcq $0,%r10
// Final conditional subtraction of p (subq $-1 == sub p[0]).
subq $-1,%r12
movq %r8,%rbx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rdx
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rcx,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rbx,%r8
movq %r13,8(%rdi)
cmovcq %rdx,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
.globl ecp_nistz256_sqr_mont_nohw
.hidden ecp_nistz256_sqr_mont_nohw
.type ecp_nistz256_sqr_mont_nohw,@function
.align 32
//-----------------------------------------------------------------------
// void ecp_nistz256_sqr_mont_nohw(uint64_t res[4], const uint64_t a[4])
// Montgomery squaring mod p: wrapper that saves callee-saved registers
// and calls __ecp_nistz256_sqr_montq with a's limbs preloaded in
// %rax, %r14, %r15, %r8.
//-----------------------------------------------------------------------
ecp_nistz256_sqr_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lsqr_body:
movq 0(%rsi),%rax
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
call __ecp_nistz256_sqr_montq
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lsqr_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_sqr_mont_nohw,.-ecp_nistz256_sqr_mont_nohw
.type __ecp_nistz256_sqr_montq,@function
.align 32
//-----------------------------------------------------------------------
// __ecp_nistz256_sqr_montq: Montgomery square mod p (.Lpoly), mul/adc
// variant. Internal helper — non-standard calling convention:
// In:  %rdi = res, %rsi = a, %rax/%r14/%r15/%r8 = a[0..3]
// Out: res[0..3] = a^2 * R^-1 mod p.
// Computes the off-diagonal products, doubles them, adds the diagonal
// squares, then runs four reduction folds using p's special shape
// before a constant-time final subtraction.
//-----------------------------------------------------------------------
__ecp_nistz256_sqr_montq:
.cfi_startproc
// Off-diagonal products a[i]*a[j], i<j.
movq %rax,%r13
mulq %r14
movq %rax,%r9
movq %r15,%rax
movq %rdx,%r10
mulq %r13
addq %rax,%r10
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r13
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq %r14
addq %rax,%r12
movq %r8,%rax
adcq $0,%rdx
addq %rbp,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r15
// Double, then interleave the diagonal squares a[i]^2.
xorq %r15,%r15
addq %rax,%r13
movq 0(%rsi),%rax
movq %rdx,%r14
adcq $0,%r14
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
movq 8(%rsi),%rax
movq %rdx,%rcx
mulq %rax
addq %rcx,%r9
adcq %rax,%r10
movq 16(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r11
adcq %rax,%r12
movq 24(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r13
adcq %rax,%r14
movq %r8,%rax
adcq %rdx,%r15
// Four reduction folds; %rsi/%rbp now hold p[1]/p[3].
movq .Lpoly+8(%rip),%rsi
movq .Lpoly+24(%rip),%rbp
movq %r8,%rcx
shlq $32,%r8
mulq %rbp
shrq $32,%rcx
addq %r8,%r9
adcq %rcx,%r10
adcq %rax,%r11
movq %r9,%rax
adcq $0,%rdx
movq %r9,%rcx
shlq $32,%r9
movq %rdx,%r8
mulq %rbp
shrq $32,%rcx
addq %r9,%r10
adcq %rcx,%r11
adcq %rax,%r8
movq %r10,%rax
adcq $0,%rdx
movq %r10,%rcx
shlq $32,%r10
movq %rdx,%r9
mulq %rbp
shrq $32,%rcx
addq %r10,%r11
adcq %rcx,%r8
adcq %rax,%r9
movq %r11,%rax
adcq $0,%rdx
movq %r11,%rcx
shlq $32,%r11
movq %rdx,%r10
mulq %rbp
shrq $32,%rcx
addq %r11,%r8
adcq %rcx,%r9
adcq %rax,%r10
adcq $0,%rdx
// Add high half and do the constant-time final subtraction of p.
xorq %r11,%r11
addq %r8,%r12
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %rdx,%r15
movq %r13,%r9
adcq $0,%r11
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%rcx
sbbq %rbp,%r15
sbbq $0,%r11
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %rcx,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
# void ecp_nistz256_mul_mont_adx(uint64_t res[4], const uint64_t a[4],
#                                const uint64_t b[4])
# Montgomery multiplication in the P-256 field using the ADX/BMI2
# instruction set (mulx/adcx/adox).  Caller must have verified ADX support.
# In:  %rdi = res, %rsi = a, %rdx = b.
# Moves b to %rbx, preloads b[0] into %rdx (mulx's implicit operand) and
# a[0..3] into %r9-%r12, biases %rsi by -128 (the inner routine addresses
# a as 0+128(%rsi) etc.), then calls __ecp_nistz256_mul_montx.
.globl ecp_nistz256_mul_mont_adx
.hidden ecp_nistz256_mul_mont_adx
.type ecp_nistz256_mul_mont_adx,@function
.align 32
ecp_nistz256_mul_mont_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lmulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
call __ecp_nistz256_mul_montx
# Restore callee-saved registers and pop the 48-byte push frame.
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lmulx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_mul_mont_adx,.-ecp_nistz256_mul_mont_adx
# Local helper: Montgomery multiplication core using mulx/adcx/adox
# (two independent carry chains: CF via adcx, OF via adox).
# In:  %rdx = b[0], %r9-%r12 = a[0..3], %rbx -> b, %rsi -> a-128,
#      %rdi -> result.
# Out: result[4] = a*b * 2^-256 mod p, fully reduced, stored to (%rdi).
# Clobbers: %rax, %rcx, %rdx, %rbp, %rbx, %r8-%r15, flags.
# Constant-time: straight-line code; final reduction via cmovc.
.type __ecp_nistz256_mul_montx,@function
.align 32
__ecp_nistz256_mul_montx:
.cfi_startproc
# Row 0: acc = a * b[0]; fold in one Montgomery reduction step.
# %r14 holds the constant 32 used by shlx/shrx for the p-shape tricks.
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
movq $32,%r14
xorq %r13,%r13
mulxq %r11,%rbp,%r11
movq .Lpoly+24(%rip),%r15
adcq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
adcq %rbp,%r10
shlxq %r14,%r8,%rbp
adcq %rcx,%r11
shrxq %r14,%r8,%rcx
adcq $0,%r12
addq %rbp,%r9
adcq %rcx,%r10
mulxq %r15,%rcx,%rbp
movq 8(%rbx),%rdx
adcq %rcx,%r11
adcq %rbp,%r12
adcq $0,%r13
# Row 1: acc += a * b[1]; reduce.
xorq %r8,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
adcxq %rcx,%r12
shlxq %r14,%r9,%rcx
adoxq %rbp,%r13
shrxq %r14,%r9,%rbp
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
addq %rcx,%r10
adcq %rbp,%r11
mulxq %r15,%rcx,%rbp
movq 16(%rbx),%rdx
adcq %rcx,%r12
adcq %rbp,%r13
adcq $0,%r8
# Row 2: acc += a * b[2]; reduce.
xorq %r9,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
adcxq %rcx,%r13
shlxq %r14,%r10,%rcx
adoxq %rbp,%r8
shrxq %r14,%r10,%rbp
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
addq %rcx,%r11
adcq %rbp,%r12
mulxq %r15,%rcx,%rbp
movq 24(%rbx),%rdx
adcq %rcx,%r13
adcq %rbp,%r8
adcq $0,%r9
# Row 3: acc += a * b[3]; reduce.
xorq %r10,%r10
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
adcxq %rcx,%r8
shlxq %r14,%r11,%rcx
adoxq %rbp,%r9
shrxq %r14,%r11,%rbp
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
addq %rcx,%r12
adcq %rbp,%r13
mulxq %r15,%rcx,%rbp
movq %r12,%rbx
movq .Lpoly+8(%rip),%r14
adcq %rcx,%r8
movq %r13,%rdx
adcq %rbp,%r9
adcq $0,%r10
# Final conditional subtraction of p, selected branch-free with cmovc.
xorl %eax,%eax
movq %r8,%rcx
sbbq $-1,%r12
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rbp
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %rbp,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
# void ecp_nistz256_sqr_mont_adx(uint64_t res[4], const uint64_t a[4])
# Montgomery squaring in the P-256 field, ADX/BMI2 path.
# In:  %rdi = res, %rsi = a.
# Preloads a[0] into %rdx (mulx's implicit operand) and a[1..3] into
# %r14/%r15/%r8, biases %rsi by -128 for the inner routine's addressing,
# then calls __ecp_nistz256_sqr_montx.
.globl ecp_nistz256_sqr_mont_adx
.hidden ecp_nistz256_sqr_mont_adx
.type ecp_nistz256_sqr_mont_adx,@function
.align 32
ecp_nistz256_sqr_mont_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lsqrx_body:
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq -128(%rsi),%rsi
call __ecp_nistz256_sqr_montx
# Restore callee-saved registers and pop the 48-byte push frame.
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lsqrx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_sqr_mont_adx,.-ecp_nistz256_sqr_mont_adx
# Local helper: Montgomery squaring core, ADX/BMI2 variant.
# In:  %rdx = a[0], %r14 = a[1], %r15 = a[2], %r8 = a[3], %rsi -> a-128,
#      %rdi -> result.
# Out: result[4] = a^2 * 2^-256 mod p, fully reduced, stored to (%rdi).
# Clobbers: %rax, %rcx, %rdx, %rbp, %rsi, %r8-%r15, flags.
# The stray ".byte 0x67" prefixes are deliberate perlasm padding for
# instruction alignment/decoder tuning; they do not change semantics.
.type __ecp_nistz256_sqr_montx,@function
.align 32
__ecp_nistz256_sqr_montx:
.cfi_startproc
# Cross products a[i]*a[j], i<j.
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
xorl %eax,%eax
adcq %rcx,%r10
mulxq %r8,%rbp,%r12
movq %r14,%rdx
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq 0+128(%rsi),%rdx
xorq %r15,%r15
# Double the cross products (adcx chain) while folding in the
# diagonal squares a[i]^2 (adox chain).
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
movq 8+128(%rsi),%rdx
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
movq 16+128(%rsi),%rdx
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
.byte 0x67
mulxq %rdx,%rcx,%rbp
movq 24+128(%rsi),%rdx
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
movq $32,%rsi
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %rdx,%rcx,%rax
movq .Lpoly+24(%rip),%rdx
adoxq %rcx,%r14
shlxq %rsi,%r8,%rcx
adoxq %rax,%r15
shrxq %rsi,%r8,%rax
movq %rdx,%rbp
# Montgomery reduction: four folds using the shift-by-32 structure of p
# (%rsi now holds the shift count 32).
addq %rcx,%r9
adcq %rax,%r10
mulxq %r8,%rcx,%r8
adcq %rcx,%r11
shlxq %rsi,%r9,%rcx
adcq $0,%r8
shrxq %rsi,%r9,%rax
addq %rcx,%r10
adcq %rax,%r11
mulxq %r9,%rcx,%r9
adcq %rcx,%r8
shlxq %rsi,%r10,%rcx
adcq $0,%r9
shrxq %rsi,%r10,%rax
addq %rcx,%r11
adcq %rax,%r8
mulxq %r10,%rcx,%r10
adcq %rcx,%r9
shlxq %rsi,%r11,%rcx
adcq $0,%r10
shrxq %rsi,%r11,%rax
addq %rcx,%r8
adcq %rax,%r9
mulxq %r11,%rcx,%r11
adcq %rcx,%r10
adcq $0,%r11
# Add reduced low half into high half, then conditionally subtract p.
xorq %rdx,%rdx
addq %r8,%r12
movq .Lpoly+8(%rip),%rsi
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %r11,%r15
movq %r13,%r9
adcq $0,%rdx
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%r11
sbbq %rbp,%r15
sbbq $0,%rdx
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %r11,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
# ecp_nistz256_select_w5_nohw(out, table, index)
# Constant-time select of one 96-byte entry from a 16-entry table
# (window width 5), SSE2 path.
# In:  %rdi = out, %rsi = table, %edx = index (1-based; 0 selects nothing,
#      leaving the output zero).
# Scans ALL 16 entries; a pcmpeqd-generated mask keeps only the matching
# one, so memory access pattern is independent of the secret index.
.globl ecp_nistz256_select_w5_nohw
.hidden ecp_nistz256_select_w5_nohw
.type ecp_nistz256_select_w5_nohw,@function
.align 32
ecp_nistz256_select_w5_nohw:
.cfi_startproc
_CET_ENDBR
movdqa .LOne(%rip),%xmm0
movd %edx,%xmm1
# xmm2..xmm7 accumulate the six 16-byte chunks of the selected entry.
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movdqa %xmm0,%xmm8
pshufd $0,%xmm1,%xmm1
movq $16,%rax
.Lselect_loop_sse_w5:
# xmm15 = all-ones iff current counter == index.
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
pcmpeqd %xmm1,%xmm15
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
movdqa 64(%rsi),%xmm13
movdqa 80(%rsi),%xmm14
leaq 96(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
pand %xmm15,%xmm13
por %xmm12,%xmm5
pand %xmm15,%xmm14
por %xmm13,%xmm6
por %xmm14,%xmm7
decq %rax
jnz .Lselect_loop_sse_w5
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
movdqu %xmm6,64(%rdi)
movdqu %xmm7,80(%rdi)
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w5_nohw:
.size ecp_nistz256_select_w5_nohw,.-ecp_nistz256_select_w5_nohw
# ecp_nistz256_select_w7_nohw(out, table, index)
# Constant-time select of one 64-byte entry from a 64-entry table
# (window width 7), SSE2 path.  Same masked-scan technique as the w5
# variant: all entries are read, only the matching one survives the mask.
# In:  %rdi = out, %rsi = table, %edx = index.
.globl ecp_nistz256_select_w7_nohw
.hidden ecp_nistz256_select_w7_nohw
.type ecp_nistz256_select_w7_nohw,@function
.align 32
ecp_nistz256_select_w7_nohw:
.cfi_startproc
_CET_ENDBR
movdqa .LOne(%rip),%xmm8
movd %edx,%xmm1
# xmm2..xmm5 accumulate the four 16-byte chunks of the selected entry.
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa %xmm8,%xmm0
pshufd $0,%xmm1,%xmm1
movq $64,%rax
.Lselect_loop_sse_w7:
# xmm15 = all-ones iff current counter == index.
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
pcmpeqd %xmm1,%xmm15
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
leaq 64(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
prefetcht0 255(%rsi)
por %xmm12,%xmm5
decq %rax
jnz .Lselect_loop_sse_w7
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w7_nohw:
.size ecp_nistz256_select_w7_nohw,.-ecp_nistz256_select_w7_nohw
# ecp_nistz256_select_w5_avx2(out, table, index)
# Constant-time w5 table select, AVX2 path.  Processes two 96-byte table
# entries per iteration (8 iterations x 2 = 16 entries) with two running
# counters (ymm5 starts at .LOne, ymm10 at .LTwo, both stepped by .LTwo).
# In:  %rdi = out, %rsi = table, %edx = index.
# vzeroupper at entry/exit avoids AVX->SSE transition penalties.
.globl ecp_nistz256_select_w5_avx2
.hidden ecp_nistz256_select_w5_avx2
.type ecp_nistz256_select_w5_avx2,@function
.align 32
ecp_nistz256_select_w5_avx2:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqa .LTwo(%rip),%ymm0
# ymm2..ymm4 accumulate the three 32-byte chunks of the selected entry.
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vpxor %ymm4,%ymm4,%ymm4
vmovdqa .LOne(%rip),%ymm5
vmovdqa .LTwo(%rip),%ymm10
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $8,%rax
.Lselect_loop_avx2_w5:
vmovdqa 0(%rsi),%ymm6
vmovdqa 32(%rsi),%ymm7
vmovdqa 64(%rsi),%ymm8
vmovdqa 96(%rsi),%ymm11
vmovdqa 128(%rsi),%ymm12
vmovdqa 160(%rsi),%ymm13
# Masks for the two candidate entries of this iteration.
vpcmpeqd %ymm1,%ymm5,%ymm9
vpcmpeqd %ymm1,%ymm10,%ymm14
vpaddd %ymm0,%ymm5,%ymm5
vpaddd %ymm0,%ymm10,%ymm10
leaq 192(%rsi),%rsi
vpand %ymm9,%ymm6,%ymm6
vpand %ymm9,%ymm7,%ymm7
vpand %ymm9,%ymm8,%ymm8
vpand %ymm14,%ymm11,%ymm11
vpand %ymm14,%ymm12,%ymm12
vpand %ymm14,%ymm13,%ymm13
vpxor %ymm6,%ymm2,%ymm2
vpxor %ymm7,%ymm3,%ymm3
vpxor %ymm8,%ymm4,%ymm4
vpxor %ymm11,%ymm2,%ymm2
vpxor %ymm12,%ymm3,%ymm3
vpxor %ymm13,%ymm4,%ymm4
decq %rax
jnz .Lselect_loop_avx2_w5
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,64(%rdi)
vzeroupper
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w5_avx2:
.size ecp_nistz256_select_w5_avx2,.-ecp_nistz256_select_w5_avx2
# ecp_nistz256_select_w7_avx2(out, table, index)
# Constant-time w7 table select, AVX2 path.  Three 64-byte entries per
# iteration with counters ymm4/ymm8/ymm12 stepped by .LThree; 21 loop
# iterations cover 63 entries, and the 64th entry is handled once after
# the loop.
# In:  %rdi = out, %rsi = table, %edx = index.
.globl ecp_nistz256_select_w7_avx2
.hidden ecp_nistz256_select_w7_avx2
.type ecp_nistz256_select_w7_avx2,@function
.align 32
ecp_nistz256_select_w7_avx2:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqa .LThree(%rip),%ymm0
# ymm2/ymm3 accumulate the two 32-byte chunks of the selected entry.
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vmovdqa .LOne(%rip),%ymm4
vmovdqa .LTwo(%rip),%ymm8
vmovdqa .LThree(%rip),%ymm12
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $21,%rax
.Lselect_loop_avx2_w7:
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vmovdqa 64(%rsi),%ymm9
vmovdqa 96(%rsi),%ymm10
vmovdqa 128(%rsi),%ymm13
vmovdqa 160(%rsi),%ymm14
# Masks for the three candidate entries of this iteration.
vpcmpeqd %ymm1,%ymm4,%ymm7
vpcmpeqd %ymm1,%ymm8,%ymm11
vpcmpeqd %ymm1,%ymm12,%ymm15
vpaddd %ymm0,%ymm4,%ymm4
vpaddd %ymm0,%ymm8,%ymm8
vpaddd %ymm0,%ymm12,%ymm12
leaq 192(%rsi),%rsi
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpand %ymm11,%ymm9,%ymm9
vpand %ymm11,%ymm10,%ymm10
vpand %ymm15,%ymm13,%ymm13
vpand %ymm15,%ymm14,%ymm14
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vpxor %ymm9,%ymm2,%ymm2
vpxor %ymm10,%ymm3,%ymm3
vpxor %ymm13,%ymm2,%ymm2
vpxor %ymm14,%ymm3,%ymm3
decq %rax
jnz .Lselect_loop_avx2_w7
# Tail: the 64th table entry (counter ymm4 has advanced to 64).
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vpcmpeqd %ymm1,%ymm4,%ymm7
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vzeroupper
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w7_avx2:
.size ecp_nistz256_select_w7_avx2,.-ecp_nistz256_select_w7_avx2
# Local helper: modular addition.
# In:  %r12,%r13,%r8,%r9 = a (four limbs), %rbx -> b, %rdi -> result,
#      %r14 = .Lpoly[1], %r15 = .Lpoly[3] (pre-loaded by caller).
# Out: result = a + b mod p, stored to (%rdi) and left in %r12,%r13,%r8,%r9.
# Branch-free: the reduction (subtract p) is selected with cmovc.
.type __ecp_nistz256_add_toq,@function
.align 32
__ecp_nistz256_add_toq:
.cfi_startproc
xorq %r11,%r11
addq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
# Trial subtraction of p (= 2^256-2^224+2^192+2^96-1, so limb0 is -1).
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
# Borrow => sum < p, keep the pre-subtraction value.
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
# Local helper: modular subtraction.
# In:  %r12,%r13,%r8,%r9 = a, %rbx -> b, %rdi -> result,
#      %r14 = .Lpoly[1], %r15 = .Lpoly[3].
# Out: result = a - b mod p, stored to (%rdi) and left in %r12,%r13,%r8,%r9.
# Computes a-b, unconditionally adds p, then uses the borrow mask in %r11
# (0 or -1 from sbbq %r11,%r11) to pick the correct value via cmovz.
.type __ecp_nistz256_sub_fromq,@function
.align 32
__ecp_nistz256_sub_fromq:
.cfi_startproc
subq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq %r11,%r11
# Add p back (limb0 of p is -1).
addq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
# No borrow (%r11 == 0) => keep the raw difference.
testq %r11,%r11
cmovzq %rax,%r12
cmovzq %rbp,%r13
movq %r12,0(%rdi)
cmovzq %rcx,%r8
movq %r13,8(%rdi)
cmovzq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
# Local helper: in-register modular subtraction (no store).
# In:  %rax,%rbp,%rcx,%r10 = a, %r12,%r13,%r8,%r9 = b,
#      %r14 = .Lpoly[1], %r15 = .Lpoly[3].
# Out: %r12,%r13,%r8,%r9 = a - b mod p (result left in registers only;
#      callers store it themselves).
# Same trick as sub_fromq: compute a-b, add p, and select the p-adjusted
# value only when the raw subtraction borrowed (%r11 != 0).
.type __ecp_nistz256_subq,@function
.align 32
__ecp_nistz256_subq:
.cfi_startproc
subq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq %r11,%r11
addq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
testq %r11,%r11
cmovnzq %rax,%r12
cmovnzq %rbp,%r13
cmovnzq %rcx,%r8
cmovnzq %r10,%r9
ret
.cfi_endproc
.size __ecp_nistz256_subq,.-__ecp_nistz256_subq
# Local helper: modular doubling.
# In:  %r12,%r13,%r8,%r9 = a, %rdi -> result,
#      %r14 = .Lpoly[1], %r15 = .Lpoly[3].
# Out: result = 2*a mod p, stored to (%rdi) and left in %r12,%r13,%r8,%r9.
# Doubles via a self-add, then does the standard branch-free conditional
# subtraction of p (cmovc on borrow).
.type __ecp_nistz256_mul_by_2q,@function
.align 32
__ecp_nistz256_mul_by_2q:
.cfi_startproc
xorq %r11,%r11
addq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
# void ecp_nistz256_point_double_nohw(P256_POINT *r, const P256_POINT *a)
# Jacobian point doubling, generic x86_64 (mulq) path.
# In:  %rdi = result point (x,y,z at offsets 0/32/64), %rsi = input point.
# Uses a 160+8 byte stack frame of 32-byte field-element temporaries at
# 0/32/64/96/128(%rsp).  The y, x, z coordinates are staged through
# xmm register moves (the raw .byte sequences are perlasm-encoded
# movq/movd between GPR and xmm — do not edit them by hand).
# .Lpoint_double_shortcutq is also entered directly by point_add when the
# two addends turn out to be the same point.
.globl ecp_nistz256_point_double_nohw
.hidden ecp_nistz256_point_double_nohw
.type ecp_nistz256_point_double_nohw,@function
.align 32
ecp_nistz256_point_double_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $160+8,%rsp
.cfi_adjust_cfa_offset 32*5+8
.Lpoint_doubleq_body:
.Lpoint_double_shortcutq:
# Stash in_x on the stack, load in_y into %r12..%r9, cache p limbs.
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq .Lpoly+8(%rip),%r14
movq .Lpoly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
# Keep res_x/res_y/res_z pointers in xmm6..xmm8 across helper calls.
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
# S = 2*in_y  ->  0(%rsp)
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
# Zsqr = in_z^2  ->  64(%rsp)
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-0(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montq
# S = S^2  ->  0(%rsp)
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montq
# res_z = in_z * in_y, then doubled -> final Z.
movq 32(%rbx),%rax
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-0(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montq
call __ecp_nistz256_mul_by_2q
# M = in_x + Zsqr  ->  32(%rsp)
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
# Zsqr = in_x - Zsqr  ->  64(%rsp)
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
# res_y = S^2
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montq
# Inlined halving mod p: add p if the value is odd (cmovz keeps the
# original when bit0 was already clear), then shift right one bit across
# the four limbs.
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
# M = M * (in_x - Zsqr), then M = 3*M.
movq 64(%rsp),%rax
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
# S = S * in_x  ->  0(%rsp); tmp = 2*S  ->  128(%rsp)
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
# res_x = M^2 - 2*S
movq 0+32(%rsp),%rax
movq 8+32(%rsp),%r14
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montq
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromq
# res_y = M * (S - res_x) - S'^2 (final y computation).
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subq
movq 32(%rsp),%rax
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-0(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromq
# Epilogue: unwind frame + six pushes via a single lea.
leaq 160+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_doubleq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_double_nohw,.-ecp_nistz256_point_double_nohw
# void ecp_nistz256_point_add_nohw(P256_POINT *r, const P256_POINT *a,
#                                  const P256_POINT *b)
# Full Jacobian point addition, generic x86_64 (mulq) path.
# In:  %rdi = result, %rsi = a, %rdx = b (x,y,z at offsets 0/32/64 each).
# Uses a 576+8 byte stack frame of field-element temporaries; copies of
# both input points are staged at 384..544(%rsp).  Infinity-ness of each
# input is computed into xmm4/xmm5 all-ones/all-zero masks and used at
# the end to blend between the computed sum and either input, keeping
# the whole routine constant-time except for the equal-points check:
# when the points are equal (U1==U2, S1==S2) control transfers into
# point_double's .Lpoint_double_shortcutq; when they are inverses the
# result is set to zero.  The .byte sequences are perlasm-encoded
# GPR<->xmm movq instructions.
.globl ecp_nistz256_point_add_nohw
.hidden ecp_nistz256_point_add_nohw
.type ecp_nistz256_point_add_nohw,@function
.align 32
ecp_nistz256_point_add_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $576+8,%rsp
.cfi_adjust_cfa_offset 32*18+8
.Lpoint_addq_body:
# Copy point a to 384..448(%rsp); start z1-is-zero detection in xmm5.
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
# Copy point b to 480..544(%rsp); compute z2^2 -> 96(%rsp).
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-0(%rsi),%rsi
movq %rax,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
# Finalize the in1/in2-infinity masks (xmm5 = z1==0, xmm4 = z2==0).
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
# z1^2 -> 32(%rsp)
movq 64+0(%rbx),%rax
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-0(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
# S1 = y1 * z2^3 -> 224(%rsp);  S2 = y2 * z1^3 -> 256(%rsp)
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 416(%rsp),%rax
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 512(%rsp),%rax
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq 0+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
# R = S2 - S1 -> 64(%rsp); remember whether R == 0 in %r12/xmm3.
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
# U1 = x1 * z2^2 -> 160(%rsp);  U2 = x2 * z1^2 -> 192(%rsp)
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 480(%rsp),%rax
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
# H = U2 - U1 -> 0(%rsp).  If H==0 and R==0 and neither input is at
# infinity: points are equal -> branch to doubling; if H==0, R!=0:
# points are inverses -> return the point at infinity.
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e
jnz .Ladd_proceedq
testq %r9,%r9
jz .Ladd_doubleq
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp .Ladd_doneq
.align 32
.Ladd_doubleq:
# Equal points: reuse part of this frame and jump into point_double.
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
.cfi_adjust_cfa_offset -416
jmp .Lpoint_double_shortcutq
.cfi_adjust_cfa_offset 416
.align 32
.Ladd_proceedq:
# R^2 -> 96(%rsp);  res_z = H * z1 * z2 -> 352(%rsp)
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq 0+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
# H^3 -> 128(%rsp);  U1*H^2 -> 192(%rsp)
movq 0(%rsp),%rax
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 160(%rsp),%rax
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
# Inlined mul_by_2(U1*H^2) then res_x = R^2 - H^3 - 2*U1*H^2.
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromq
# res_y = R*(U1*H^2 - res_x) - S1*H^3.
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
# Blend: if in1 was infinity pick in2's coords; if in2 was infinity pick
# in1's; otherwise keep the computed sum (masks xmm4/xmm5).
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
.Ladd_doneq:
# Epilogue: unwind frame + six pushes via a single lea.
leaq 576+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_addq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_nohw,.-ecp_nistz256_point_add_nohw
# void ecp_nistz256_point_add_affine_nohw(P256_POINT *r,
#                                         const P256_POINT *a,
#                                         const P256_POINT_AFFINE *b)
# Mixed Jacobian + affine point addition, generic x86_64 (mulq) path.
# In:  %rdi = result, %rsi = Jacobian point a, %rdx = affine point b
#      (x,y only; its implicit z is 1, i.e. .LONE_mont in the Montgomery
#      domain).  Uses a 480+8 byte stack frame; copies of the inputs are
#      staged at 320..448(%rsp).  Infinity masks for each input are built
#      in xmm5 (in1) and xmm4 (in2) and used at the end to blend the
#      result, as in point_add.  The .byte sequences are perlasm-encoded
#      GPR<->xmm movq instructions.
.globl ecp_nistz256_point_add_affine_nohw
.hidden ecp_nistz256_point_add_affine_nohw
.type ecp_nistz256_point_add_affine_nohw,@function
.align 32
ecp_nistz256_point_add_affine_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $480+8,%rsp
.cfi_adjust_cfa_offset 32*15+8
.Ladd_affineq_body:
# Copy point a to 320..384(%rsp); start its infinity mask in xmm5.
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
# Copy affine point b to 416..448(%rsp); build its mask in xmm3.
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
# z1^2 -> 32(%rsp)
leaq 64-0(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
# U2 = x2 * z1^2 -> 0(%rsp); finalize masks (xmm5 = in1-inf, xmm4 = in2-inf).
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rax
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-0(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
# H = U2 - x1 -> 64(%rsp)
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
# S2 = y2 * z1^3 -> 32(%rsp);  res_z = H * z1 -> 288(%rsp)
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
# R = S2 - y1 -> 96(%rsp)
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromq
# H^2 -> 128(%rsp);  R^2 -> 192(%rsp);  H^3 -> 160(%rsp)
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+96(%rsp),%rax
movq 8+96(%rsp),%r14
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
# U1*H^2 -> 0(%rsp)
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq 0+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
# Inlined mul_by_2(U1*H^2), then res_x = R^2 - H^3 - 2*U1*H^2.
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromq
# res_y = R*(U1*H^2 - res_x) - y1*H^3.
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rax
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq 0+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
# Blend: in1 infinite -> take in2 (with z = .LONE_mont, since in2 is
# affine); in2 infinite -> take in1; otherwise keep the computed sum.
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand .LONE_mont(%rip),%xmm2
pand .LONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
# Epilogue: unwind frame + six pushes via a single lea.
leaq 480+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Ladd_affineq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_affine_nohw,.-ecp_nistz256_point_add_affine_nohw
// __ecp_nistz256_add_tox — 256-bit addition modulo the P-256 prime p
// (internal helper of the ADX code path; plain-register calling convention).
// In:   r12:r13:r8:r9 = a (four 64-bit limbs, least-significant first)
//       0..24(%rbx)   = b
//       r14 = .Lpoly[1], r15 = .Lpoly[3]  (the two non-trivial limbs of p;
//             limbs 0 and 2 of p are -1 and 0, folded into immediates below)
// Out:  r12:r13:r8:r9 = (a + b) mod p, also stored to 0..24(%rdi)
// Clobbers: rax, rbp, rcx, r10, r11, flags.
.type __ecp_nistz256_add_tox,@function
.align 32
__ecp_nistz256_add_tox:
.cfi_startproc
// xor clears CF, so the first adcq below acts as a plain add;
// r11 accumulates the carry out of the 4-limb sum.
xorq %r11,%r11
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
// Trial-subtract p from the 5-word sum; the raw sum limbs are saved in
// rax/rbp/rcx/r10 as each limb is consumed.
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
// If the trial subtraction borrowed, the sum was already < p: restore the
// raw sum. cmov keeps this constant-time (no secret-dependent branch).
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
// __ecp_nistz256_sub_fromx — 256-bit subtraction modulo the P-256 prime p.
// In:   r12:r13:r8:r9 = a,  0..24(%rbx) = b
//       r14 = .Lpoly[1], r15 = .Lpoly[3]
// Out:  r12:r13:r8:r9 = (a - b) mod p, also stored to 0..24(%rdi)
// Clobbers: rax, rbp, rcx, r10, r11, flags.
.type __ecp_nistz256_sub_fromx,@function
.align 32
__ecp_nistz256_sub_fromx:
.cfi_startproc
// xor clears CF, so the first sbbq is a plain subtract.
// r11 becomes 0 or -1 depending on whether the 4-limb subtraction borrowed.
xorq %r11,%r11
sbbq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq $0,%r11
// Unconditionally add p back to the difference; the raw (uncorrected)
// difference limbs are saved in rax/rbp/rcx/r10 as they are consumed.
xorq %r10,%r10
adcq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
// bit 0 of r11 = "did the subtraction borrow". If it did NOT borrow, the
// raw difference was already in range, so restore it (constant time).
btq $0,%r11
cmovncq %rax,%r12
cmovncq %rbp,%r13
movq %r12,0(%rdi)
cmovncq %rcx,%r8
movq %r13,8(%rdi)
cmovncq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
// __ecp_nistz256_subx — 256-bit subtraction modulo the P-256 prime p,
// register-only variant (the result is NOT stored to memory).
// In:   rax:rbp:rcx:r10 = a (minuend),  r12:r13:r8:r9 = b (subtrahend)
//       r14 = .Lpoly[1], r15 = .Lpoly[3]
// Out:  r12:r13:r8:r9 = (a - b) mod p; the caller stores the result.
// Clobbers: rax, rbp, rcx, r10, r11, flags.
.type __ecp_nistz256_subx,@function
.align 32
__ecp_nistz256_subx:
.cfi_startproc
// xor clears CF, so the first sbbq is a plain subtract.
// r11 records whether the 4-limb subtraction borrowed.
xorq %r11,%r11
sbbq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq $0,%r11
// Unconditionally add p back; the raw difference has been copied into
// r12/r13/r8/r9 limb by limb just before each limb is corrected.
xorq %r9,%r9
adcq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
// If the subtraction borrowed (bit 0 of r11), take the p-corrected value;
// otherwise keep the raw difference. cmov keeps it constant-time.
btq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
cmovcq %rcx,%r8
cmovcq %r10,%r9
ret
.cfi_endproc
.size __ecp_nistz256_subx,.-__ecp_nistz256_subx
// __ecp_nistz256_mul_by_2x — double a 256-bit value modulo the P-256 prime p.
// In:   r12:r13:r8:r9 = a,  r14 = .Lpoly[1], r15 = .Lpoly[3]
// Out:  r12:r13:r8:r9 = (2*a) mod p, also stored to 0..24(%rdi)
// Clobbers: rax, rbp, rcx, r10, r11, flags.
.type __ecp_nistz256_mul_by_2x,@function
.align 32
__ecp_nistz256_mul_by_2x:
.cfi_startproc
// xor clears CF; each adcq reg,reg doubles a limb while propagating the
// carry, i.e. a one-bit left shift of the whole 256-bit value into r11.
xorq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
// Trial-subtract p; raw doubled limbs are saved in rax/rbp/rcx/r10.
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
// Borrow means 2*a was already < p: restore the raw value (constant time).
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
// void ecp_nistz256_point_double_adx(P256_POINT *r /* rdi */,
//                                    const P256_POINT *a /* rsi */);
// P-256 point doubling in Jacobian coordinates, ADX/BMI2 (mulx) code path.
// A point is 96 bytes: X at offset 0, Y at 32, Z at 64, each four 64-bit
// limbs in Montgomery form (layout inferred from the 0..80(%rsi) accesses
// here and in the sibling routines — standard nistz256 convention).
// Uses 160 bytes of stack as five 32-byte temporaries at offsets
// 0/32/64/96/128; NOTE(review): the generating perlasm names these
// S/M/Zsqr/in_x/tmp0 — confirm against the perl source before relying on it.
// All arithmetic is constant-time (cmov-based selection, no secret branches).
.globl ecp_nistz256_point_double_adx
.hidden ecp_nistz256_point_double_adx
.type ecp_nistz256_point_double_adx,@function
.align 32
ecp_nistz256_point_double_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $160+8,%rsp
.cfi_adjust_cfa_offset 32*5+8
.Lpoint_doublex_body:
// Entered from ecp_nistz256_point_add_adx when the two inputs are equal
// (after it trims its larger frame down to this one's 160+8 bytes).
.Lpoint_double_shortcutx:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
// r12:r13:r8:r9 = Y coordinate of the input
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
// keep the non-trivial modulus limbs resident for the helpers
movq .Lpoly+8(%rip),%r14
movq .Lpoly+24(%rip),%r15
// cache the input X coordinate at 96(%rsp)
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
// stash &r->X, &r->Y, &r->Z in xmm regs (GPR<->XMM moves emitted as raw
// bytes by the generator): movq %rdi,%xmm0 / %r10,%xmm1 / %r11,%xmm2
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
// 0(%rsp) = 2*Y mod p
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
// 64(%rsp) = Z^2 (Montgomery square of the Z coordinate)
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-128(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// 0(%rsp) = (2*Y)^2
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// r->Z = 2 * (Y * Z)
movq 32(%rbx),%rdx
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-128(%rbx),%rsi
leaq 32(%rbx),%rbx
// movq %xmm2,%rdi  (rdi = &r->Z)
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montx
call __ecp_nistz256_mul_by_2x
// 32(%rsp) = X + Z^2
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
// 64(%rsp) = X - Z^2
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
// r->Y (tmp) = ((2Y)^2)^2
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
// movq %xmm1,%rdi  (rdi = &r->Y)
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montx
// Halve the value just produced, modulo p: if it is odd, add p first
// (making it even without changing the residue), then shift the whole
// 5-word quantity right by one bit. All selections via cmov.
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
// funnel-shift each limb right by one, pulling in the next limb's LSB
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
// 32(%rsp) = (X - Z^2) * (X + Z^2)
movq 64(%rsp),%rdx
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 32(%rsp) = 3 * (X - Z^2)*(X + Z^2)   (the "M" of the doubling formula)
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
// 0(%rsp) = X * (2Y)^2   (the "S" of the doubling formula)
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 128(%rsp) = 2*S
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
// r->X (tmp) = M^2
movq 0+32(%rsp),%rdx
movq 8+32(%rsp),%r14
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
// movq %xmm0,%rdi  (rdi = &r->X)
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montx
// r->X = M^2 - 2*S
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromx
// r12..r9 = S - r->X  (register-only subtraction)
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subx
// 0(%rsp) = M * (S - r->X)
movq 32(%rsp),%rdx
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
// NOTE(review): these cmovz read ZF from the xorl above (always set) —
// artifact of the generator's operand shuffling; copied verbatim.
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-128(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
// r->Y = M*(S - r->X) - (2Y)^4/2   (movq %xmm1,%rbx / movq %xmm1,%rdi)
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromx
// restore callee-saved registers and return
leaq 160+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_doublex_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_double_adx,.-ecp_nistz256_point_double_adx
// void ecp_nistz256_point_add_adx(P256_POINT *r /* rdi */,
//                                 const P256_POINT *a /* rsi */,
//                                 const P256_POINT *b /* rdx */);
// Full P-256 Jacobian point addition, ADX/BMI2 (mulx) code path.
// Points are 96 bytes: X at 0, Y at 32, Z at 64 (Montgomery form).
// 576 bytes of stack hold eighteen 32-byte temporaries; both inputs are
// copied to the stack first. xmm5 and xmm4 are built into all-ones masks
// flagging an input whose Z coordinate is zero (point at infinity); they
// drive the constant-time select of the final result at the end.
// If the inputs turn out to be the same point, control transfers to
// .Lpoint_double_shortcutx after trimming the frame by 416 bytes.
.globl ecp_nistz256_point_add_adx
.hidden ecp_nistz256_point_add_adx
.type ecp_nistz256_point_add_adx,@function
.align 32
ecp_nistz256_point_add_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $576+8,%rsp
.cfi_adjust_cfa_offset 32*18+8
.Lpoint_addx_body:
// Copy point a to 384..448(%rsp) (X|Y|Z); meanwhile start building the
// "a at infinity" mask in xmm5 by OR-folding a's Z limbs.
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
// Copy point b to 480..512(%rsp); keep b's Z limbs in GPRs for squaring.
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
// movq %rdi,%xmm0  (save the result pointer)
.byte 102,72,15,110,199
leaq 64-128(%rsi),%rsi
// stash b->Z at 544(%rsp)
movq %rdx,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
// 96(%rsp) = Zb^2
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// finish xmm5 = all-ones iff a->Z == 0; build xmm4 = all-ones iff b->Z == 0
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
// 32(%rsp) = Za^2   (movq %rbx,%xmm1 keeps &a)
movq 64+0(%rbx),%rdx
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-128(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// 224(%rsp) = Zb^3 = Zb * Zb^2
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 256(%rsp) = Za^3 = Za * Za^2
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 224(%rsp) = S1 = Ya * Zb^3
movq 416(%rsp),%rdx
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 256(%rsp) = S2 = Yb * Za^3
movq 512(%rsp),%rdx
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq -128+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 64(%rsp) = R = S2 - S1; r12 accumulates "is R zero?"
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
// movq %r12,%xmm3  (save the R==0 flag)
.byte 102,73,15,110,220
// 160(%rsp) = U1 = Xa * Zb^2
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 192(%rsp) = U2 = Xb * Za^2
movq 480(%rsp),%rdx
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 0(%rsp) = H = U2 - U1; r12 accumulates "is H zero?"
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
// movq %xmm2,%r8 (infinity masks), movq %xmm3,%r9 (R==0 flag)
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
// 0x3e = DS prefix, used by the generator as a branch hint / padding byte
.byte 0x3e
jnz .Ladd_proceedx
// H == 0 and neither input is at infinity: if R == 0 too the points are
// equal -> doubling; otherwise a == -b and the result is the point at
// infinity (all zeros).
testq %r9,%r9
jz .Ladd_doublex
// movq %xmm0,%rdi  (recover result pointer), then zero the output point
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp .Ladd_donex
.align 32
.Ladd_doublex:
// movq %xmm1,%rsi (= &a), movq %xmm0,%rdi (= result); shrink the frame
// from 576+8 to 160+8 bytes and re-enter the doubling routine.
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
.cfi_adjust_cfa_offset -416
jmp .Lpoint_double_shortcutx
.cfi_adjust_cfa_offset 416
.align 32
.Ladd_proceedx:
// 96(%rsp) = R^2
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// 352(%rsp) = Za * H
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 32(%rsp) = H^2
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// 352(%rsp) = r->Z = Za * Zb * H
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq -128+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 128(%rsp) = H^3 = H * H^2
movq 0(%rsp),%rdx
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 192(%rsp) = U1 * H^2
movq 160(%rsp),%rdx
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
// Inline 2*(U1*H^2) mod p: double with carry into r11, trial-subtract p,
// cmov-restore on borrow (same pattern as __ecp_nistz256_mul_by_2x),
// then load R^2 from 96(%rsp) as the minuend for the following subx.
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
// regs = R^2 - 2*U1*H^2
call __ecp_nistz256_subx
// 288(%rsp) = r->X = R^2 - 2*U1*H^2 - H^3
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromx
// 320(%rsp) = U1*H^2 - r->X
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
// 256(%rsp) = S1 * H^3
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 320(%rsp) = R * (U1*H^2 - r->X)
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 320(%rsp) = r->Y = R*(U1*H^2 - r->X) - S1*H^3
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromx
// movq %xmm0,%rdi  (result pointer). Constant-time select of each output
// coordinate: computed value, or the other input's coordinate when one of
// the inputs was the point at infinity (masks xmm5 = a-at-inf, xmm4 =
// b-at-inf, built earlier). Z first, then X, then Y.
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
.Ladd_donex:
// restore callee-saved registers and return
leaq 576+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_addx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_adx,.-ecp_nistz256_point_add_adx
// void ecp_nistz256_point_add_affine_adx(P256_POINT *r /* rdi */,
//                                        const P256_POINT *a /* rsi */,
//                                        const P256_POINT_AFFINE *b /* rdx */);
// Mixed P-256 point addition (Jacobian a + affine b), ADX/BMI2 code path.
// a is 96 bytes (X|Y|Z); b is 64 bytes (X|Y), implicitly Z = 1 (Montgomery
// one, .LONE_mont, used in the final select). 480 bytes of stack hold
// fifteen 32-byte temporaries. xmm5 masks "a at infinity" (Z == 0), xmm4
// masks "b at infinity" (X|Y all zero); both drive the constant-time
// result selection at the end. Unlike the full adder there is no
// equal-points shortcut here — the caller must not pass a == b.
.globl ecp_nistz256_point_add_affine_adx
.hidden ecp_nistz256_point_add_affine_adx
.type ecp_nistz256_point_add_affine_adx,@function
.align 32
ecp_nistz256_point_add_affine_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $480+8,%rsp
.cfi_adjust_cfa_offset 32*15+8
.Ladd_affinex_body:
// Copy a to 320..384(%rsp) (X|Y|Z); keep a->Z limbs in GPRs for squaring
// and OR-fold the Z limbs into xmm5 (infinity detection).
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
// Copy affine b to 416..448(%rsp) (X|Y); OR-fold b's limbs into xmm3.
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
// movq %rdi,%xmm0  (save the result pointer)
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
// 32(%rsp) = Za^2
leaq 64-128(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// finalize xmm5 = all-ones iff a->Z == 0; xmm4 = all-ones iff b == 0.
// Interleaved: 0(%rsp) = U2 = Xb * Za^2.
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rdx
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-128(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 64(%rsp) = H = U2 - Xa
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
// 32(%rsp) = Za^3 = Za * Za^2
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 288(%rsp) = r->Z = Za * H
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 32(%rsp) = S2 = Yb * Za^3
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 96(%rsp) = R = S2 - Ya
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromx
// 128(%rsp) = H^2
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// 192(%rsp) = R^2
movq 0+96(%rsp),%rdx
movq 8+96(%rsp),%r14
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montx
// 160(%rsp) = H^3 = H * H^2
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 0(%rsp) = U1*H^2 = Xa * H^2
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq -128+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
// Inline 2*(Xa*H^2) mod p (double / trial-subtract p / cmov-restore on
// borrow — same pattern as __ecp_nistz256_mul_by_2x), then load R^2 from
// 192(%rsp) as the minuend for the following subx.
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
// regs = R^2 - 2*Xa*H^2
call __ecp_nistz256_subx
// 224(%rsp) = r->X = R^2 - 2*Xa*H^2 - H^3
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromx
// 64(%rsp) = Xa*H^2 - r->X
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
// 32(%rsp) = Ya * H^3
movq 352(%rsp),%rdx
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq -128+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 64(%rsp) = R * (Xa*H^2 - r->X)
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montx
// 256(%rsp) = r->Y = R*(Xa*H^2 - r->X) - Ya*H^3
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromx
// movq %xmm0,%rdi  (result pointer). Constant-time select per coordinate:
// computed value, or b's coordinate (with Z = .LONE_mont) when a was at
// infinity, or a's coordinate when b was at infinity. Z, then X, then Y.
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand .LONE_mont(%rip),%xmm2
pand .LONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
// restore callee-saved registers and return
leaq 480+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Ladd_affinex_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_affine_adx,.-ecp_nistz256_point_add_affine_adx
#endif
|
Prom11111/plumbingtech | 30,650 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/armv8-mont-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.text
// int bn_mul_mont_nohw(BN_ULONG *rp   /* x0 */,
//                      const BN_ULONG *ap /* x1 */,
//                      const BN_ULONG *bp /* x2 */,
//                      const BN_ULONG *np /* x3 */,
//                      const BN_ULONG *n0 /* x4, pointer to n0 word */,
//                      int num            /* x5, limb count */);
// Word-by-word Montgomery multiplication rp = ap * bp * R^-1 mod np,
// baseline AArch64 path (no special extensions). Always returns 1.
// The temporary product tp[num] lives on a freshly carved, 16-byte-aligned
// stack area below the saved frame. The final reduction subtracts np
// unconditionally and then copies back with csel, so it is constant-time.
// NOTE(review): the limb loops consume ap/np in ldp pairs and the inner
// counters start at num-2 — presumably num is even and >= 2; confirm
// against the caller's contract.
.globl bn_mul_mont_nohw
.def bn_mul_mont_nohw
.type 32
.endef
.align 5
bn_mul_mont_nohw:
AARCH64_SIGN_LINK_REGISTER
// frame: save fp/lr and the callee-saved x19-x24 used below
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
// ---- first outer iteration (i = 0): tp = ap * bp[0], reduced by m1 ----
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) As for removal of first multiplication and addition
// instructions. The outcome of first addition is
// guaranteed to be zero, which leaves two computationally
// significant outcomes: it either carries or not. Then
// question is when does it carry? Is there alternative
// way to deduce it? If you follow operations, you can
// observe that condition for carry is quite simple:
// x6 being non-zero. So that carry can be calculated
// by adding -1 to x6. That's what next instruction does.
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,L1st_skip
// inner loop of the first outer iteration: one limb of ap*bp[0] plus one
// limb of np*m1 per pass, two carry chains (x7/x11 and x13/x17)
L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,L1st
L1st_skip:
// flush the two top limbs of this column and the overflow bit
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // upmost overflow bit
stp x12,x13,[x22]
// ---- remaining outer iterations (i = 1 .. num-1): tp += ap*bp[i],
// then reduce by m1 = tp[0]*n0 ----
Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*) same first-limb carry trick as above
umulh x17,x14,x15
cbz x21,Linner_skip
Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,Linner
Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // upmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,Louter
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
// constant-time copy-back: pick tp[j] (borrow) or rp[j] (no borrow),
// wiping the secret temporary tp as we go
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
// restore registers, release the alloca'd area, return 1
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl bn_sqr8x_mont
.def bn_sqr8x_mont
.type 32
.endef
.align 5
bn_sqr8x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b Lsqr8x_zero_start
Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
sub x14,x3,x5 // rewinded ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_mul
.align 4
Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b Lsqr8x_outer_loop
.align 4
Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
sub x16,x3,x5 // rewinded np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_tail
.align 4
Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne Lsqr8x_reduction
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b Lsqr8x_done
.align 4
Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-7,x28 hold result, x6-7 hold modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-7 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl bn_mul4x_mont
.def bn_mul4x_mont
.type 32
.endef
.align 5
//------------------------------------------------------------------------
// int bn_mul4x_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
//                   const BN_ULONG *np, const BN_ULONG *n0p, int num)
// NOTE(review): signature inferred from register usage below — confirm
// against the C prototype in the generator's source tree.
// Montgomery multiplication, processed 4 limbs at a time.
// In:   x0 = rp (result), x1 = ap, x2 = bp, x3 = np (modulus),
//       x4 = &n0 (Montgomery constant, dereferenced below), x5 = num limbs
// Out:  x0 = 1; result written through rp
// Uses a stack scratch area of num+4 limbs allocated below the frame.
// Carry flags are live across long instruction spans (modulo-scheduled);
// do not reorder instructions in the loops below.
//------------------------------------------------------------------------
bn_mul4x_mont:
AARCH64_SIGN_LINK_REGISTER
// Standard frame: save x29/x30 and callee-saved x19-x28 (128 bytes).
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
// Allocate num*8 + 32 bytes of scratch below the register-save area.
sub x26,sp,x5,lsl#3
lsl x5,x5,#3
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
// x10 = &b[num], x27 = &a[num] (end-of-array sentinels).
add x10,x2,x5
add x27,x1,x5
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
// Accumulator t[0..3] = 0 in x19-x22.
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
mov x28,#0
mov x26,sp
// First pass: multiply a[0..3] by successive b[i] and fold in the
// Montgomery reduction term t[0]*n0 each iteration. x28 cycles 8,16,24,0
// (masked to 31) to index the next b[i]; loop exits after 4 iterations.
Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_reduction
// If num == 4 the whole multiply is done; go finalize.
cbz x10,Lmul4x4_post_condition
// Otherwise walk the remaining a[4..]/n[4..] limbs for this b window.
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // a[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
// First-pass tail: same mul/reduce pattern for limbs beyond the first 4,
// reusing the stashed t[0]*n0 values from the stack scratch area.
Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_tail
sub x11,x27,x5 // rewinded x1
cbz x10,Lmul4x_proceed
// More a/n limbs remain in this window: reload next 4 of each and repeat.
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_1st_tail
.align 5
// Advance to the next group of 4 b[] words; rewind ap/np and reload the
// partially reduced t[] window from the stack.
Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
// Steady-state head loop: multiply-accumulate a[0..3]*b[i] into t[] and
// fold in the n[0..3]*t[0]*n0 reduction, 4 b-words per outer pass.
Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25 // lo(n[0..3]*t[0]*n0
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_reduction
adc x0,x0,xzr
// Add the previously stored t[4..7] window before processing the tail.
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
// Steady-state tail loop: limbs beyond the first 4 for the current b[i],
// consuming the stashed t[0]*n0 values in order.
Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next a[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_tail
sub x11,x3,x5 // rewinded np?
adc x0,x0,xzr
cbz x10,Loop_mul4x_break
// More limbs in this window: reload next 4 a/n words and stored t words.
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_tail
.align 4
// End of one full pass over a[]/n[]: propagate the saved top carry (x30),
// advance bp, and either loop for the next b window or finalize.
Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b Loop_mul4x_reduction
.align 4
Lmul4x_post:
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
// Subtraction loop: rp[i] = t[i] - n[i] with borrow, 4 limbs per pass.
Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
// Constant-time select: keep t[] if the subtraction borrowed ("lo"),
// else keep t-n; also wipe the stack scratch area as we go.
Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b Lmul4x_done
.align 4
// Fast finalization for num == 4: single conditional subtract of n[0..3].
Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-3,x0 hold result, x14-7 hold modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-3 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
// Common epilogue: restore callee-saved registers, return 1.
Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Prom11111/plumbingtech | 73,987 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/chacha20_poly1305_armv8-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.section .rodata
.align 7
// ChaCha20 "sigma" constant: the 16-byte string "expand 32-byte k".
Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
// Per-lane block-counter increments for 4-way vectorized ChaCha20.
Linc:
.long 1,2,3,4
// TBL shuffle indices implementing a rotate-left-by-8 of each 32-bit lane.
Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
// Poly1305 "r" clamp mask (RFC 8439: clear top 4 bits of each 32-bit
// limb and low 2 bits of the upper three limbs).
Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.text
.def Lpoly_hash_ad_internal
.type 32
.endef
.align 6
//------------------------------------------------------------------------
// Lpoly_hash_ad_internal — absorb the additional authenticated data (AAD)
// into the Poly1305 accumulator.
// In:   x3 = AAD pointer, x4 = AAD length in bytes
//       x8:x9:x10 = Poly1305 accumulator (acc0:acc1:acc2)
//       x16:x17   = clamped key limbs r0:r1, x15 = 1 (high-bit pad)
// Out:  accumulator updated; x3 advanced past full 16-byte words
// Clobbers: x11-x14, v20, flags. A trailing partial block is
// zero-padded via v20 before being absorbed.
//------------------------------------------------------------------------
Lpoly_hash_ad_internal:
.cfi_startproc
cbnz x4, Lpoly_hash_intro
ret
Lpoly_hash_intro:
// Full 16-byte blocks: add the block into the accumulator (plus the
// 2^128 pad via x15), then multiply by r and reduce mod 2^130-5.
cmp x4, #16
b.lt Lpoly_hash_ad_tail
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
// Partial reduction: fold bits above 2^130 back in as *5 (i.e. *4 + *1).
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b Lpoly_hash_ad_internal
Lpoly_hash_ad_tail:
cbz x4, Lpoly_hash_ad_ret
// Build the final partial block in v20, byte by byte from the end,
// so the unused high bytes remain zero.
eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD
sub x4, x4, #1
Lpoly_hash_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, x4]
mov v20.b[0], w11
subs x4, x4, #1
b.ge Lpoly_hash_tail_16_compose
mov x11, v20.d[0]
mov x12, v20.d[1]
// Absorb the padded final block exactly as a full block above.
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lpoly_hash_ad_ret:
ret
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data);
//
.globl chacha20_poly1305_seal
.def chacha20_poly1305_seal
.type 32
.endef
.align 6
chacha20_poly1305_seal:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
ldr x12, [x5, #56] // The total cipher text length includes extra_in_len
add x12, x12, x2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x12
cmp x2, #128
b.le Lseal_128 // Optimization for smaller buffers
// Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext,
// and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically,
// the fifth block (A4-D4) horizontally.
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
sub x5, x5, #32
mov x6, #10
.align 5
Lseal_init_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.hi Lseal_init_rounds
add v15.4s, v15.4s, v25.4s
mov x11, #4
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
and v4.16b, v4.16b, v27.16b
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
mov x16, v4.d[0] // Move the R key to GPRs
mov x17, v4.d[1]
mov v27.16b, v9.16b // Store the S key
bl Lpoly_hash_ad_internal
mov x3, x0
cmp x2, #256
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #256
mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds
mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256
Lseal_main_loop:
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
sub x5, x5, #32
.align 5
Lseal_main_loop_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.ge Lseal_main_loop_rounds
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
subs x7, x7, #1
b.gt Lseal_main_loop_rounds
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
cmp x2, #320
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #320
mov x6, #0
mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration
b Lseal_main_loop
Lseal_tail:
// This part of the function handles the storage and authentication of the last [0,320) bytes
// We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data.
cmp x2, #64
b.lt Lseal_tail_64
// Store and authenticate 64B blocks per iteration
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
// Shift the state left by 64 bytes for the next iteration of the loop
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
mov v1.16b, v2.16b
mov v6.16b, v7.16b
mov v11.16b, v12.16b
mov v16.16b, v17.16b
mov v2.16b, v3.16b
mov v7.16b, v8.16b
mov v12.16b, v13.16b
mov v17.16b, v18.16b
mov v3.16b, v4.16b
mov v8.16b, v9.16b
mov v13.16b, v14.16b
mov v18.16b, v19.16b
b Lseal_tail
Lseal_tail_64:
ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr
// Here we handle the last [0,64) bytes of plaintext
cmp x2, #16
b.lt Lseal_tail_16
// Each iteration encrypt and authenticate a 16B block
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b}, [x0], #16
sub x2, x2, #16
// Shift the state left by 16 bytes for the next iteration of the loop
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
b Lseal_tail_64
Lseal_tail_16:
// Here we handle the last [0,16) bytes of ciphertext that require a padded block
cbz x2, Lseal_hash_extra
eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes
not v22.16b, v20.16b
mov x6, x2
add x1, x1, x2
cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding
mov x7, #16 // We need to load some extra_in first for padding
sub x7, x7, x2
cmp x4, x7
csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register
mov x12, x7
add x3, x3, x7
sub x4, x4, x7
Lseal_tail16_compose_extra_in:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x7, x7, #1
b.gt Lseal_tail16_compose_extra_in
add x3, x3, x12
Lseal_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x1, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lseal_tail_16_compose
and v0.16b, v0.16b, v21.16b
eor v20.16b, v20.16b, v0.16b
mov v21.16b, v20.16b
Lseal_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lseal_tail_16_store
// Hash in the final ct block concatenated with extra_in
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_hash_extra:
cbz x4, Lseal_finalize
Lseal_hash_extra_loop:
cmp x4, #16
b.lt Lseal_hash_extra_tail
ld1 {v20.16b}, [x3], #16
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b Lseal_hash_extra_loop
Lseal_hash_extra_tail:
cbz x4, Lseal_finalize // no partial extra_in block remains
eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext, zero-padded
add x3, x3, x4 // point x3 one past the last extra_in byte
Lseal_hash_extra_load:
// Gather the final x4 (< 16) bytes back to front: shift every byte of v20
// up one lane, then insert the next byte (walking x3 backwards) into lane 0.
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x4, x4, #1
b.gt Lseal_hash_extra_load
// Hash in the final padded extra_in block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
// Conditionally subtract p = 2^130 - 5 from the accumulator [x10:x9:x8].
// The three-limb little-endian constant [3 : -1 : -5] equals 2^130 - 5.
sub x12, xzr, x15 // x12 = -x15; x15 presumably holds 1 (set outside this excerpt), giving 0xffff...ffff
orr x13, xzr, #3 // high limb of p
subs x11, x8, #-5 // [x13:x12:x11] = acc - (2^130 - 5); subs of #-5 adds 5 to the low limb
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs // keep the subtracted value only when acc >= p (no borrow)
csel x9, x12, x9, cs
csel x10, x13, x10, cs
// Add the Poly1305 S key (saved in v27) to form the tag.
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15 // x10 is never stored below, so this carry fold does not affect the tag
stp x8, x9, [x5] // write the 16-byte Poly1305 tag
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lseal_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
Lseal_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lseal_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
// Only the first 32 bytes of the third block (counter = 0) are needed,
// so skip updating v12 and v17.
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
b Lseal_tail
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data);
//
.globl chacha20_poly1305_open
.def chacha20_poly1305_open
.type 32
.endef
.align 6
chacha20_poly1305_open:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
// Materialize the address of the ChaCha20 constant table (PC-relative, PIC-safe).
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5] // presumably 48 bytes of ChaCha key + counter/nonce from aead_data — confirm against caller
mov x15, #1 // Prepare the Poly1305 state; x15 is the 2^128 pad bit added per block
mov x8, #0 // Poly1305 accumulator limb 0
mov x9, #0 // Poly1305 accumulator limb 1
mov x10, #0 // Poly1305 accumulator limb 2 (high bits)
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x2
cmp x2, #128
b.le Lopen_128 // Optimization for smaller buffers
// Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys
mov v0.16b, v24.16b // row 0: constants
mov v5.16b, v28.16b // row 1: presumably key words 0-3
mov v10.16b, v29.16b // row 2: presumably key words 4-7
mov v15.16b, v30.16b // row 3: counter/nonce (counter lane incremented via v25 elsewhere)
mov x6, #10 // 10 ChaCha double rounds
.align 5
Lopen_init_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.hi Lopen_init_rounds
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
and v0.16b, v0.16b, v27.16b
mov x16, v0.d[0] // Move the R key to GPRs
mov x17, v0.d[1]
mov v27.16b, v5.16b // Store the S key
bl Lpoly_hash_ad_internal
Lopen_ad_done:
mov x3, x1
// Each iteration of the loop hash 320 bytes, and prepare stream for 320 bytes
Lopen_main_loop:
cmp x2, #192
b.lt Lopen_tail
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
sub x5, x5, #32
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
// Decide how to interleave Poly1305 hashing with the 10 ChaCha double rounds:
// x4 = number of whole 16-byte blocks left to hash, x7 = rounds that also hash,
// x6 = leftover hash-only iterations (may go negative when > 320 bytes remain).
// Fix: the original had `subs x6, x7, x4` duplicated back to back; the second
// copy recomputed the identical result and flags, so one is removed.
lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12
sub x4, x4, #10
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full
cbz x7, Lopen_main_loop_rounds_short
.align 5
Lopen_main_loop_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_main_loop_rounds_short:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x7, x7, #1
b.gt Lopen_main_loop_rounds
subs x6, x6, #1
b.ge Lopen_main_loop_rounds_short
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
// We can always safely store 192 bytes
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #192
mov v0.16b, v3.16b
mov v5.16b, v8.16b
mov v10.16b, v13.16b
mov v15.16b, v18.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v4.16b
mov v5.16b, v9.16b
mov v10.16b, v14.16b
mov v15.16b, v19.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_main_loop
Lopen_tail:
cbz x2, Lopen_finalize
lsr x4, x2, #4 // How many whole blocks we have to hash
cmp x2, #64
b.le Lopen_tail_64
cmp x2, #128
b.le Lopen_tail_128
Lopen_tail_192:
// We need three more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
mov v17.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v21.16b, v21.16b, v21.16b
ins v23.s[0], v25.s[0]
ins v21.d[0], x15
add v22.4s, v23.4s, v21.4s
add v21.4s, v22.4s, v21.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing
sub x4, x4, x7
cbz x7, Lopen_tail_192_rounds_no_hash
Lopen_tail_192_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_tail_192_rounds_no_hash:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x7, x7, #1
b.gt Lopen_tail_192_rounds
subs x6, x6, #1
b.ge Lopen_tail_192_rounds_no_hash
// We hashed 160 bytes at most, may still have 32 bytes left
Lopen_tail_192_hash:
cbz x4, Lopen_tail_192_hash_done
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_tail_192_hash
Lopen_tail_192_hash_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v12.4s, v12.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #128
b Lopen_tail_64_store
Lopen_tail_128:
// We need two more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v22.16b, v22.16b, v22.16b
ins v23.s[0], v25.s[0]
ins v22.d[0], x15
add v22.4s, v22.4s, v23.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_128_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #4
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #12
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_128_rounds
cbz x4, Lopen_tail_128_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_128_rounds
Lopen_tail_128_rounds_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_tail_64_store
Lopen_tail_64:
// We just need a single block
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
ins v23.s[0], v25.s[0]
add v15.4s, v15.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_64_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_64_rounds
cbz x4, Lopen_tail_64_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_64_rounds
Lopen_tail_64_rounds_done:
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v15.4s, v15.4s, v23.4s
Lopen_tail_64_store:
cmp x2, #16
b.lt Lopen_tail_16
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
st1 {v20.16b}, [x0], #16
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
sub x2, x2, #16
b Lopen_tail_64_store
Lopen_tail_16:
// Here we handle the last [0,16) bytes that require a padded block
cbz x2, Lopen_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask
not v22.16b, v20.16b
add x7, x1, x2
mov x6, x2
Lopen_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x7, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lopen_tail_16_compose
and v20.16b, v20.16b, v21.16b
// Hash in the final padded block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
Lopen_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lopen_tail_16_store
Lopen_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lopen_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
Lopen_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lopen_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
Lopen_128_store:
cmp x2, #64
b.lt Lopen_128_store_64
ld1 {v20.16b - v23.16b}, [x1], #64
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
Lopen_128_store_64:
lsr x4, x2, #4
mov x3, x1
Lopen_128_hash_64:
cbz x4, Lopen_tail_64_store
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_128_hash_64
.cfi_endproc
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// ---------------------------------------------------------------------------
// File boundary: the content below is from a different source file:
//   repo: Prom11111/plumbingtech (size 82,176 bytes)
//   path: .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesv8-gcm-armv8-win64.S
// ---------------------------------------------------------------------------
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#if __ARM_MAX_ARCH__ >= 8
.arch armv8-a+crypto
.text
//------------------------------------------------------------------------------
// aes_gcm_enc_kernel — fused AES-CTR encryption + GHASH update (AArch64).
//
// Register roles (inferred from the usage below — TODO confirm against the
// generating Perl script's prototype):
//   x0 = in            (plaintext pointer; also returns byte_len via x15 at exit)
//   x1 = bit length of input (code computes byte_len = x1 >> 3)
//   x2 = out           (ciphertext pointer)
//   x3 = Xi            (16-byte GHASH accumulator, loaded at entry, stored at exit)
//   x4 = ivec/counter  (copied to x16; counter word at [x16,#12] updated at exit)
//   x5 = AES key schedule (copied to x8; round count read from [x8,#240])
//   x6 = Htable        (powers of H: h1 at [x6], h2 at #32, h3 at #48, h4 at #80)
//
// Callee-saved state per AAPCS64: x19-x24 and d8-d15 are spilled in the
// 128-byte frame below and restored before ret.
// Strategy: four counter blocks are encrypted per main-loop iteration while
// the GHASH of the previous four ciphertext blocks is folded in (karatsuba
// multiply + polynomial reduction via the 0xc2 constant), with AES rounds for
// 128/192/256-bit keys selected by comparing the round count in x17 to 12.
//------------------------------------------------------------------------------
.globl aes_gcm_enc_kernel
.def aes_gcm_enc_kernel
.type 32
.endef
.align 4
aes_gcm_enc_kernel:
	AARCH64_SIGN_LINK_REGISTER	// link-register signing macro (from <ring-core/asm_base.h>)
	stp x29, x30, [sp, #-128]!
	mov x29, sp
	stp x19, x20, [sp, #16]
	mov x16, x4
	mov x8, x5
	stp x21, x22, [sp, #32]
	stp x23, x24, [sp, #48]
	stp d8, d9, [sp, #64]
	stp d10, d11, [sp, #80]
	stp d12, d13, [sp, #96]
	stp d14, d15, [sp, #112]
	ldr w17, [x8, #240]		// w17 = number of AES rounds
	add x19, x8, x17, lsl #4 // borrow input_l1 for last key
	ldp x13, x14, [x19] // load round N keys
	ldr q31, [x19, #-16] // load round N-1 keys
	add x4, x0, x1, lsr #3 // end_input_ptr
	lsr x5, x1, #3 // byte_len
	mov x15, x5
	ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
	ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
	sub x5, x5, #1 // byte_len - 1
	ldr q18, [x8, #0] // load rk0
	and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
	ldr q25, [x8, #112] // load rk7
	add x5, x5, x0
	lsr x12, x11, #32
	fmov d2, x10 // CTR block 2
	orr w11, w11, w11
	rev w12, w12 // rev_ctr32
	fmov d1, x10 // CTR block 1
	aese v0.16b, v18.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 0
	add w12, w12, #1 // increment rev_ctr32
	rev w9, w12 // CTR block 1
	fmov d3, x10 // CTR block 3
	orr x9, x11, x9, lsl #32 // CTR block 1
	add w12, w12, #1 // CTR block 1
	ldr q19, [x8, #16] // load rk1
	fmov v1.d[1], x9 // CTR block 1
	rev w9, w12 // CTR block 2
	add w12, w12, #1 // CTR block 2
	orr x9, x11, x9, lsl #32 // CTR block 2
	ldr q20, [x8, #32] // load rk2
	fmov v2.d[1], x9 // CTR block 2
	rev w9, w12 // CTR block 3
	aese v0.16b, v19.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 1
	orr x9, x11, x9, lsl #32 // CTR block 3
	fmov v3.d[1], x9 // CTR block 3
	aese v1.16b, v18.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 0
	ldr q21, [x8, #48] // load rk3
	aese v0.16b, v20.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 2
	ldr q24, [x8, #96] // load rk6
	aese v2.16b, v18.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 0
	ldr q23, [x8, #80] // load rk5
	aese v1.16b, v19.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 1
	ldr q14, [x6, #48] // load h3l | h3h
	ext v14.16b, v14.16b, v14.16b, #8
	aese v3.16b, v18.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 0
	aese v2.16b, v19.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 1
	ldr q22, [x8, #64] // load rk4
	aese v1.16b, v20.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 2
	ldr q13, [x6, #32] // load h2l | h2h
	ext v13.16b, v13.16b, v13.16b, #8
	aese v3.16b, v19.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 1
	ldr q30, [x8, #192] // load rk12
	aese v2.16b, v20.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 2
	ldr q15, [x6, #80] // load h4l | h4h
	ext v15.16b, v15.16b, v15.16b, #8
	aese v1.16b, v21.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 3
	ldr q29, [x8, #176] // load rk11
	aese v3.16b, v20.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 2
	ldr q26, [x8, #128] // load rk8
	aese v2.16b, v21.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 3
	add w12, w12, #1 // CTR block 3
	aese v0.16b, v21.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 3
	aese v3.16b, v21.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 3
	ld1 { v11.16b}, [x3]		// load current GHASH accumulator Xi
	ext v11.16b, v11.16b, v11.16b, #8
	rev64 v11.16b, v11.16b
	aese v2.16b, v22.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 4
	aese v0.16b, v22.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 4
	aese v1.16b, v22.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 4
	aese v3.16b, v22.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 4
	cmp x17, #12 // setup flags for AES-128/192/256 check
	aese v0.16b, v23.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 5
	aese v1.16b, v23.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 5
	aese v3.16b, v23.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 5
	aese v2.16b, v23.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 5
	aese v1.16b, v24.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 6
	trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
	aese v3.16b, v24.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 6
	ldr q27, [x8, #144] // load rk9
	aese v0.16b, v24.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 6
	ldr q12, [x6] // load h1l | h1h
	ext v12.16b, v12.16b, v12.16b, #8
	aese v2.16b, v24.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 6
	ldr q28, [x8, #160] // load rk10
	aese v1.16b, v25.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 7
	trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
	aese v0.16b, v25.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 7
	aese v2.16b, v25.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 7
	aese v3.16b, v25.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 7
	trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
	aese v1.16b, v26.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 8
	aese v2.16b, v26.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 8
	aese v3.16b, v26.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 8
	aese v0.16b, v26.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 8
	b.lt Lenc_finish_first_blocks // branch if AES-128
	aese v1.16b, v27.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 9
	aese v2.16b, v27.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 9
	aese v3.16b, v27.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 9
	aese v0.16b, v27.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 9
	aese v1.16b, v28.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 10
	aese v2.16b, v28.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 10
	aese v3.16b, v28.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 10
	aese v0.16b, v28.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 10
	b.eq Lenc_finish_first_blocks // branch if AES-192
	aese v1.16b, v29.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 11
	aese v2.16b, v29.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 11
	aese v0.16b, v29.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 11
	aese v3.16b, v29.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 11
	aese v1.16b, v30.16b
	aesmc v1.16b, v1.16b // AES block 1 - round 12
	aese v2.16b, v30.16b
	aesmc v2.16b, v2.16b // AES block 2 - round 12
	aese v0.16b, v30.16b
	aesmc v0.16b, v0.16b // AES block 0 - round 12
	aese v3.16b, v30.16b
	aesmc v3.16b, v3.16b // AES block 3 - round 12
// Final (round N-1) AES round for the first four counter blocks, plus
// precomputation of the karatsuba "mid" key terms (h4k|h3k, h2k|h1k).
Lenc_finish_first_blocks:
	cmp x0, x5 // check if we have <= 4 blocks
	eor v17.16b, v17.16b, v9.16b // h4k | h3k
	aese v2.16b, v31.16b // AES block 2 - round N-1
	trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
	aese v1.16b, v31.16b // AES block 1 - round N-1
	aese v0.16b, v31.16b // AES block 0 - round N-1
	aese v3.16b, v31.16b // AES block 3 - round N-1
	eor v16.16b, v16.16b, v8.16b // h2k | h1k
	b.ge Lenc_tail // handle tail
	ldp x19, x20, [x0, #16] // AES block 1 - load plaintext
	rev w9, w12 // CTR block 4
	ldp x6, x7, [x0, #0] // AES block 0 - load plaintext
	ldp x23, x24, [x0, #48] // AES block 3 - load plaintext
	ldp x21, x22, [x0, #32] // AES block 2 - load plaintext
	add x0, x0, #64 // AES input_ptr update
	eor x19, x19, x13 // AES block 1 - round N low
	eor x20, x20, x14 // AES block 1 - round N high
	fmov d5, x19 // AES block 1 - mov low
	eor x6, x6, x13 // AES block 0 - round N low
	eor x7, x7, x14 // AES block 0 - round N high
	eor x24, x24, x14 // AES block 3 - round N high
	fmov d4, x6 // AES block 0 - mov low
	cmp x0, x5 // check if we have <= 8 blocks
	fmov v4.d[1], x7 // AES block 0 - mov high
	eor x23, x23, x13 // AES block 3 - round N low
	eor x21, x21, x13 // AES block 2 - round N low
	fmov v5.d[1], x20 // AES block 1 - mov high
	fmov d6, x21 // AES block 2 - mov low
	add w12, w12, #1 // CTR block 4
	orr x9, x11, x9, lsl #32 // CTR block 4
	fmov d7, x23 // AES block 3 - mov low
	eor x22, x22, x14 // AES block 2 - round N high
	fmov v6.d[1], x22 // AES block 2 - mov high
	eor v4.16b, v4.16b, v0.16b // AES block 0 - result
	fmov d0, x10 // CTR block 4
	fmov v0.d[1], x9 // CTR block 4
	rev w9, w12 // CTR block 5
	add w12, w12, #1 // CTR block 5
	eor v5.16b, v5.16b, v1.16b // AES block 1 - result
	fmov d1, x10 // CTR block 5
	orr x9, x11, x9, lsl #32 // CTR block 5
	fmov v1.d[1], x9 // CTR block 5
	rev w9, w12 // CTR block 6
	st1 { v4.16b}, [x2], #16 // AES block 0 - store result
	fmov v7.d[1], x24 // AES block 3 - mov high
	orr x9, x11, x9, lsl #32 // CTR block 6
	eor v6.16b, v6.16b, v2.16b // AES block 2 - result
	st1 { v5.16b}, [x2], #16 // AES block 1 - store result
	add w12, w12, #1 // CTR block 6
	fmov d2, x10 // CTR block 6
	fmov v2.d[1], x9 // CTR block 6
	st1 { v6.16b}, [x2], #16 // AES block 2 - store result
	rev w9, w12 // CTR block 7
	orr x9, x11, x9, lsl #32 // CTR block 7
	eor v7.16b, v7.16b, v3.16b // AES block 3 - result
	st1 { v7.16b}, [x2], #16 // AES block 3 - store result
	b.ge Lenc_prepretail // do prepretail
// Steady state: each iteration AES-encrypts counter blocks 4k+4..4k+7 while
// GHASH-folding ciphertext blocks 4k..4k+3 (karatsuba high/mid/low + modulo
// reduction), interleaved instruction-by-instruction for throughput.
Lenc_main_loop: // main loop start
	aese v0.16b, v18.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
	rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
	aese v1.16b, v18.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
	fmov d3, x10 // CTR block 4k+3
	aese v2.16b, v18.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
	ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
	aese v0.16b, v19.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
	fmov v3.d[1], x9 // CTR block 4k+3
	aese v1.16b, v19.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
	ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext
	aese v2.16b, v19.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
	ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext
	aese v0.16b, v20.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
	eor v4.16b, v4.16b, v11.16b // PRE 1
	aese v1.16b, v20.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
	aese v3.16b, v18.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
	eor x23, x23, x13 // AES block 4k+7 - round N low
	aese v0.16b, v21.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
	mov d10, v17.d[1] // GHASH block 4k - mid
	pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
	eor x22, x22, x14 // AES block 4k+6 - round N high
	mov d8, v4.d[1] // GHASH block 4k - mid
	aese v3.16b, v19.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
	rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
	aese v0.16b, v22.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
	pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
	eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
	aese v2.16b, v20.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
	aese v0.16b, v23.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
	rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
	pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
	pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
	rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
	pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
	eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
	mov d4, v5.d[1] // GHASH block 4k+1 - mid
	aese v1.16b, v21.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
	aese v3.16b, v20.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
	eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
	aese v2.16b, v21.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
	aese v1.16b, v22.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
	mov d8, v6.d[1] // GHASH block 4k+2 - mid
	aese v3.16b, v21.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
	eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
	aese v2.16b, v22.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
	aese v0.16b, v24.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
	eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
	aese v3.16b, v22.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
	pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
	aese v0.16b, v25.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
	aese v3.16b, v23.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
	ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
	aese v1.16b, v23.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
	aese v0.16b, v26.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
	aese v2.16b, v23.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
	aese v1.16b, v24.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
	eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
	pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
	pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
	aese v1.16b, v25.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
	pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
	eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
	aese v3.16b, v24.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
	ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext
	aese v1.16b, v26.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
	mov d4, v7.d[1] // GHASH block 4k+3 - mid
	aese v2.16b, v24.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
	eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
	pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
	pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
	eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
	aese v2.16b, v25.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
	eor x19, x19, x13 // AES block 4k+5 - round N low
	aese v2.16b, v26.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
	eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
	aese v3.16b, v25.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
	eor x21, x21, x13 // AES block 4k+6 - round N low
	aese v3.16b, v26.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
	movi v8.8b, #0xc2		// GCM reduction polynomial constant
	pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
	eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
	cmp x17, #12 // setup flags for AES-128/192/256 check
	fmov d5, x19 // AES block 4k+5 - mov low
	ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext
	b.lt Lenc_main_loop_continue // branch if AES-128
	aese v1.16b, v27.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
	aese v0.16b, v27.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
	aese v2.16b, v27.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
	aese v3.16b, v27.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
	aese v0.16b, v28.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
	aese v1.16b, v28.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
	aese v2.16b, v28.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
	aese v3.16b, v28.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
	b.eq Lenc_main_loop_continue // branch if AES-192
	aese v0.16b, v29.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
	aese v1.16b, v29.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
	aese v2.16b, v29.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
	aese v3.16b, v29.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
	aese v1.16b, v30.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
	aese v0.16b, v30.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
	aese v2.16b, v30.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
	aese v3.16b, v30.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
// Common loop tail: polynomial modulo reduction of the accumulated GHASH,
// final AES round, XOR with plaintext and store of blocks 4k+4..4k+7.
Lenc_main_loop_continue:
	shl d8, d8, #56 // mod_constant
	eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
	eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
	add w12, w12, #1 // CTR block 4k+3
	eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
	add x0, x0, #64 // AES input_ptr update
	pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
	rev w9, w12 // CTR block 4k+8
	ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
	eor x6, x6, x13 // AES block 4k+4 - round N low
	eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
	eor x7, x7, x14 // AES block 4k+4 - round N high
	fmov d4, x6 // AES block 4k+4 - mov low
	orr x9, x11, x9, lsl #32 // CTR block 4k+8
	eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid
	eor x20, x20, x14 // AES block 4k+5 - round N high
	eor x24, x24, x14 // AES block 4k+7 - round N high
	add w12, w12, #1 // CTR block 4k+8
	aese v0.16b, v31.16b // AES block 4k+4 - round N-1
	fmov v4.d[1], x7 // AES block 4k+4 - mov high
	eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
	fmov d7, x23 // AES block 4k+7 - mov low
	aese v1.16b, v31.16b // AES block 4k+5 - round N-1
	fmov v5.d[1], x20 // AES block 4k+5 - mov high
	fmov d6, x21 // AES block 4k+6 - mov low
	cmp x0, x5 // LOOP CONTROL
	fmov v6.d[1], x22 // AES block 4k+6 - mov high
	pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
	eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result
	fmov d0, x10 // CTR block 4k+8
	fmov v0.d[1], x9 // CTR block 4k+8
	rev w9, w12 // CTR block 4k+9
	add w12, w12, #1 // CTR block 4k+9
	eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result
	fmov d1, x10 // CTR block 4k+9
	orr x9, x11, x9, lsl #32 // CTR block 4k+9
	fmov v1.d[1], x9 // CTR block 4k+9
	aese v2.16b, v31.16b // AES block 4k+6 - round N-1
	rev w9, w12 // CTR block 4k+10
	st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result
	orr x9, x11, x9, lsl #32 // CTR block 4k+10
	eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
	fmov v7.d[1], x24 // AES block 4k+7 - mov high
	ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
	st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result
	add w12, w12, #1 // CTR block 4k+10
	aese v3.16b, v31.16b // AES block 4k+7 - round N-1
	eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result
	fmov d2, x10 // CTR block 4k+10
	st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result
	fmov v2.d[1], x9 // CTR block 4k+10
	rev w9, w12 // CTR block 4k+11
	eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
	orr x9, x11, x9, lsl #32 // CTR block 4k+11
	eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result
	st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result
	b.lt Lenc_main_loop
// Pre-tail: GHASH the last four full ciphertext blocks without loading any
// further plaintext, leaving v0-v3 as encrypted counter blocks for the tail.
Lenc_prepretail: // PREPRETAIL
	aese v1.16b, v18.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
	rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
	aese v2.16b, v18.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
	fmov d3, x10 // CTR block 4k+3
	aese v0.16b, v18.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
	rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
	fmov v3.d[1], x9 // CTR block 4k+3
	ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
	aese v2.16b, v19.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
	aese v0.16b, v19.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
	eor v4.16b, v4.16b, v11.16b // PRE 1
	rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
	aese v2.16b, v20.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
	aese v3.16b, v18.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
	mov d10, v17.d[1] // GHASH block 4k - mid
	aese v1.16b, v19.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
	pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
	mov d8, v4.d[1] // GHASH block 4k - mid
	pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
	aese v2.16b, v21.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
	aese v1.16b, v20.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
	eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
	aese v0.16b, v20.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
	aese v3.16b, v19.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
	aese v1.16b, v21.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
	pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
	pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
	pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
	aese v3.16b, v20.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
	eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
	mov d4, v5.d[1] // GHASH block 4k+1 - mid
	aese v0.16b, v21.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
	eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
	aese v3.16b, v21.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
	eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
	mov d8, v6.d[1] // GHASH block 4k+2 - mid
	aese v0.16b, v22.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
	rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
	aese v3.16b, v22.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
	pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
	eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
	add w12, w12, #1 // CTR block 4k+3
	pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
	aese v3.16b, v23.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
	aese v2.16b, v22.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
	eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
	pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
	eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
	ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
	aese v2.16b, v23.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
	eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
	mov d4, v7.d[1] // GHASH block 4k+3 - mid
	aese v1.16b, v22.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
	pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
	eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
	pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
	aese v1.16b, v23.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
	pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
	eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
	aese v0.16b, v23.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
	aese v1.16b, v24.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
	aese v2.16b, v24.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
	aese v0.16b, v24.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
	movi v8.8b, #0xc2		// GCM reduction polynomial constant
	aese v3.16b, v24.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
	aese v1.16b, v25.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
	eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
	aese v0.16b, v25.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
	aese v3.16b, v25.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
	shl d8, d8, #56 // mod_constant
	aese v1.16b, v26.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
	eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
	pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
	aese v3.16b, v26.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
	cmp x17, #12 // setup flags for AES-128/192/256 check
	aese v0.16b, v26.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
	eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
	aese v2.16b, v25.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
	eor v10.16b, v10.16b, v9.16b // karatsuba tidy up
	aese v2.16b, v26.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
	pmull v4.1q, v9.1d, v8.1d
	ext v9.16b, v9.16b, v9.16b, #8
	eor v10.16b, v10.16b, v11.16b
	b.lt Lenc_finish_prepretail // branch if AES-128
	aese v1.16b, v27.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
	aese v3.16b, v27.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
	aese v0.16b, v27.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
	aese v2.16b, v27.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
	aese v3.16b, v28.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
	aese v1.16b, v28.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
	aese v0.16b, v28.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
	aese v2.16b, v28.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
	b.eq Lenc_finish_prepretail // branch if AES-192
	aese v1.16b, v29.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
	aese v0.16b, v29.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
	aese v3.16b, v29.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
	aese v2.16b, v29.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
	aese v1.16b, v30.16b
	aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
	aese v0.16b, v30.16b
	aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
	aese v3.16b, v30.16b
	aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
	aese v2.16b, v30.16b
	aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
// Complete the modulo reduction and the final AES round for the tail blocks.
Lenc_finish_prepretail:
	eor v10.16b, v10.16b, v4.16b
	eor v10.16b, v10.16b, v9.16b
	pmull v4.1q, v10.1d, v8.1d
	ext v10.16b, v10.16b, v10.16b, #8
	aese v1.16b, v31.16b // AES block 4k+5 - round N-1
	eor v11.16b, v11.16b, v4.16b
	aese v3.16b, v31.16b // AES block 4k+7 - round N-1
	aese v0.16b, v31.16b // AES block 4k+4 - round N-1
	aese v2.16b, v31.16b // AES block 4k+6 - round N-1
	eor v11.16b, v11.16b, v10.16b
// Tail: process the remaining 1..64 bytes; the branches below dispatch on how
// many whole 16-byte blocks are left, with the final (possibly partial)
// block handled via a byte mask at Lenc_blocks_less_than_1.
Lenc_tail: // TAIL
	ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
	sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
	ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext
	eor x6, x6, x13 // AES block 4k+4 - round N low
	eor x7, x7, x14 // AES block 4k+4 - round N high
	cmp x5, #48
	fmov d4, x6 // AES block 4k+4 - mov low
	fmov v4.d[1], x7 // AES block 4k+4 - mov high
	eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result
	b.gt Lenc_blocks_more_than_3
	cmp x5, #32
	mov v3.16b, v2.16b
	movi v11.8b, #0
	movi v9.8b, #0
	sub w12, w12, #1
	mov v2.16b, v1.16b
	movi v10.8b, #0
	b.gt Lenc_blocks_more_than_2
	mov v3.16b, v1.16b
	sub w12, w12, #1
	cmp x5, #16
	b.gt Lenc_blocks_more_than_1
	sub w12, w12, #1
	b Lenc_blocks_less_than_1
Lenc_blocks_more_than_3: // blocks left > 3
	st1 { v5.16b}, [x2], #16 // AES final-3 block  - store result
	ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high
	rev64 v4.16b, v5.16b // GHASH final-3 block
	eor x6, x6, x13 // AES final-2 block - round N low
	eor v4.16b, v4.16b, v8.16b // feed in partial tag
	eor x7, x7, x14 // AES final-2 block - round N high
	mov d22, v4.d[1] // GHASH final-3 block - mid
	fmov d5, x6 // AES final-2 block - mov low
	fmov v5.d[1], x7 // AES final-2 block - mov high
	eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
	movi v8.8b, #0 // suppress further partial tag feed in
	mov d10, v17.d[1] // GHASH final-3 block - mid
	pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
	pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
	pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
	eor v5.16b, v5.16b, v1.16b // AES final-2 block - result
Lenc_blocks_more_than_2: // blocks left > 2
	st1 { v5.16b}, [x2], #16 // AES final-2 block - store result
	ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high
	rev64 v4.16b, v5.16b // GHASH final-2 block
	eor x6, x6, x13 // AES final-1 block - round N low
	eor v4.16b, v4.16b, v8.16b // feed in partial tag
	fmov d5, x6 // AES final-1 block - mov low
	eor x7, x7, x14 // AES final-1 block - round N high
	fmov v5.d[1], x7 // AES final-1 block - mov high
	movi v8.8b, #0 // suppress further partial tag feed in
	pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
	mov d22, v4.d[1] // GHASH final-2 block - mid
	pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
	eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
	eor v5.16b, v5.16b, v2.16b // AES final-1 block - result
	eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
	pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
	eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
	eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
Lenc_blocks_more_than_1: // blocks left >  1
	st1 { v5.16b}, [x2], #16 // AES final-1 block - store result
	rev64 v4.16b, v5.16b // GHASH final-1 block
	ldp x6, x7, [x0], #16 // AES final block - load input low & high
	eor v4.16b, v4.16b, v8.16b // feed in partial tag
	movi v8.8b, #0 // suppress further partial tag feed in
	eor x6, x6, x13 // AES final block - round N low
	mov d22, v4.d[1] // GHASH final-1 block - mid
	pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
	eor x7, x7, x14 // AES final block - round N high
	eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
	eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
	ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
	fmov d5, x6 // AES final block - mov low
	fmov v5.d[1], x7 // AES final block - mov high
	pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
	pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
	eor v5.16b, v5.16b, v3.16b // AES final block - result
	eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
	eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
Lenc_blocks_less_than_1: // blocks left <= 1
	and x1, x1, #127 // bit_length %= 128
	mvn x13, xzr // rkN_l = 0xffffffffffffffff
	sub x1, x1, #128 // bit_length -= 128
	neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
	ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored
	mvn x14, xzr // rkN_h = 0xffffffffffffffff
	and x1, x1, #127 // bit_length %= 128
	lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
	cmp x1, #64
	csel x6, x13, x14, lt
	csel x7, x14, xzr, lt
	fmov d0, x6 // ctr0b is mask for last block
	fmov v0.d[1], x7
	and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
	rev64 v4.16b, v5.16b // GHASH final block
	eor v4.16b, v4.16b, v8.16b // feed in partial tag
	bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing
	pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
	mov d8, v4.d[1] // GHASH final block - mid
	rev w9, w12
	pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
	eor v9.16b, v9.16b, v20.16b // GHASH final block - high
	eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
	pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
	eor v11.16b, v11.16b, v21.16b // GHASH final block - low
	eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
	movi v8.8b, #0xc2		// GCM reduction polynomial constant
	eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
	shl d8, d8, #56 // mod_constant
	eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
	pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
	ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
	eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
	eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
	pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
	ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
	str w9, [x16, #12] // store the updated counter
	st1 { v5.16b}, [x2] // store all 16B
	eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
	eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
	ext v11.16b, v11.16b, v11.16b, #8
	rev64 v11.16b, v11.16b
	mov x0, x15			// return byte_len in x0
	st1 { v11.16b }, [x3]		// write back updated GHASH accumulator Xi
	ldp x19, x20, [sp, #16]
	ldp x21, x22, [sp, #32]
	ldp x23, x24, [sp, #48]
	ldp d8, d9, [sp, #64]
	ldp d10, d11, [sp, #80]
	ldp d12, d13, [sp, #96]
	ldp d14, d15, [sp, #112]
	ldp x29, x30, [sp], #128
	AARCH64_VALIDATE_LINK_REGISTER	// PAC check matching the entry signing
	ret
.globl aes_gcm_dec_kernel
.def aes_gcm_dec_kernel
.type 32
.endef
.align 4
aes_gcm_dec_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ldr q26, [x8, #128] // load rk8
sub x5, x5, #1 // byte_len - 1
ldr q25, [x8, #112] // load rk7
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x4, x0, x1, lsr #3 // end_input_ptr
ldr q24, [x8, #96] // load rk6
lsr x12, x11, #32
ldr q23, [x8, #80] // load rk5
orr w11, w11, w11
ldr q21, [x8, #48] // load rk3
add x5, x5, x0
rev w12, w12 // rev_ctr32
add w12, w12, #1 // increment rev_ctr32
fmov d3, x10 // CTR block 3
rev w9, w12 // CTR block 1
add w12, w12, #1 // CTR block 1
fmov d1, x10 // CTR block 1
orr x9, x11, x9, lsl #32 // CTR block 1
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
fmov d2, x10 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 3
ldr q18, [x8, #0] // load rk0
fmov v3.d[1], x9 // CTR block 3
add w12, w12, #1 // CTR block 3
ldr q22, [x8, #64] // load rk4
ldr q19, [x8, #16] // load rk1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q20, [x8, #32] // load rk2
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q27, [x8, #144] // load rk9
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q28, [x8, #160] // load rk10
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
ldr q29, [x8, #176] // load rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
b.lt Ldec_finish_first_blocks // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
b.eq Ldec_finish_first_blocks // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Ldec_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v2.16b, v31.16b // AES block 2 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
aese v3.16b, v31.16b // AES block 3 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
b.ge Ldec_tail // handle tail
ldr q4, [x0, #0] // AES block 0 - load ciphertext
ldr q5, [x0, #16] // AES block 1 - load ciphertext
rev w9, w12 // CTR block 4
eor v0.16b, v4.16b, v0.16b // AES block 0 - result
eor v1.16b, v5.16b, v1.16b // AES block 1 - result
rev64 v5.16b, v5.16b // GHASH block 1
ldr q7, [x0, #48] // AES block 3 - load ciphertext
mov x7, v0.d[1] // AES block 0 - mov high
mov x6, v0.d[0] // AES block 0 - mov low
rev64 v4.16b, v4.16b // GHASH block 0
add w12, w12, #1 // CTR block 4
fmov d0, x10 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
mov x19, v1.d[0] // AES block 1 - mov low
orr x9, x11, x9, lsl #32 // CTR block 5
mov x20, v1.d[1] // AES block 1 - mov high
eor x7, x7, x14 // AES block 0 - round N high
eor x6, x6, x13 // AES block 0 - round N low
stp x6, x7, [x2], #16 // AES block 0 - store result
fmov d1, x10 // CTR block 5
ldr q6, [x0, #32] // AES block 2 - load ciphertext
add x0, x0, #64 // AES input_ptr update
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
add w12, w12, #1 // CTR block 6
eor x19, x19, x13 // AES block 1 - round N low
orr x9, x11, x9, lsl #32 // CTR block 6
eor x20, x20, x14 // AES block 1 - round N high
stp x19, x20, [x2], #16 // AES block 1 - store result
eor v2.16b, v6.16b, v2.16b // AES block 2 - result
cmp x0, x5 // check if we have <= 8 blocks
b.ge Ldec_prepretail // do prepretail
Ldec_main_loop: // main loop start
mov x21, v2.d[0] // AES block 4k+2 - mov low
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
eor v4.16b, v4.16b, v11.16b // PRE 1
rev w9, w12 // CTR block 4k+7
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x23, v3.d[0] // AES block 4k+3 - mov low
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov v3.d[1], x9 // CTR block 4k+7
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor x22, x22, x14 // AES block 4k+2 - round N high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
rev64 v6.16b, v6.16b // GHASH block 4k+2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x21, x21, x13 // AES block 4k+2 - round N low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
rev64 v7.16b, v7.16b // GHASH block 4k+3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor x23, x23, x13 // AES block 4k+3 - round N low
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor x24, x24, x14 // AES block 4k+3 - round N high
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
add w12, w12, #1 // CTR block 4k+7
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
rev w9, w12 // CTR block 4k+8
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
add w12, w12, #1 // CTR block 4k+8
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
movi v8.8b, #0xc2
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
b.lt Ldec_main_loop_continue // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Ldec_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_main_loop_continue:
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext
eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext
ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext
mov x7, v0.d[1] // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
add x0, x0, #64 // AES input_ptr update
mov x6, v0.d[0] // AES block 4k+4 - mov low
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result
rev w9, w12 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+9
cmp x0, x5 // LOOP CONTROL
add w12, w12, #1 // CTR block 4k+9
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
mov x20, v1.d[1] // AES block 4k+5 - mov high
eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
mov x19, v1.d[0] // AES block 4k+5 - mov low
fmov d1, x10 // CTR block 4k+9
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
fmov v1.d[1], x9 // CTR block 4k+9
rev w9, w12 // CTR block 4k+10
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+10
rev64 v5.16b, v5.16b // GHASH block 4k+5
eor x20, x20, x14 // AES block 4k+5 - round N high
stp x6, x7, [x2], #16 // AES block 4k+4 - store result
eor x19, x19, x13 // AES block 4k+5 - round N low
stp x19, x20, [x2], #16 // AES block 4k+5 - store result
rev64 v4.16b, v4.16b // GHASH block 4k+4
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
b.lt Ldec_main_loop
Ldec_prepretail: // PREPRETAIL
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
mov x21, v2.d[0] // AES block 4k+2 - mov low
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
rev w9, w12 // CTR block 4k+7
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v6.16b, v6.16b // GHASH block 4k+2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
mov x23, v3.d[0] // AES block 4k+3 - mov low
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
fmov v3.d[1], x9 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
rev64 v7.16b, v7.16b // GHASH block 4k+3
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
b.lt Ldec_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
b.eq Ldec_finish_prepretail // branch if AES-192
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_finish_prepretail:
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor x22, x22, x14 // AES block 4k+2 - round N high
eor x23, x23, x13 // AES block 4k+3 - round N low
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
add w12, w12, #1 // CTR block 4k+7
eor x21, x21, x13 // AES block 4k+2 - round N low
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor x24, x24, x14 // AES block 4k+3 - round N high
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
Ldec_tail: // TAIL
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext
eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result
mov x6, v0.d[0] // AES block 4k+4 - mov low
mov x7, v0.d[1] // AES block 4k+4 - mov high
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
cmp x5, #48
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
b.gt Ldec_blocks_more_than_3
sub w12, w12, #1
mov v3.16b, v2.16b
movi v10.8b, #0
movi v11.8b, #0
cmp x5, #32
movi v9.8b, #0
mov v2.16b, v1.16b
b.gt Ldec_blocks_more_than_2
sub w12, w12, #1
mov v3.16b, v1.16b
cmp x5, #16
b.gt Ldec_blocks_more_than_1
sub w12, w12, #1
b Ldec_blocks_less_than_1
Ldec_blocks_more_than_3: // blocks left > 3
rev64 v4.16b, v5.16b // GHASH final-3 block
ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext
stp x6, x7, [x2], #16 // AES final-3 block - store result
mov d10, v17.d[1] // GHASH final-3 block - mid
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor v0.16b, v5.16b, v1.16b // AES final-2 block - result
mov d22, v4.d[1] // GHASH final-3 block - mid
mov x6, v0.d[0] // AES final-2 block - mov low
mov x7, v0.d[1] // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor x6, x6, x13 // AES final-2 block - round N low
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
eor x7, x7, x14 // AES final-2 block - round N high
Ldec_blocks_more_than_2: // blocks left > 2
rev64 v4.16b, v5.16b // GHASH final-2 block
ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
stp x6, x7, [x2], #16 // AES final-2 block - store result
eor v0.16b, v5.16b, v2.16b // AES final-1 block - result
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
mov x6, v0.d[0] // AES final-1 block - mov low
mov x7, v0.d[1] // AES final-1 block - mov high
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
movi v8.8b, #0 // suppress further partial tag feed in
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
eor x6, x6, x13 // AES final-1 block - round N low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
eor x7, x7, x14 // AES final-1 block - round N high
Ldec_blocks_more_than_1: // blocks left > 1
stp x6, x7, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
mov d22, v4.d[1] // GHASH final-1 block - mid
eor v0.16b, v5.16b, v3.16b // AES final block - result
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
mov x6, v0.d[0] // AES final block - mov low
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
mov x7, v0.d[1] // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
eor x6, x6, x13 // AES final block - round N low
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor x7, x7, x14 // AES final block - round N high
Ldec_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x14, xzr // rkN_h = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
ldp x4, x5, [x2] // load existing bytes we need to not overwrite
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x9, x13, x14, lt
csel x10, x14, xzr, lt
fmov d0, x9 // ctr0b is mask for last block
and x6, x6, x9
mov v0.d[1], x10
bic x4, x4, x9 // mask out low existing bytes
rev w9, w12
bic x5, x5, x10 // mask out high existing bytes
orr x6, x6, x4
and x7, x7, x10
orr x7, x7, x5
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
mov d8, v4.d[1] // GHASH final block - mid
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
stp x6, x7, [x2]
str w9, [x16, #12] // store the updated counter
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Prom11111/plumbingtech | 60,192 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha256-armv4-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ https://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ See the License for the specific language governing permissions and
@ limitations under the License.
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project.
@ ====================================================================
@ SHA256 block procedure for ARMv4. May 2007.
@ Performance is ~2x better than gcc 3.4 generated code and in "abso-
@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
@ byte [on single-issue Xscale PXA250 core].
@ July 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
@ Cortex A8 core and ~20 cycles per processed byte.
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 16%
@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
@ September 2013.
@
@ Add NEON implementation. On Cortex A8 it was measured to process one
@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
@ code (meaning that latter performs sub-optimally, nothing was done
@ about it).
@ May 2014.
@
@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
#ifdef __KERNEL__
# define __ARM_ARCH __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
#endif
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors. It does have ARMv8-only code, but those
@ instructions are manually-encoded. (See unsha256.)
.arch armv7-a
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.type K256,%object
.align 5
@ K256: the 64 SHA-256 round constants (FIPS 180-4, section 4.2.2) —
@ the first 32 bits of the fractional parts of the cube roots of the
@ first 64 primes. Do not modify: the round code below also relies on
@ the low byte of the final constant (0xc67178f2 -> 0xf2) as its
@ "done?" sentinel when walking this table with a post-incremented
@ pointer (see the `cmp rN,#0xf2` checks in the unrolled rounds).
K256:
.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size K256,.-K256
@ Zero word just past the table (outside .size) — kept by upstream as a
@ terminator/padding word; presumably a read-past guard for the table
@ walk above. Left byte-identical.
.word 0 @ terminator
.align 5
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,%function
sha256_block_data_order_nohw:
add r2,r1,r2,lsl#6 @ len to point at the end of inp
stmdb sp!,{r0,r1,r2,r4-r11,lr}
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
adr r14,K256
sub sp,sp,#16*4 @ alloca(X[16])
.Loop:
# if __ARM_ARCH>=7
ldr r2,[r1],#4
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ magic
eor r12,r12,r12
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 0
# if 0==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 0
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 0==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r8,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#0*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 0==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 0<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#2*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#15*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 1
# if 1==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 1
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 1==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r7,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#1*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 1==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 1<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#3*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#0*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 2
# if 2==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 2
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 2==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r6,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#2*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 2==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 2<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#4*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#1*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 3
# if 3==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 3
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 3==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r5,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#3*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 3==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 3<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#5*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#2*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 4
# if 4==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 4
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 4==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r4,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#4*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 4==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 4<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#6*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#3*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 5
# if 5==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 5==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r11,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#5*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 5==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 5<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#7*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#4*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 6
# if 6==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 6
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 6==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r10,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#6*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 6==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 6<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#8*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#5*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 7
# if 7==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 7==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r9,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#7*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 7==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 7<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#9*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#6*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 8
# if 8==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 8
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 8==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r8,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#8*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 8==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 8<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#10*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#7*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 9
# if 9==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 9
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 9==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r7,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#9*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 9==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 9<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#11*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#8*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 10
# if 10==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 10
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 10==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r6,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#10*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 10==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 10<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#12*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#9*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 11
# if 11==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 11
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 11==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r5,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#11*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 11==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 11<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#13*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#10*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 12
# if 12==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 12
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 12==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r4,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#12*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 12==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 12<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#14*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#11*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 13
# if 13==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 13
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 13==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r11,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#13*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 13==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 13<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#15*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#12*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 14
# if 14==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 14
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 14==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r10,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#14*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 14==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 14<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#0*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#13*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 15
# if 15==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 15
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 15==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r9,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#15*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 15==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 15<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#1*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#14*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
.Lrounds_16_xx:
@ ldr r2,[sp,#1*4] @ 16
@ ldr r1,[sp,#14*4]
mov r0,r2,ror#7
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#0*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#9*4]
add r12,r12,r0
eor r0,r8,r8,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r8,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#0*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 16==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 16<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#2*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#15*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#2*4] @ 17
@ ldr r1,[sp,#15*4]
mov r0,r2,ror#7
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#1*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#10*4]
add r3,r3,r0
eor r0,r7,r7,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r7,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#1*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 17==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 17<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#3*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#0*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#3*4] @ 18
@ ldr r1,[sp,#0*4]
mov r0,r2,ror#7
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#2*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#11*4]
add r12,r12,r0
eor r0,r6,r6,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r6,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#2*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 18==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 18<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#4*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#1*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#4*4] @ 19
@ ldr r1,[sp,#1*4]
mov r0,r2,ror#7
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#3*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#12*4]
add r3,r3,r0
eor r0,r5,r5,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r5,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#3*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 19==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 19<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#5*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#2*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#5*4] @ 20
@ ldr r1,[sp,#2*4]
mov r0,r2,ror#7
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#4*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#13*4]
add r12,r12,r0
eor r0,r4,r4,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r4,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#4*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 20==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 20<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#6*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#3*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#6*4] @ 21
@ ldr r1,[sp,#3*4]
mov r0,r2,ror#7
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#5*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#14*4]
add r3,r3,r0
eor r0,r11,r11,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r11,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#5*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 21==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 21<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#7*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#4*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#7*4] @ 22
@ ldr r1,[sp,#4*4]
mov r0,r2,ror#7
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#6*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#15*4]
add r12,r12,r0
eor r0,r10,r10,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r10,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#6*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 22==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 22<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#8*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#5*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#8*4] @ 23
@ ldr r1,[sp,#5*4]
mov r0,r2,ror#7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#7*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#0*4]
add r3,r3,r0
eor r0,r9,r9,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r9,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#7*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 23==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 23<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#9*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#6*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#9*4] @ 24
@ ldr r1,[sp,#6*4]
mov r0,r2,ror#7
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#8*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#1*4]
add r12,r12,r0
eor r0,r8,r8,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r8,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#8*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 24==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 24<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#10*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#7*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#10*4] @ 25
@ ldr r1,[sp,#7*4]
mov r0,r2,ror#7
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#9*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#2*4]
add r3,r3,r0
eor r0,r7,r7,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r7,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#9*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 25==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 25<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#11*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#8*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#11*4] @ 26
@ ldr r1,[sp,#8*4]
mov r0,r2,ror#7
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#10*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#3*4]
add r12,r12,r0
eor r0,r6,r6,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r6,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#10*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 26==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 26<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#12*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#9*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#12*4] @ 27
@ ldr r1,[sp,#9*4]
mov r0,r2,ror#7
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#11*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#4*4]
add r3,r3,r0
eor r0,r5,r5,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r5,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#11*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 27==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 27<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#13*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#10*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#13*4] @ 28
@ ldr r1,[sp,#10*4]
mov r0,r2,ror#7
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#12*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#5*4]
add r12,r12,r0
eor r0,r4,r4,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r4,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#12*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 28==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 28<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#14*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#11*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#14*4] @ 29
@ ldr r1,[sp,#11*4]
mov r0,r2,ror#7
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#13*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#6*4]
add r3,r3,r0
eor r0,r11,r11,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r11,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#13*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 29==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 29<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#15*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#12*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#15*4] @ 30
@ ldr r1,[sp,#12*4]
mov r0,r2,ror#7
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#14*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#7*4]
add r12,r12,r0
eor r0,r10,r10,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r10,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#14*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 30==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 30<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#0*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#13*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#0*4] @ 31
@ ldr r1,[sp,#13*4]
mov r0,r2,ror#7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#15*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#8*4]
add r3,r3,r0
eor r0,r9,r9,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r9,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#15*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 31==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 31<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#1*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#14*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
ite eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r3,[sp,#16*4] @ pull ctx
bne .Lrounds_16_xx
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r0,[r3,#0]
ldr r2,[r3,#4]
ldr r12,[r3,#8]
add r4,r4,r0
ldr r0,[r3,#12]
add r5,r5,r2
ldr r2,[r3,#16]
add r6,r6,r12
ldr r12,[r3,#20]
add r7,r7,r0
ldr r0,[r3,#24]
add r8,r8,r2
ldr r2,[r3,#28]
add r9,r9,r12
ldr r1,[sp,#17*4] @ pull inp
ldr r12,[sp,#18*4] @ pull inp+len
add r10,r10,r0
add r11,r11,r2
stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
cmp r1,r12
sub r14,r14,#256 @ rewind Ktbl
bne .Loop
add sp,sp,#19*4 @ destroy frame
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.LK256_shortcut_neon:
@ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode.
#if defined(__thumb2__)
.word K256-(.LK256_add_neon+4)
#else
.word K256-(.LK256_add_neon+8)
#endif
.globl sha256_block_data_order_neon
.hidden sha256_block_data_order_neon
.type sha256_block_data_order_neon,%function
.align 5
.skip 16
sha256_block_data_order_neon:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r11,sp,#16*4+16
@ K256 is just at the boundary of being easily referenced by an ADR from
@ this function. In Arm mode, when building with __ARM_ARCH=6, it does
@ not fit. By moving code around, we could make it fit, but this is too
@ fragile. For simplicity, just load the offset from
@ .LK256_shortcut_neon.
@
@ TODO(davidben): adrl would avoid a load, but clang-assembler does not
@ support it. We might be able to emulate it with a macro, but Android's
@ did not work when I tried it.
@ https://android.googlesource.com/platform/ndk/+/refs/heads/main/docs/ClangMigration.md#arm
ldr r14,.LK256_shortcut_neon
.LK256_add_neon:
add r14,pc,r14
bic r11,r11,#15 @ align for 128-bit stores
mov r12,sp
mov sp,r11 @ alloca
add r2,r1,r2,lsl#6 @ len to point at the end of inp
vld1.8 {q0},[r1]!
vld1.8 {q1},[r1]!
vld1.8 {q2},[r1]!
vld1.8 {q3},[r1]!
vld1.32 {q8},[r14,:128]!
vld1.32 {q9},[r14,:128]!
vld1.32 {q10},[r14,:128]!
vld1.32 {q11},[r14,:128]!
vrev32.8 q0,q0 @ yes, even on
str r0,[sp,#64]
vrev32.8 q1,q1 @ big-endian
str r1,[sp,#68]
mov r1,sp
vrev32.8 q2,q2
str r2,[sp,#72]
vrev32.8 q3,q3
str r12,[sp,#76] @ save original sp
vadd.i32 q8,q8,q0
vadd.i32 q9,q9,q1
vst1.32 {q8},[r1,:128]!
vadd.i32 q10,q10,q2
vst1.32 {q9},[r1,:128]!
vadd.i32 q11,q11,q3
vst1.32 {q10},[r1,:128]!
vst1.32 {q11},[r1,:128]!
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
sub r1,r1,#64
ldr r2,[sp,#0]
eor r12,r12,r12
eor r3,r5,r6
b .L_00_48
.align 4
.L_00_48:
vext.8 q8,q0,q1,#4
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
vext.8 q9,q2,q3,#4
add r4,r4,r12
and r2,r2,r8
eor r12,r0,r8,ror#19
vshr.u32 q10,q8,#7
eor r0,r4,r4,ror#11
eor r2,r2,r10
vadd.i32 q0,q0,q9
add r11,r11,r12,ror#6
eor r12,r4,r5
vshr.u32 q9,q8,#3
eor r0,r0,r4,ror#20
add r11,r11,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#4]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
veor q9,q9,q10
add r10,r10,r2
vsli.32 q11,q8,#14
eor r2,r8,r9
eor r0,r7,r7,ror#5
vshr.u32 d24,d7,#17
add r11,r11,r3
and r2,r2,r7
veor q9,q9,q11
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
vsli.32 d24,d7,#15
eor r2,r2,r9
add r10,r10,r3,ror#6
vshr.u32 d25,d7,#10
eor r3,r11,r4
eor r0,r0,r11,ror#20
vadd.i32 q0,q0,q9
add r10,r10,r2
ldr r2,[sp,#8]
veor d25,d25,d24
and r12,r12,r3
add r6,r6,r10
vshr.u32 d24,d7,#19
add r10,r10,r0,ror#2
eor r12,r12,r4
vsli.32 d24,d7,#13
add r9,r9,r2
eor r2,r7,r8
veor d25,d25,d24
eor r0,r6,r6,ror#5
add r10,r10,r12
vadd.i32 d0,d0,d25
and r2,r2,r6
eor r12,r0,r6,ror#19
vshr.u32 d24,d0,#17
eor r0,r10,r10,ror#11
eor r2,r2,r8
vsli.32 d24,d0,#15
add r9,r9,r12,ror#6
eor r12,r10,r11
vshr.u32 d25,d0,#10
eor r0,r0,r10,ror#20
add r9,r9,r2
veor d25,d25,d24
ldr r2,[sp,#12]
and r3,r3,r12
vshr.u32 d24,d0,#19
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
vld1.32 {q8},[r14,:128]!
add r8,r8,r2
vsli.32 d24,d0,#13
eor r2,r6,r7
eor r0,r5,r5,ror#5
veor d25,d25,d24
add r9,r9,r3
and r2,r2,r5
vadd.i32 d1,d1,d25
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
vadd.i32 q8,q8,q0
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#16]
and r12,r12,r3
add r4,r4,r8
vst1.32 {q8},[r1,:128]!
add r8,r8,r0,ror#2
eor r12,r12,r10
vext.8 q8,q1,q2,#4
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
vext.8 q9,q3,q0,#4
add r8,r8,r12
and r2,r2,r4
eor r12,r0,r4,ror#19
vshr.u32 q10,q8,#7
eor r0,r8,r8,ror#11
eor r2,r2,r6
vadd.i32 q1,q1,q9
add r7,r7,r12,ror#6
eor r12,r8,r9
vshr.u32 q9,q8,#3
eor r0,r0,r8,ror#20
add r7,r7,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#20]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
veor q9,q9,q10
add r6,r6,r2
vsli.32 q11,q8,#14
eor r2,r4,r5
eor r0,r11,r11,ror#5
vshr.u32 d24,d1,#17
add r7,r7,r3
and r2,r2,r11
veor q9,q9,q11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
vsli.32 d24,d1,#15
eor r2,r2,r5
add r6,r6,r3,ror#6
vshr.u32 d25,d1,#10
eor r3,r7,r8
eor r0,r0,r7,ror#20
vadd.i32 q1,q1,q9
add r6,r6,r2
ldr r2,[sp,#24]
veor d25,d25,d24
and r12,r12,r3
add r10,r10,r6
vshr.u32 d24,d1,#19
add r6,r6,r0,ror#2
eor r12,r12,r8
vsli.32 d24,d1,#13
add r5,r5,r2
eor r2,r11,r4
veor d25,d25,d24
eor r0,r10,r10,ror#5
add r6,r6,r12
vadd.i32 d2,d2,d25
and r2,r2,r10
eor r12,r0,r10,ror#19
vshr.u32 d24,d2,#17
eor r0,r6,r6,ror#11
eor r2,r2,r4
vsli.32 d24,d2,#15
add r5,r5,r12,ror#6
eor r12,r6,r7
vshr.u32 d25,d2,#10
eor r0,r0,r6,ror#20
add r5,r5,r2
veor d25,d25,d24
ldr r2,[sp,#28]
and r3,r3,r12
vshr.u32 d24,d2,#19
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
vld1.32 {q8},[r14,:128]!
add r4,r4,r2
vsli.32 d24,d2,#13
eor r2,r10,r11
eor r0,r9,r9,ror#5
veor d25,d25,d24
add r5,r5,r3
and r2,r2,r9
vadd.i32 d3,d3,d25
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
vadd.i32 q8,q8,q1
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
vst1.32 {q8},[r1,:128]!
add r4,r4,r0,ror#2
eor r12,r12,r6
vext.8 q8,q2,q3,#4
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
vext.8 q9,q0,q1,#4
add r4,r4,r12
and r2,r2,r8
eor r12,r0,r8,ror#19
vshr.u32 q10,q8,#7
eor r0,r4,r4,ror#11
eor r2,r2,r10
vadd.i32 q2,q2,q9
add r11,r11,r12,ror#6
eor r12,r4,r5
vshr.u32 q9,q8,#3
eor r0,r0,r4,ror#20
add r11,r11,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#36]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
veor q9,q9,q10
add r10,r10,r2
vsli.32 q11,q8,#14
eor r2,r8,r9
eor r0,r7,r7,ror#5
vshr.u32 d24,d3,#17
add r11,r11,r3
and r2,r2,r7
veor q9,q9,q11
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
vsli.32 d24,d3,#15
eor r2,r2,r9
add r10,r10,r3,ror#6
vshr.u32 d25,d3,#10
eor r3,r11,r4
eor r0,r0,r11,ror#20
vadd.i32 q2,q2,q9
add r10,r10,r2
ldr r2,[sp,#40]
veor d25,d25,d24
and r12,r12,r3
add r6,r6,r10
vshr.u32 d24,d3,#19
add r10,r10,r0,ror#2
eor r12,r12,r4
vsli.32 d24,d3,#13
add r9,r9,r2
eor r2,r7,r8
veor d25,d25,d24
eor r0,r6,r6,ror#5
add r10,r10,r12
vadd.i32 d4,d4,d25
and r2,r2,r6
eor r12,r0,r6,ror#19
vshr.u32 d24,d4,#17
eor r0,r10,r10,ror#11
eor r2,r2,r8
vsli.32 d24,d4,#15
add r9,r9,r12,ror#6
eor r12,r10,r11
vshr.u32 d25,d4,#10
eor r0,r0,r10,ror#20
add r9,r9,r2
veor d25,d25,d24
ldr r2,[sp,#44]
and r3,r3,r12
vshr.u32 d24,d4,#19
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
vld1.32 {q8},[r14,:128]!
add r8,r8,r2
vsli.32 d24,d4,#13
eor r2,r6,r7
eor r0,r5,r5,ror#5
veor d25,d25,d24
add r9,r9,r3
and r2,r2,r5
vadd.i32 d5,d5,d25
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
vadd.i32 q8,q8,q2
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
vst1.32 {q8},[r1,:128]!
add r8,r8,r0,ror#2
eor r12,r12,r10
vext.8 q8,q3,q0,#4
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
vext.8 q9,q1,q2,#4
add r8,r8,r12
and r2,r2,r4
eor r12,r0,r4,ror#19
vshr.u32 q10,q8,#7
eor r0,r8,r8,ror#11
eor r2,r2,r6
vadd.i32 q3,q3,q9
add r7,r7,r12,ror#6
eor r12,r8,r9
vshr.u32 q9,q8,#3
eor r0,r0,r8,ror#20
add r7,r7,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#52]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
veor q9,q9,q10
add r6,r6,r2
vsli.32 q11,q8,#14
eor r2,r4,r5
eor r0,r11,r11,ror#5
vshr.u32 d24,d5,#17
add r7,r7,r3
and r2,r2,r11
veor q9,q9,q11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
vsli.32 d24,d5,#15
eor r2,r2,r5
add r6,r6,r3,ror#6
vshr.u32 d25,d5,#10
eor r3,r7,r8
eor r0,r0,r7,ror#20
vadd.i32 q3,q3,q9
add r6,r6,r2
ldr r2,[sp,#56]
veor d25,d25,d24
and r12,r12,r3
add r10,r10,r6
vshr.u32 d24,d5,#19
add r6,r6,r0,ror#2
eor r12,r12,r8
vsli.32 d24,d5,#13
add r5,r5,r2
eor r2,r11,r4
veor d25,d25,d24
eor r0,r10,r10,ror#5
add r6,r6,r12
vadd.i32 d6,d6,d25
and r2,r2,r10
eor r12,r0,r10,ror#19
vshr.u32 d24,d6,#17
eor r0,r6,r6,ror#11
eor r2,r2,r4
vsli.32 d24,d6,#15
add r5,r5,r12,ror#6
eor r12,r6,r7
vshr.u32 d25,d6,#10
eor r0,r0,r6,ror#20
add r5,r5,r2
veor d25,d25,d24
ldr r2,[sp,#60]
and r3,r3,r12
vshr.u32 d24,d6,#19
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
vld1.32 {q8},[r14,:128]!
add r4,r4,r2
vsli.32 d24,d6,#13
eor r2,r10,r11
eor r0,r9,r9,ror#5
veor d25,d25,d24
add r5,r5,r3
and r2,r2,r9
vadd.i32 d7,d7,d25
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
vadd.i32 q8,q8,q3
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[r14]
and r12,r12,r3
add r8,r8,r4
vst1.32 {q8},[r1,:128]!
add r4,r4,r0,ror#2
eor r12,r12,r6
teq r2,#0 @ check for K256 terminator
ldr r2,[sp,#0]
sub r1,r1,#64
bne .L_00_48
ldr r1,[sp,#68]
ldr r0,[sp,#72]
sub r14,r14,#256 @ rewind r14
teq r1,r0
it eq
subeq r1,r1,#64 @ avoid SEGV
vld1.8 {q0},[r1]! @ load next input block
vld1.8 {q1},[r1]!
vld1.8 {q2},[r1]!
vld1.8 {q3},[r1]!
it ne
strne r1,[sp,#68]
mov r1,sp
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q0,q0
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q0
ldr r2,[sp,#4]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#8]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#12]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#16]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q1,q1
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q1
ldr r2,[sp,#20]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#24]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#28]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q2,q2
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q2
ldr r2,[sp,#36]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#40]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#44]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q3,q3
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q3
ldr r2,[sp,#52]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#56]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#60]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#64]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
ldr r0,[r2,#0]
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r12,[r2,#4]
ldr r3,[r2,#8]
ldr r1,[r2,#12]
add r4,r4,r0 @ accumulate
ldr r0,[r2,#16]
add r5,r5,r12
ldr r12,[r2,#20]
add r6,r6,r3
ldr r3,[r2,#24]
add r7,r7,r1
ldr r1,[r2,#28]
add r8,r8,r0
str r4,[r2],#4
add r9,r9,r12
str r5,[r2],#4
add r10,r10,r3
str r6,[r2],#4
add r11,r11,r1
str r7,[r2],#4
stmia r2,{r8,r9,r10,r11}
ittte ne
movne r1,sp
ldrne r2,[sp,#0]
eorne r12,r12,r12
ldreq sp,[sp,#76] @ restore original sp
itt ne
eorne r3,r5,r6
bne .L_00_48
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
#endif
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
Prom11111/plumbingtech | 5,546 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghash-x86-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
/*
 * void gcm_init_clmul(u128 Htable[], const uint64_t H[2])
 *
 * Stack in:  4(%esp) = Htable, output key schedule
 *            8(%esp) = H, the raw 128-bit GHASH hash key
 *
 * Precomputes the key material used by the PCLMULQDQ-based GHASH:
 * the "twisted" H (halves swapped and shifted left by one bit with a
 * conditional reduction), H^2, and the Karatsuba-folded halves of both.
 * Clobbers: eax, ecx, edx, xmm0-xmm5, flags.
 */
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,@function
.align 16
gcm_init_clmul:
.L_gcm_init_clmul_begin:
movl 4(%esp),%edx # edx = Htable (output)
movl 8(%esp),%eax # eax = H (input key)
call .L000pic # PIC idiom: call pushes the next address...
.L000pic:
popl %ecx # ...pop it to discover our own load address
leal .Lbswap-.L000pic(%ecx),%ecx # ecx = &.Lbswap constant table
movdqu (%eax),%xmm2 # xmm2 = H
pshufd $78,%xmm2,%xmm2 # swap the two 64-bit halves of H
pshufd $255,%xmm2,%xmm4 # broadcast the top dword (sign = H's MSB)
movdqa %xmm2,%xmm3
psllq $1,%xmm2 # H <<= 1 (per-lane)...
pxor %xmm5,%xmm5
psrlq $63,%xmm3 # ...capturing the cross-lane carry bits
pcmpgtd %xmm4,%xmm5 # xmm5 = all-ones iff H's top bit was set
pslldq $8,%xmm3
por %xmm3,%xmm2 # propagate carry into the high half
pand 16(%ecx),%xmm5 # mask reduction constant stored after .Lbswap
pxor %xmm5,%xmm2 # conditional reduction: xmm2 = twisted H
/* Square H with one Karatsuba carry-less multiply: H^2. */
movdqa %xmm2,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3 # xmm3 = H.lo ^ H.hi (Karatsuba term)
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0 # pclmulqdq $0x00,%xmm2,%xmm0: lo*lo
.byte 102,15,58,68,202,17 # pclmulqdq $0x11,%xmm2,%xmm1: hi*hi
.byte 102,15,58,68,220,0 # pclmulqdq $0x00,%xmm4,%xmm3: mid
xorps %xmm0,%xmm3 # mid ^= lo
xorps %xmm1,%xmm3 # mid ^= hi
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1 # 256-bit product now held in xmm1:xmm0
pxor %xmm4,%xmm0
/* Reduce the 256-bit product modulo the GHASH polynomial (two phases). */
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0 # 1st phase
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4 # 2nd phase
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0 # xmm0 = H^2
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3 # Karatsuba pre-processing of H...
movdqu %xmm2,(%edx) # Htable[0] = twisted H
pxor %xmm0,%xmm4 # ...and of H^2
movdqu %xmm0,16(%edx) # Htable[1] = H^2
.byte 102,15,58,15,227,8 # palignr $8,%xmm3,%xmm4: pack both folds
movdqu %xmm4,32(%edx) # Htable[2] = packed Karatsuba factors
ret
.size gcm_init_clmul,.-.L_gcm_init_clmul_begin
/*
 * void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[],
 *                      const uint8_t *inp, size_t len)
 *
 * Stack in (after the four pushes below):
 *   20(%esp) = Xi      running GHASH accumulator, updated in place
 *   24(%esp) = Htable  key schedule produced by gcm_init_clmul
 *   28(%esp) = inp     input blocks
 *   32(%esp) = len     byte length (multiple of the 16-byte block size)
 *
 * Main loop folds TWO input blocks per iteration using H and H^2
 * (aggregated reduction), interleaving the carry-less multiplies with
 * the modular reduction of the previous pair; a single trailing block
 * is handled by .L002odd_tail. Clobbers xmm0-xmm7 and flags.
 */
.globl gcm_ghash_clmul
.hidden gcm_ghash_clmul
.type gcm_ghash_clmul,@function
.align 16
gcm_ghash_clmul:
.L_gcm_ghash_clmul_begin:
pushl %ebp # save callee-saved registers
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%eax # eax = Xi
movl 24(%esp),%edx # edx = Htable
movl 28(%esp),%esi # esi = inp
movl 32(%esp),%ebx # ebx = len
call .L001pic # PIC idiom: recover our own address
.L001pic:
popl %ecx
leal .Lbswap-.L001pic(%ecx),%ecx # ecx = &.Lbswap constant table
movdqu (%eax),%xmm0 # xmm0 = Xi
movdqa (%ecx),%xmm5 # xmm5 = byte-swap shuffle mask
movdqu (%edx),%xmm2 # xmm2 = Htable[0]
.byte 102,15,56,0,197 # pshufb %xmm5,%xmm0: byte-reflect Xi
subl $16,%ebx
jz .L002odd_tail # exactly one block -> single multiply
/* Prime the pipeline: absorb the first block into Xi and start the
 * multiply of the second block by H (Htable[0]). */
movdqu (%esi),%xmm3
movdqu 16(%esi),%xmm6
.byte 102,15,56,0,221 # pshufb %xmm5,%xmm3
.byte 102,15,56,0,245 # pshufb %xmm5,%xmm6
movdqu 32(%edx),%xmm5 # packed Karatsuba factors
pxor %xmm3,%xmm0 # Xi ^= inp[0]
pshufd $78,%xmm6,%xmm3
movdqa %xmm6,%xmm7
pxor %xmm6,%xmm3 # Karatsuba term for inp[1]
leal 32(%esi),%esi
.byte 102,15,58,68,242,0 # pclmulqdq $0x00,%xmm2,%xmm6
.byte 102,15,58,68,250,17 # pclmulqdq $0x11,%xmm2,%xmm7
.byte 102,15,58,68,221,0 # pclmulqdq $0x00,%xmm5,%xmm3
movups 16(%edx),%xmm2 # xmm2 = Htable[1] (H^2)
nop
subl $32,%ebx
jbe .L003even_tail # no more full pairs left
jmp .L004mod_loop
.align 32
.L004mod_loop:
/* Multiply accumulated Xi by H^2 while reducing and folding in the
 * previously-started single-block product. */
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4 # Karatsuba term for Xi
nop
.byte 102,15,58,68,194,0 # pclmulqdq $0x00,%xmm2,%xmm0
.byte 102,15,58,68,202,17 # pclmulqdq $0x11,%xmm2,%xmm1
.byte 102,15,58,68,229,16 # pclmulqdq $0x10,%xmm5,%xmm4
movups (%edx),%xmm2 # reload H
xorps %xmm6,%xmm0 # merge the two partial products
movdqa (%ecx),%xmm5 # reload bswap mask
xorps %xmm7,%xmm1
movdqu (%esi),%xmm7 # prefetch next two input blocks
pxor %xmm0,%xmm3
movdqu 16(%esi),%xmm6
pxor %xmm1,%xmm3
.byte 102,15,56,0,253 # pshufb %xmm5,%xmm7
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1 # 256-bit product in xmm1:xmm0
pxor %xmm3,%xmm0
.byte 102,15,56,0,245 # pshufb %xmm5,%xmm6
pxor %xmm7,%xmm1 # fold next block into high half
movdqa %xmm6,%xmm7
/* First phase of the reduction, interleaved with starting the
 * multiply of the next block by H. */
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
.byte 102,15,58,68,242,0 # pclmulqdq $0x00,%xmm2,%xmm6
movups 32(%edx),%xmm5 # packed Karatsuba factors
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
pshufd $78,%xmm7,%xmm3
/* Second phase of the reduction. */
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm7,%xmm3
pxor %xmm4,%xmm1
.byte 102,15,58,68,250,17 # pclmulqdq $0x11,%xmm2,%xmm7
movups 16(%edx),%xmm2 # xmm2 = H^2 again
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0 # xmm0 = reduced Xi
.byte 102,15,58,68,221,0 # pclmulqdq $0x00,%xmm5,%xmm3
leal 32(%esi),%esi
subl $32,%ebx
ja .L004mod_loop
.L003even_tail:
/* Final pair: Xi*H^2 + pending block*H, then one full reduction. */
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
.byte 102,15,58,68,194,0 # pclmulqdq $0x00,%xmm2,%xmm0
.byte 102,15,58,68,202,17 # pclmulqdq $0x11,%xmm2,%xmm1
.byte 102,15,58,68,229,16 # pclmulqdq $0x10,%xmm5,%xmm4
movdqa (%ecx),%xmm5
xorps %xmm6,%xmm0
xorps %xmm7,%xmm1
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0 # 1st phase of reduction
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4 # 2nd phase of reduction
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testl %ebx,%ebx
jnz .L005done # no odd block remaining
movups (%edx),%xmm2 # xmm2 = H for the last single block
.L002odd_tail:
/* One remaining block: Xi = (Xi ^ inp) * H, reduced. */
movdqu (%esi),%xmm3
.byte 102,15,56,0,221 # pshufb %xmm5,%xmm3
pxor %xmm3,%xmm0 # Xi ^= inp
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0 # pclmulqdq $0x00,%xmm2,%xmm0
.byte 102,15,58,68,202,17 # pclmulqdq $0x11,%xmm2,%xmm1
.byte 102,15,58,68,220,0 # pclmulqdq $0x00,%xmm4,%xmm3
xorps %xmm0,%xmm3 # Karatsuba post-processing
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0 # 1st phase of reduction
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4 # 2nd phase of reduction
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.L005done:
.byte 102,15,56,0,197 # pshufb %xmm5,%xmm0: back to byte order
movdqu %xmm0,(%eax) # store updated Xi
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size gcm_ghash_clmul,.-.L_gcm_ghash_clmul_begin
.align 64
.Lbswap:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67
.byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112
.byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62
.byte 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
Prom11111/plumbingtech | 4,159 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghashv8-armx-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
// void gcm_init_clmul(u128 Htable[], const uint64_t H[2])
//
// In:  x0 = Htable (output key schedule), x1 = raw hash key H.
// Precomputes the PMULL-based GHASH key material: the twisted H,
// H^2, H^3, H^4, and the packed Karatsuba pre-processed halves,
// storing six 128-bit entries at Htable[0..5].
// Uses only caller-saved vector registers; no stack usage.
.globl gcm_init_clmul
.def gcm_init_clmul
.type 32
.endef
.align 4
gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b // v22 = H^2
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4 (H*H^2 and H^2*H^2, computed in parallel)
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
// void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[])
//
// In/out: x0 = Xi, multiplied in place by the hash key.
// In:     x1 = Htable (twisted H and its Karatsuba fold, as laid out
//              by gcm_init_clmul).
// Single-block GHASH multiply: Xi = (Xi * H) mod P using PMULL/PMULL2
// with Karatsuba splitting and a two-phase reduction via the 0xc2...
// constant. Uses only caller-saved vector registers; no stack usage.
.globl gcm_gmult_clmul
.def gcm_gmult_clmul
.type 32
.endef
.align 4
gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57 // v19 = reduction constant 0xc2...
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b // byte-reflect Xi on little-endian
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b // back to memory byte order
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Prom11111/plumbingtech | 30,876 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/armv8-mont-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.text
//-----------------------------------------------------------------------
// bn_mul_mont_nohw — generic (no-crypto-extension) Montgomery multiplication.
//
// C equivalent (standard OpenSSL bn_mul_mont contract; confirm against header):
//   int bn_mul_mont_nohw(BN_ULONG *rp,        // x0: result, num limbs
//                        const BN_ULONG *ap,  // x1: first operand
//                        const BN_ULONG *bp,  // x2: second operand
//                        const BN_ULONG *np,  // x3: modulus
//                        const BN_ULONG *n0,  // x4: -np[0]^-1 mod 2^64
//                        size_t num);         // x5: number of 64-bit limbs
//
// Computes rp[] = ap[]*bp[] * 2^(-64*num) mod np[] using word-serial
// Montgomery reduction over a stack-allocated temporary tp[num].
// Returns 1 in x0. Callee-saved x19-x24 are preserved via the frame;
// sp is restored from x29 before return.
//
// NOTE(review): the carry chain (adds/adcs/adc) is modulo-scheduled across
// loads and multiplies — do not reorder instructions here.
//-----------------------------------------------------------------------
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,%function
.align 5
bn_mul_mont_nohw:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-64]!
	add	x29,sp,#0
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	ldr	x9,[x2],#8		// bp[0]
	sub	x22,sp,x5,lsl#3
	ldp	x7,x8,[x1],#16	// ap[0..1]
	lsl	x5,x5,#3
	ldr	x4,[x4]		// *n0
	and	x22,x22,#-16		// ABI says so
	ldp	x13,x14,[x3],#16	// np[0..1]

	mul	x6,x7,x9		// ap[0]*bp[0]
	sub	x21,x5,#16		// j=num-2
	umulh	x7,x7,x9
	mul	x10,x8,x9		// ap[1]*bp[0]
	umulh	x11,x8,x9

	mul	x15,x6,x4		// "tp[0]"*n0
	mov	sp,x22			// alloca

	// (*)	mul	x12,x13,x15	// np[0]*m1
	umulh	x13,x13,x15
	mul	x16,x14,x15		// np[1]*m1
	// (*)	adds	x12,x12,x6	// discarded
	// (*)	As for removal of first multiplication and addition
	//	instructions. The outcome of first addition is
	//	guaranteed to be zero, which leaves two computationally
	//	significant outcomes: it either carries or not. Then
	//	question is when does it carry? Is there alternative
	//	way to deduce it? If you follow operations, you can
	//	observe that condition for carry is quite simple:
	//	x6 being non-zero. So that carry can be calculated
	//	by adding -1 to x6. That's what next instruction does.
	subs	xzr,x6,#1		// (*)
	umulh	x17,x14,x15
	adc	x13,x13,xzr
	cbz	x21,.L1st_skip

// First outer iteration: tp[] = (ap[]*bp[0] + m1*np[]) >> 64, word by word.
.L1st:
	ldr	x8,[x1],#8
	adds	x6,x10,x7
	sub	x21,x21,#8		// j--
	adc	x7,x11,xzr

	ldr	x14,[x3],#8
	adds	x12,x16,x13
	mul	x10,x8,x9		// ap[j]*bp[0]
	adc	x13,x17,xzr
	umulh	x11,x8,x9

	adds	x12,x12,x6
	mul	x16,x14,x15		// np[j]*m1
	adc	x13,x13,xzr
	umulh	x17,x14,x15
	str	x12,[x22],#8		// tp[j-1]
	cbnz	x21,.L1st

.L1st_skip:
	adds	x6,x10,x7
	sub	x1,x1,x5		// rewind x1
	adc	x7,x11,xzr

	adds	x12,x16,x13
	sub	x3,x3,x5		// rewind x3
	adc	x13,x17,xzr

	adds	x12,x12,x6
	sub	x20,x5,#8		// i=num-1
	adcs	x13,x13,x7

	adc	x19,xzr,xzr		// upmost overflow bit
	stp	x12,x13,[x22]

// Remaining num-1 outer iterations: fold bp[i] into tp[] with reduction.
.Louter:
	ldr	x9,[x2],#8		// bp[i]
	ldp	x7,x8,[x1],#16
	ldr	x23,[sp]		// tp[0]
	add	x22,sp,#8

	mul	x6,x7,x9		// ap[0]*bp[i]
	sub	x21,x5,#16		// j=num-2
	umulh	x7,x7,x9
	ldp	x13,x14,[x3],#16
	mul	x10,x8,x9		// ap[1]*bp[i]
	adds	x6,x6,x23
	umulh	x11,x8,x9
	adc	x7,x7,xzr

	mul	x15,x6,x4
	sub	x20,x20,#8		// i--

	// (*)	mul	x12,x13,x15	// np[0]*m1
	umulh	x13,x13,x15
	mul	x16,x14,x15		// np[1]*m1
	// (*)	adds	x12,x12,x6
	subs	xzr,x6,#1		// (*)
	umulh	x17,x14,x15
	cbz	x21,.Linner_skip

.Linner:
	ldr	x8,[x1],#8
	adc	x13,x13,xzr
	ldr	x23,[x22],#8		// tp[j]
	adds	x6,x10,x7
	sub	x21,x21,#8		// j--
	adc	x7,x11,xzr

	adds	x12,x16,x13
	ldr	x14,[x3],#8
	adc	x13,x17,xzr

	mul	x10,x8,x9		// ap[j]*bp[i]
	adds	x6,x6,x23
	umulh	x11,x8,x9
	adc	x7,x7,xzr

	mul	x16,x14,x15		// np[j]*m1
	adds	x12,x12,x6
	umulh	x17,x14,x15
	str	x12,[x22,#-16]		// tp[j-1]
	cbnz	x21,.Linner

.Linner_skip:
	ldr	x23,[x22],#8		// tp[j]
	adc	x13,x13,xzr
	adds	x6,x10,x7
	sub	x1,x1,x5		// rewind x1
	adc	x7,x11,xzr

	adds	x12,x16,x13
	sub	x3,x3,x5		// rewind x3
	adcs	x13,x17,x19
	adc	x19,xzr,xzr

	adds	x6,x6,x23
	adc	x7,x7,xzr

	adds	x12,x12,x6
	adcs	x13,x13,x7
	adc	x19,x19,xzr		// upmost overflow bit
	stp	x12,x13,[x22,#-16]

	cbnz	x20,.Louter

	// Final step. We see if result is larger than modulus, and
	// if it is, subtract the modulus. But comparison implies
	// subtraction. So we subtract modulus, see if it borrowed,
	// and conditionally copy original value.
	ldr	x23,[sp]		// tp[0]
	add	x22,sp,#8
	ldr	x14,[x3],#8		// np[0]
	subs	x21,x5,#8		// j=num-1 and clear borrow
	mov	x1,x0
.Lsub:
	sbcs	x8,x23,x14		// tp[j]-np[j]
	ldr	x23,[x22],#8
	sub	x21,x21,#8		// j--
	ldr	x14,[x3],#8
	str	x8,[x1],#8		// rp[j]=tp[j]-np[j]
	cbnz	x21,.Lsub
	sbcs	x8,x23,x14
	sbcs	x19,x19,xzr		// did it borrow?
	str	x8,[x1],#8		// rp[num-1]

	ldr	x23,[sp]		// tp[0]
	add	x22,sp,#8
	ldr	x8,[x0],#8		// rp[0]
	sub	x5,x5,#8		// num--
	nop
// Constant-time select between tp[] and rp[]-minus-np[]; also wipes tp[].
.Lcond_copy:
	sub	x5,x5,#8		// num--
	csel	x14,x23,x8,lo		// did it borrow?
	ldr	x23,[x22],#8
	ldr	x8,[x0],#8
	str	xzr,[x22,#-16]		// wipe tp
	str	x14,[x0,#-16]
	cbnz	x5,.Lcond_copy
	csel	x14,x23,x8,lo
	str	xzr,[x22,#-8]		// wipe tp
	str	x14,[x0,#-8]

	ldp	x19,x20,[x29,#16]
	mov	sp,x29
	ldp	x21,x22,[x29,#32]
	mov	x0,#1
	ldp	x23,x24,[x29,#48]
	ldr	x29,[sp],#64
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	bn_mul_mont_nohw,.-bn_mul_mont_nohw
//-----------------------------------------------------------------------
// bn_sqr8x_mont — Montgomery squaring, processing eight limbs per pass.
//
// Same argument convention as bn_mul_mont_nohw (x0=rp, x1=ap, x2=bp,
// x3=np, x4=&n0, x5=num); the squaring path ignores bp's contents and
// computes rp[] = ap[]^2 * 2^(-64*num) mod np[].
//
// Strategy: first accumulate all cross products a[i]*a[j] (i>j) into a
// 2*num-limb scratch t[] on the stack, then double t[] and add the
// squares a[i]^2 (.Lsqr4x_shift_n_add), then run 512-bit-wide Montgomery
// reduction passes (.Lsqr8x_reduction / .Lsqr8x_tail), and finally do the
// conditional modulus subtraction.
//
// NOTE(review): limb windows are 8 wide throughout, so num is presumably
// a multiple of 8 — the dispatch that guarantees this is not visible in
// this file; confirm at the caller.
//-----------------------------------------------------------------------
.globl	bn_sqr8x_mont
.hidden	bn_sqr8x_mont
.type	bn_sqr8x_mont,%function
.align	5
bn_sqr8x_mont:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-128]!
	add	x29,sp,#0
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	stp	x0,x3,[sp,#96]	// offload rp and np

	ldp	x6,x7,[x1,#8*0]
	ldp	x8,x9,[x1,#8*2]
	ldp	x10,x11,[x1,#8*4]
	ldp	x12,x13,[x1,#8*6]

	sub	x2,sp,x5,lsl#4	// scratch t[] is 2*num limbs
	lsl	x5,x5,#3
	ldr	x4,[x4]		// *n0
	mov	sp,x2		// alloca
	sub	x27,x5,#8*8
	b	.Lsqr8x_zero_start

// Zero the 2*num-limb t[] scratch area, 16 limbs per iteration.
.Lsqr8x_zero:
	sub	x27,x27,#8*8
	stp	xzr,xzr,[x2,#8*0]
	stp	xzr,xzr,[x2,#8*2]
	stp	xzr,xzr,[x2,#8*4]
	stp	xzr,xzr,[x2,#8*6]
.Lsqr8x_zero_start:
	stp	xzr,xzr,[x2,#8*8]
	stp	xzr,xzr,[x2,#8*10]
	stp	xzr,xzr,[x2,#8*12]
	stp	xzr,xzr,[x2,#8*14]
	add	x2,x2,#8*16
	cbnz	x27,.Lsqr8x_zero

	add	x3,x1,x5
	add	x1,x1,#8*8
	mov	x19,xzr
	mov	x20,xzr
	mov	x21,xzr
	mov	x22,xzr
	mov	x23,xzr
	mov	x24,xzr
	mov	x25,xzr
	mov	x26,xzr
	mov	x2,sp
	str	x4,[x29,#112]		// offload n0

	// Multiply everything but a[i]*a[i]
.align	4
.Lsqr8x_outer_loop:
	//                                                 a[1]a[0]	(i)
	//                                             a[2]a[0]
	//                                         a[3]a[0]
	//                                     a[4]a[0]
	//                                 a[5]a[0]
	//                             a[6]a[0]
	//                         a[7]a[0]
	//                                         a[2]a[1]		(ii)
	//                                     a[3]a[1]
	//                                 a[4]a[1]
	//                             a[5]a[1]
	//                         a[6]a[1]
	//                     a[7]a[1]
	//                                 a[3]a[2]			(iii)
	//                             a[4]a[2]
	//                         a[5]a[2]
	//                     a[6]a[2]
	//                 a[7]a[2]
	//                         a[4]a[3]				(iv)
	//                     a[5]a[3]
	//                 a[6]a[3]
	//             a[7]a[3]
	//                 a[5]a[4]					(v)
	//             a[6]a[4]
	//         a[7]a[4]
	//         a[6]a[5]						(vi)
	//     a[7]a[5]
	//     a[7]a[6]							(vii)
	mul	x14,x7,x6		// lo(a[1..7]*a[0])		(i)
	mul	x15,x8,x6
	mul	x16,x9,x6
	mul	x17,x10,x6
	adds	x20,x20,x14		// t[1]+lo(a[1]*a[0])
	mul	x14,x11,x6
	adcs	x21,x21,x15
	mul	x15,x12,x6
	adcs	x22,x22,x16
	mul	x16,x13,x6
	adcs	x23,x23,x17
	umulh	x17,x7,x6		// hi(a[1..7]*a[0])
	adcs	x24,x24,x14
	umulh	x14,x8,x6
	adcs	x25,x25,x15
	umulh	x15,x9,x6
	adcs	x26,x26,x16
	umulh	x16,x10,x6
	stp	x19,x20,[x2],#8*2	// t[0..1]
	adc	x19,xzr,xzr		// t[8]
	adds	x21,x21,x17		// t[2]+lo(a[1]*a[0])
	umulh	x17,x11,x6
	adcs	x22,x22,x14
	umulh	x14,x12,x6
	adcs	x23,x23,x15
	umulh	x15,x13,x6
	adcs	x24,x24,x16
	mul	x16,x8,x7		// lo(a[2..7]*a[1])		(ii)
	adcs	x25,x25,x17
	mul	x17,x9,x7
	adcs	x26,x26,x14
	mul	x14,x10,x7
	adc	x19,x19,x15

	mul	x15,x11,x7
	adds	x22,x22,x16
	mul	x16,x12,x7
	adcs	x23,x23,x17
	mul	x17,x13,x7
	adcs	x24,x24,x14
	umulh	x14,x8,x7		// hi(a[2..7]*a[1])
	adcs	x25,x25,x15
	umulh	x15,x9,x7
	adcs	x26,x26,x16
	umulh	x16,x10,x7
	adcs	x19,x19,x17
	umulh	x17,x11,x7
	stp	x21,x22,[x2],#8*2	// t[2..3]
	adc	x20,xzr,xzr		// t[9]
	adds	x23,x23,x14
	umulh	x14,x12,x7
	adcs	x24,x24,x15
	umulh	x15,x13,x7
	adcs	x25,x25,x16
	mul	x16,x9,x8		// lo(a[3..7]*a[2])		(iii)
	adcs	x26,x26,x17
	mul	x17,x10,x8
	adcs	x19,x19,x14
	mul	x14,x11,x8
	adc	x20,x20,x15

	mul	x15,x12,x8
	adds	x24,x24,x16
	mul	x16,x13,x8
	adcs	x25,x25,x17
	umulh	x17,x9,x8		// hi(a[3..7]*a[2])
	adcs	x26,x26,x14
	umulh	x14,x10,x8
	adcs	x19,x19,x15
	umulh	x15,x11,x8
	adcs	x20,x20,x16
	umulh	x16,x12,x8
	stp	x23,x24,[x2],#8*2	// t[4..5]
	adc	x21,xzr,xzr		// t[10]
	adds	x25,x25,x17
	umulh	x17,x13,x8
	adcs	x26,x26,x14
	mul	x14,x10,x9		// lo(a[4..7]*a[3])		(iv)
	adcs	x19,x19,x15
	mul	x15,x11,x9
	adcs	x20,x20,x16
	mul	x16,x12,x9
	adc	x21,x21,x17

	mul	x17,x13,x9
	adds	x26,x26,x14
	umulh	x14,x10,x9		// hi(a[4..7]*a[3])
	adcs	x19,x19,x15
	umulh	x15,x11,x9
	adcs	x20,x20,x16
	umulh	x16,x12,x9
	adcs	x21,x21,x17
	umulh	x17,x13,x9
	stp	x25,x26,[x2],#8*2	// t[6..7]
	adc	x22,xzr,xzr		// t[11]
	adds	x19,x19,x14
	mul	x14,x11,x10		// lo(a[5..7]*a[4])		(v)
	adcs	x20,x20,x15
	mul	x15,x12,x10
	adcs	x21,x21,x16
	mul	x16,x13,x10
	adc	x22,x22,x17

	umulh	x17,x11,x10		// hi(a[5..7]*a[4])
	adds	x20,x20,x14
	umulh	x14,x12,x10
	adcs	x21,x21,x15
	umulh	x15,x13,x10
	adcs	x22,x22,x16
	mul	x16,x12,x11		// lo(a[6..7]*a[5])		(vi)
	adc	x23,xzr,xzr		// t[12]
	adds	x21,x21,x17
	mul	x17,x13,x11
	adcs	x22,x22,x14
	umulh	x14,x12,x11		// hi(a[6..7]*a[5])
	adc	x23,x23,x15

	umulh	x15,x13,x11
	adds	x22,x22,x16
	mul	x16,x13,x12		// lo(a[7]*a[6])		(vii)
	adcs	x23,x23,x17
	umulh	x17,x13,x12		// hi(a[7]*a[6])
	adc	x24,xzr,xzr		// t[13]
	adds	x23,x23,x14
	sub	x27,x3,x1	// done yet?
	adc	x24,x24,x15

	adds	x24,x24,x16
	sub	x14,x3,x5	// rewinded ap
	adc	x25,xzr,xzr		// t[14]
	add	x25,x25,x17

	cbz	x27,.Lsqr8x_outer_break

	mov	x4,x6
	ldp	x6,x7,[x2,#8*0]
	ldp	x8,x9,[x2,#8*2]
	ldp	x10,x11,[x2,#8*4]
	ldp	x12,x13,[x2,#8*6]
	adds	x19,x19,x6
	adcs	x20,x20,x7
	ldp	x6,x7,[x1,#8*0]
	adcs	x21,x21,x8
	adcs	x22,x22,x9
	ldp	x8,x9,[x1,#8*2]
	adcs	x23,x23,x10
	adcs	x24,x24,x11
	ldp	x10,x11,[x1,#8*4]
	adcs	x25,x25,x12
	mov	x0,x1
	adcs	x26,xzr,x13
	ldp	x12,x13,[x1,#8*6]
	add	x1,x1,#8*8
	//adc	x28,xzr,xzr		// moved below
	mov	x27,#-8*8

	//                                                         a[8]a[0]
	//                                                     a[9]a[0]
	//                                                 a[a]a[0]
	//                                             a[b]a[0]
	//                                         a[c]a[0]
	//                                     a[d]a[0]
	//                                 a[e]a[0]
	//                             a[f]a[0]
	//                                                     a[8]a[1]
	//                         a[f]a[1]........................
	//                                                 a[8]a[2]
	//                     a[f]a[2]........................
	//                                             a[8]a[3]
	//                 a[f]a[3]........................
	//                                         a[8]a[4]
	//             a[f]a[4]........................
	//                                     a[8]a[5]
	//         a[f]a[5]........................
	//                                 a[8]a[6]
	//     a[f]a[6]........................
	//                             a[8]a[7]
	// a[f]a[7]........................
.Lsqr8x_mul:
	mul	x14,x6,x4
	adc	x28,xzr,xzr		// carry bit, modulo-scheduled
	mul	x15,x7,x4
	add	x27,x27,#8
	mul	x16,x8,x4
	mul	x17,x9,x4
	adds	x19,x19,x14
	mul	x14,x10,x4
	adcs	x20,x20,x15
	mul	x15,x11,x4
	adcs	x21,x21,x16
	mul	x16,x12,x4
	adcs	x22,x22,x17
	mul	x17,x13,x4
	adcs	x23,x23,x14
	umulh	x14,x6,x4
	adcs	x24,x24,x15
	umulh	x15,x7,x4
	adcs	x25,x25,x16
	umulh	x16,x8,x4
	adcs	x26,x26,x17
	umulh	x17,x9,x4
	adc	x28,x28,xzr
	str	x19,[x2],#8
	adds	x19,x20,x14
	umulh	x14,x10,x4
	adcs	x20,x21,x15
	umulh	x15,x11,x4
	adcs	x21,x22,x16
	umulh	x16,x12,x4
	adcs	x22,x23,x17
	umulh	x17,x13,x4
	ldr	x4,[x0,x27]
	adcs	x23,x24,x14
	adcs	x24,x25,x15
	adcs	x25,x26,x16
	adcs	x26,x28,x17
	//adc	x28,xzr,xzr		// moved above
	cbnz	x27,.Lsqr8x_mul
					// note that carry flag is guaranteed
					// to be zero at this point
	cmp	x1,x3		// done yet?
	b.eq	.Lsqr8x_break

	ldp	x6,x7,[x2,#8*0]
	ldp	x8,x9,[x2,#8*2]
	ldp	x10,x11,[x2,#8*4]
	ldp	x12,x13,[x2,#8*6]
	adds	x19,x19,x6
	ldr	x4,[x0,#-8*8]
	adcs	x20,x20,x7
	ldp	x6,x7,[x1,#8*0]
	adcs	x21,x21,x8
	adcs	x22,x22,x9
	ldp	x8,x9,[x1,#8*2]
	adcs	x23,x23,x10
	adcs	x24,x24,x11
	ldp	x10,x11,[x1,#8*4]
	adcs	x25,x25,x12
	mov	x27,#-8*8
	adcs	x26,x26,x13
	ldp	x12,x13,[x1,#8*6]
	add	x1,x1,#8*8
	//adc	x28,xzr,xzr		// moved above
	b	.Lsqr8x_mul

.align	4
.Lsqr8x_break:
	ldp	x6,x7,[x0,#8*0]
	add	x1,x0,#8*8
	ldp	x8,x9,[x0,#8*2]
	sub	x14,x3,x1		// is it last iteration?
	ldp	x10,x11,[x0,#8*4]
	sub	x15,x2,x14
	ldp	x12,x13,[x0,#8*6]
	cbz	x14,.Lsqr8x_outer_loop

	stp	x19,x20,[x2,#8*0]
	ldp	x19,x20,[x15,#8*0]
	stp	x21,x22,[x2,#8*2]
	ldp	x21,x22,[x15,#8*2]
	stp	x23,x24,[x2,#8*4]
	ldp	x23,x24,[x15,#8*4]
	stp	x25,x26,[x2,#8*6]
	mov	x2,x15
	ldp	x25,x26,[x15,#8*6]
	b	.Lsqr8x_outer_loop

.align	4
.Lsqr8x_outer_break:
	// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
	ldp	x7,x9,[x14,#8*0]	// recall that x14 is &a[0]
	ldp	x15,x16,[sp,#8*1]
	ldp	x11,x13,[x14,#8*2]
	add	x1,x14,#8*4
	ldp	x17,x14,[sp,#8*3]

	stp	x19,x20,[x2,#8*0]
	mul	x19,x7,x7
	stp	x21,x22,[x2,#8*2]
	umulh	x7,x7,x7
	stp	x23,x24,[x2,#8*4]
	mul	x8,x9,x9
	stp	x25,x26,[x2,#8*6]
	mov	x2,sp
	umulh	x9,x9,x9
	adds	x20,x7,x15,lsl#1
	extr	x15,x16,x15,#63
	sub	x27,x5,#8*4

// Double t[] via 63-bit extr shifts and fold in the diagonal squares.
.Lsqr4x_shift_n_add:
	adcs	x21,x8,x15
	extr	x16,x17,x16,#63
	sub	x27,x27,#8*4
	adcs	x22,x9,x16
	ldp	x15,x16,[x2,#8*5]
	mul	x10,x11,x11
	ldp	x7,x9,[x1],#8*2
	umulh	x11,x11,x11
	mul	x12,x13,x13
	umulh	x13,x13,x13
	extr	x17,x14,x17,#63
	stp	x19,x20,[x2,#8*0]
	adcs	x23,x10,x17
	extr	x14,x15,x14,#63
	stp	x21,x22,[x2,#8*2]
	adcs	x24,x11,x14
	ldp	x17,x14,[x2,#8*7]
	extr	x15,x16,x15,#63
	adcs	x25,x12,x15
	extr	x16,x17,x16,#63
	adcs	x26,x13,x16
	ldp	x15,x16,[x2,#8*9]
	mul	x6,x7,x7
	ldp	x11,x13,[x1],#8*2
	umulh	x7,x7,x7
	mul	x8,x9,x9
	umulh	x9,x9,x9
	stp	x23,x24,[x2,#8*4]
	extr	x17,x14,x17,#63
	stp	x25,x26,[x2,#8*6]
	add	x2,x2,#8*8
	adcs	x19,x6,x17
	extr	x14,x15,x14,#63
	adcs	x20,x7,x14
	ldp	x17,x14,[x2,#8*3]
	extr	x15,x16,x15,#63
	cbnz	x27,.Lsqr4x_shift_n_add
	ldp	x1,x4,[x29,#104]	// pull np and n0

	adcs	x21,x8,x15
	extr	x16,x17,x16,#63
	adcs	x22,x9,x16
	ldp	x15,x16,[x2,#8*5]
	mul	x10,x11,x11
	umulh	x11,x11,x11
	stp	x19,x20,[x2,#8*0]
	mul	x12,x13,x13
	umulh	x13,x13,x13
	stp	x21,x22,[x2,#8*2]
	extr	x17,x14,x17,#63
	adcs	x23,x10,x17
	extr	x14,x15,x14,#63
	ldp	x19,x20,[sp,#8*0]
	adcs	x24,x11,x14
	extr	x15,x16,x15,#63
	ldp	x6,x7,[x1,#8*0]
	adcs	x25,x12,x15
	extr	x16,xzr,x16,#63
	ldp	x8,x9,[x1,#8*2]
	adc	x26,x13,x16
	ldp	x10,x11,[x1,#8*4]

	// Reduce by 512 bits per iteration
	mul	x28,x4,x19		// t[0]*n0
	ldp	x12,x13,[x1,#8*6]
	add	x3,x1,x5
	ldp	x21,x22,[sp,#8*2]
	stp	x23,x24,[x2,#8*4]
	ldp	x23,x24,[sp,#8*4]
	stp	x25,x26,[x2,#8*6]
	ldp	x25,x26,[sp,#8*6]
	add	x1,x1,#8*8
	mov	x30,xzr		// initial top-most carry
	mov	x2,sp
	mov	x27,#8

.Lsqr8x_reduction:
	// (*)	mul	x14,x6,x28	// lo(n[0-7])*lo(t[0]*n0)
	mul	x15,x7,x28
	sub	x27,x27,#1
	mul	x16,x8,x28
	str	x28,[x2],#8		// put aside t[0]*n0 for tail processing
	mul	x17,x9,x28
	// (*)	adds	xzr,x19,x14
	subs	xzr,x19,#1		// (*)
	mul	x14,x10,x28
	adcs	x19,x20,x15
	mul	x15,x11,x28
	adcs	x20,x21,x16
	mul	x16,x12,x28
	adcs	x21,x22,x17
	mul	x17,x13,x28
	adcs	x22,x23,x14
	umulh	x14,x6,x28		// hi(n[0-7])*lo(t[0]*n0)
	adcs	x23,x24,x15
	umulh	x15,x7,x28
	adcs	x24,x25,x16
	umulh	x16,x8,x28
	adcs	x25,x26,x17
	umulh	x17,x9,x28
	adc	x26,xzr,xzr
	adds	x19,x19,x14
	umulh	x14,x10,x28
	adcs	x20,x20,x15
	umulh	x15,x11,x28
	adcs	x21,x21,x16
	umulh	x16,x12,x28
	adcs	x22,x22,x17
	umulh	x17,x13,x28
	mul	x28,x4,x19		// next t[0]*n0
	adcs	x23,x23,x14
	adcs	x24,x24,x15
	adcs	x25,x25,x16
	adc	x26,x26,x17
	cbnz	x27,.Lsqr8x_reduction

	ldp	x14,x15,[x2,#8*0]
	ldp	x16,x17,[x2,#8*2]
	mov	x0,x2
	sub	x27,x3,x1	// done yet?
	adds	x19,x19,x14
	adcs	x20,x20,x15
	ldp	x14,x15,[x2,#8*4]
	adcs	x21,x21,x16
	adcs	x22,x22,x17
	ldp	x16,x17,[x2,#8*6]
	adcs	x23,x23,x14
	adcs	x24,x24,x15
	adcs	x25,x25,x16
	adcs	x26,x26,x17
	//adc	x28,xzr,xzr		// moved below
	cbz	x27,.Lsqr8x8_post_condition

	ldr	x4,[x2,#-8*8]
	ldp	x6,x7,[x1,#8*0]
	ldp	x8,x9,[x1,#8*2]
	ldp	x10,x11,[x1,#8*4]
	mov	x27,#-8*8
	ldp	x12,x13,[x1,#8*6]
	add	x1,x1,#8*8

.Lsqr8x_tail:
	mul	x14,x6,x4
	adc	x28,xzr,xzr		// carry bit, modulo-scheduled
	mul	x15,x7,x4
	add	x27,x27,#8
	mul	x16,x8,x4
	mul	x17,x9,x4
	adds	x19,x19,x14
	mul	x14,x10,x4
	adcs	x20,x20,x15
	mul	x15,x11,x4
	adcs	x21,x21,x16
	mul	x16,x12,x4
	adcs	x22,x22,x17
	mul	x17,x13,x4
	adcs	x23,x23,x14
	umulh	x14,x6,x4
	adcs	x24,x24,x15
	umulh	x15,x7,x4
	adcs	x25,x25,x16
	umulh	x16,x8,x4
	adcs	x26,x26,x17
	umulh	x17,x9,x4
	adc	x28,x28,xzr
	str	x19,[x2],#8
	adds	x19,x20,x14
	umulh	x14,x10,x4
	adcs	x20,x21,x15
	umulh	x15,x11,x4
	adcs	x21,x22,x16
	umulh	x16,x12,x4
	adcs	x22,x23,x17
	umulh	x17,x13,x4
	ldr	x4,[x0,x27]
	adcs	x23,x24,x14
	adcs	x24,x25,x15
	adcs	x25,x26,x16
	adcs	x26,x28,x17
	//adc	x28,xzr,xzr		// moved above
	cbnz	x27,.Lsqr8x_tail
					// note that carry flag is guaranteed
					// to be zero at this point
	ldp	x6,x7,[x2,#8*0]
	sub	x27,x3,x1	// done yet?
	sub	x16,x3,x5	// rewinded np
	ldp	x8,x9,[x2,#8*2]
	ldp	x10,x11,[x2,#8*4]
	ldp	x12,x13,[x2,#8*6]
	cbz	x27,.Lsqr8x_tail_break

	ldr	x4,[x0,#-8*8]
	adds	x19,x19,x6
	adcs	x20,x20,x7
	ldp	x6,x7,[x1,#8*0]
	adcs	x21,x21,x8
	adcs	x22,x22,x9
	ldp	x8,x9,[x1,#8*2]
	adcs	x23,x23,x10
	adcs	x24,x24,x11
	ldp	x10,x11,[x1,#8*4]
	adcs	x25,x25,x12
	mov	x27,#-8*8
	adcs	x26,x26,x13
	ldp	x12,x13,[x1,#8*6]
	add	x1,x1,#8*8
	//adc	x28,xzr,xzr		// moved above
	b	.Lsqr8x_tail

.align	4
.Lsqr8x_tail_break:
	ldr	x4,[x29,#112]		// pull n0
	add	x27,x2,#8*8		// end of current t[num] window

	subs	xzr,x30,#1		// "move" top-most carry to carry bit
	adcs	x14,x19,x6
	adcs	x15,x20,x7
	ldp	x19,x20,[x0,#8*0]
	adcs	x21,x21,x8
	ldp	x6,x7,[x16,#8*0]	// recall that x16 is &n[0]
	adcs	x22,x22,x9
	ldp	x8,x9,[x16,#8*2]
	adcs	x23,x23,x10
	adcs	x24,x24,x11
	ldp	x10,x11,[x16,#8*4]
	adcs	x25,x25,x12
	adcs	x26,x26,x13
	ldp	x12,x13,[x16,#8*6]
	add	x1,x16,#8*8
	adc	x30,xzr,xzr	// top-most carry
	mul	x28,x4,x19
	stp	x14,x15,[x2,#8*0]
	stp	x21,x22,[x2,#8*2]
	ldp	x21,x22,[x0,#8*2]
	stp	x23,x24,[x2,#8*4]
	ldp	x23,x24,[x0,#8*4]
	cmp	x27,x29		// did we hit the bottom?
	stp	x25,x26,[x2,#8*6]
	mov	x2,x0			// slide the window
	ldp	x25,x26,[x0,#8*6]
	mov	x27,#8
	b.ne	.Lsqr8x_reduction

	// Final step. We see if result is larger than modulus, and
	// if it is, subtract the modulus. But comparison implies
	// subtraction. So we subtract modulus, see if it borrowed,
	// and conditionally copy original value.
	ldr	x0,[x29,#96]		// pull rp
	add	x2,x2,#8*8
	subs	x14,x19,x6
	sbcs	x15,x20,x7
	sub	x27,x5,#8*8
	mov	x3,x0		// x0 copy

.Lsqr8x_sub:
	sbcs	x16,x21,x8
	ldp	x6,x7,[x1,#8*0]
	sbcs	x17,x22,x9
	stp	x14,x15,[x0,#8*0]
	sbcs	x14,x23,x10
	ldp	x8,x9,[x1,#8*2]
	sbcs	x15,x24,x11
	stp	x16,x17,[x0,#8*2]
	sbcs	x16,x25,x12
	ldp	x10,x11,[x1,#8*4]
	sbcs	x17,x26,x13
	ldp	x12,x13,[x1,#8*6]
	add	x1,x1,#8*8
	ldp	x19,x20,[x2,#8*0]
	sub	x27,x27,#8*8
	ldp	x21,x22,[x2,#8*2]
	ldp	x23,x24,[x2,#8*4]
	ldp	x25,x26,[x2,#8*6]
	add	x2,x2,#8*8
	stp	x14,x15,[x0,#8*4]
	sbcs	x14,x19,x6
	stp	x16,x17,[x0,#8*6]
	add	x0,x0,#8*8
	sbcs	x15,x20,x7
	cbnz	x27,.Lsqr8x_sub

	sbcs	x16,x21,x8
	mov	x2,sp
	add	x1,sp,x5
	ldp	x6,x7,[x3,#8*0]
	sbcs	x17,x22,x9
	stp	x14,x15,[x0,#8*0]
	sbcs	x14,x23,x10
	ldp	x8,x9,[x3,#8*2]
	sbcs	x15,x24,x11
	stp	x16,x17,[x0,#8*2]
	sbcs	x16,x25,x12
	ldp	x19,x20,[x1,#8*0]
	sbcs	x17,x26,x13
	ldp	x21,x22,[x1,#8*2]
	sbcs	xzr,x30,xzr	// did it borrow?
	ldr	x30,[x29,#8]		// pull return address
	stp	x14,x15,[x0,#8*4]
	stp	x16,x17,[x0,#8*6]

	sub	x27,x5,#8*4
// Constant-time select of the reduced result; wipes the stack scratch.
.Lsqr4x_cond_copy:
	sub	x27,x27,#8*4
	csel	x14,x19,x6,lo
	stp	xzr,xzr,[x2,#8*0]
	csel	x15,x20,x7,lo
	ldp	x6,x7,[x3,#8*4]
	ldp	x19,x20,[x1,#8*4]
	csel	x16,x21,x8,lo
	stp	xzr,xzr,[x2,#8*2]
	add	x2,x2,#8*4
	csel	x17,x22,x9,lo
	ldp	x8,x9,[x3,#8*6]
	ldp	x21,x22,[x1,#8*6]
	add	x1,x1,#8*4
	stp	x14,x15,[x3,#8*0]
	stp	x16,x17,[x3,#8*2]
	add	x3,x3,#8*4
	stp	xzr,xzr,[x1,#8*0]
	stp	xzr,xzr,[x1,#8*2]
	cbnz	x27,.Lsqr4x_cond_copy

	csel	x14,x19,x6,lo
	stp	xzr,xzr,[x2,#8*0]
	csel	x15,x20,x7,lo
	stp	xzr,xzr,[x2,#8*2]
	csel	x16,x21,x8,lo
	csel	x17,x22,x9,lo
	stp	x14,x15,[x3,#8*0]
	stp	x16,x17,[x3,#8*2]

	b	.Lsqr8x_done

.align	4
.Lsqr8x8_post_condition:
	adc	x28,xzr,xzr
	ldr	x30,[x29,#8]		// pull return address
	// x19-7,x28 hold result, x6-7 hold modulus
	subs	x6,x19,x6
	ldr	x1,[x29,#96]		// pull rp
	sbcs	x7,x20,x7
	stp	xzr,xzr,[sp,#8*0]
	sbcs	x8,x21,x8
	stp	xzr,xzr,[sp,#8*2]
	sbcs	x9,x22,x9
	stp	xzr,xzr,[sp,#8*4]
	sbcs	x10,x23,x10
	stp	xzr,xzr,[sp,#8*6]
	sbcs	x11,x24,x11
	stp	xzr,xzr,[sp,#8*8]
	sbcs	x12,x25,x12
	stp	xzr,xzr,[sp,#8*10]
	sbcs	x13,x26,x13
	stp	xzr,xzr,[sp,#8*12]
	sbcs	x28,x28,xzr	// did it borrow?
	stp	xzr,xzr,[sp,#8*14]

	// x6-7 hold result-modulus
	csel	x6,x19,x6,lo
	csel	x7,x20,x7,lo
	csel	x8,x21,x8,lo
	csel	x9,x22,x9,lo
	stp	x6,x7,[x1,#8*0]
	csel	x10,x23,x10,lo
	csel	x11,x24,x11,lo
	stp	x8,x9,[x1,#8*2]
	csel	x12,x25,x12,lo
	csel	x13,x26,x13,lo
	stp	x10,x11,[x1,#8*4]
	stp	x12,x13,[x1,#8*6]

.Lsqr8x_done:
	ldp	x19,x20,[x29,#16]
	mov	sp,x29
	ldp	x21,x22,[x29,#32]
	mov	x0,#1
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldr	x29,[sp],#128
	// x30 is popped earlier
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
//-----------------------------------------------------------------------
// bn_mul4x_mont — Montgomery multiplication, four limbs per inner pass.
//
// Same argument convention as bn_mul_mont_nohw (x0=rp, x1=ap, x2=bp,
// x3=np, x4=&n0, x5=num). Interleaves multiplication by b[i] with the
// Montgomery reduction, keeping a 4-limb accumulator window in x19-x22.
// t[0]*n0 values are parked on the stack during the reduction pass and
// replayed in the tail pass.
//
// NOTE(review): the window is 4 limbs wide, so num is presumably a
// multiple of 4 — the dispatch guaranteeing this is not in this file;
// confirm at the caller.
//-----------------------------------------------------------------------
.globl	bn_mul4x_mont
.hidden	bn_mul4x_mont
.type	bn_mul4x_mont,%function
.align	5
bn_mul4x_mont:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-128]!
	add	x29,sp,#0
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]

	sub	x26,sp,x5,lsl#3
	lsl	x5,x5,#3
	ldr	x4,[x4]		// *n0
	sub	sp,x26,#8*4		// alloca

	add	x10,x2,x5
	add	x27,x1,x5
	stp	x0,x10,[x29,#96]	// offload rp and &b[num]

	ldr	x24,[x2,#8*0]		// b[0]
	ldp	x6,x7,[x1,#8*0]	// a[0..3]
	ldp	x8,x9,[x1,#8*2]
	add	x1,x1,#8*4
	mov	x19,xzr
	mov	x20,xzr
	mov	x21,xzr
	mov	x22,xzr
	ldp	x14,x15,[x3,#8*0]	// n[0..3]
	ldp	x16,x17,[x3,#8*2]
	adds	x3,x3,#8*4		// clear carry bit
	mov	x0,xzr
	mov	x28,#0
	mov	x26,sp

.Loop_mul4x_1st_reduction:
	mul	x10,x6,x24		// lo(a[0..3]*b[0])
	adc	x0,x0,xzr	// modulo-scheduled
	mul	x11,x7,x24
	add	x28,x28,#8
	mul	x12,x8,x24
	and	x28,x28,#31
	mul	x13,x9,x24
	adds	x19,x19,x10
	umulh	x10,x6,x24		// hi(a[0..3]*b[0])
	adcs	x20,x20,x11
	mul	x25,x19,x4		// t[0]*n0
	adcs	x21,x21,x12
	umulh	x11,x7,x24
	adcs	x22,x22,x13
	umulh	x12,x8,x24
	adc	x23,xzr,xzr
	umulh	x13,x9,x24
	ldr	x24,[x2,x28]		// next b[i] (or b[0])
	adds	x20,x20,x10
	// (*)	mul	x10,x14,x25	// lo(n[0..3]*t[0]*n0)
	str	x25,[x26],#8		// put aside t[0]*n0 for tail processing
	adcs	x21,x21,x11
	mul	x11,x15,x25
	adcs	x22,x22,x12
	mul	x12,x16,x25
	adc	x23,x23,x13		// can't overflow
	mul	x13,x17,x25
	// (*)	adds	xzr,x19,x10
	subs	xzr,x19,#1		// (*)
	umulh	x10,x14,x25		// hi(n[0..3]*t[0]*n0)
	adcs	x19,x20,x11
	umulh	x11,x15,x25
	adcs	x20,x21,x12
	umulh	x12,x16,x25
	adcs	x21,x22,x13
	umulh	x13,x17,x25
	adcs	x22,x23,x0
	adc	x0,xzr,xzr
	adds	x19,x19,x10
	sub	x10,x27,x1
	adcs	x20,x20,x11
	adcs	x21,x21,x12
	adcs	x22,x22,x13
	//adc	x0,x0,xzr
	cbnz	x28,.Loop_mul4x_1st_reduction

	cbz	x10,.Lmul4x4_post_condition

	ldp	x6,x7,[x1,#8*0]	// a[4..7]
	ldp	x8,x9,[x1,#8*2]
	add	x1,x1,#8*4
	ldr	x25,[sp]		// t[0]*n0
	ldp	x14,x15,[x3,#8*0]	// n[4..7]
	ldp	x16,x17,[x3,#8*2]
	add	x3,x3,#8*4

.Loop_mul4x_1st_tail:
	mul	x10,x6,x24		// lo(a[4..7]*b[i])
	adc	x0,x0,xzr	// modulo-scheduled
	mul	x11,x7,x24
	add	x28,x28,#8
	mul	x12,x8,x24
	and	x28,x28,#31
	mul	x13,x9,x24
	adds	x19,x19,x10
	umulh	x10,x6,x24		// hi(a[4..7]*b[i])
	adcs	x20,x20,x11
	umulh	x11,x7,x24
	adcs	x21,x21,x12
	umulh	x12,x8,x24
	adcs	x22,x22,x13
	umulh	x13,x9,x24
	adc	x23,xzr,xzr
	ldr	x24,[x2,x28]		// next b[i] (or b[0])
	adds	x20,x20,x10
	mul	x10,x14,x25		// lo(n[4..7]*t[0]*n0)
	adcs	x21,x21,x11
	mul	x11,x15,x25
	adcs	x22,x22,x12
	mul	x12,x16,x25
	adc	x23,x23,x13		// can't overflow
	mul	x13,x17,x25
	adds	x19,x19,x10
	umulh	x10,x14,x25		// hi(n[4..7]*t[0]*n0)
	adcs	x20,x20,x11
	umulh	x11,x15,x25
	adcs	x21,x21,x12
	umulh	x12,x16,x25
	adcs	x22,x22,x13
	adcs	x23,x23,x0
	umulh	x13,x17,x25
	adc	x0,xzr,xzr
	ldr	x25,[sp,x28]		// next t[0]*n0
	str	x19,[x26],#8		// result!!!
	adds	x19,x20,x10
	sub	x10,x27,x1		// done yet?
	adcs	x20,x21,x11
	adcs	x21,x22,x12
	adcs	x22,x23,x13
	//adc	x0,x0,xzr
	cbnz	x28,.Loop_mul4x_1st_tail

	sub	x11,x27,x5	// rewinded x1
	cbz	x10,.Lmul4x_proceed

	ldp	x6,x7,[x1,#8*0]
	ldp	x8,x9,[x1,#8*2]
	add	x1,x1,#8*4
	ldp	x14,x15,[x3,#8*0]
	ldp	x16,x17,[x3,#8*2]
	add	x3,x3,#8*4
	b	.Loop_mul4x_1st_tail

.align	5
.Lmul4x_proceed:
	ldr	x24,[x2,#8*4]!		// *++b
	adc	x30,x0,xzr
	ldp	x6,x7,[x11,#8*0]	// a[0..3]
	sub	x3,x3,x5		// rewind np
	ldp	x8,x9,[x11,#8*2]
	add	x1,x11,#8*4

	stp	x19,x20,[x26,#8*0]	// result!!!
	ldp	x19,x20,[sp,#8*4]	// t[0..3]
	stp	x21,x22,[x26,#8*2]	// result!!!
	ldp	x21,x22,[sp,#8*6]

	ldp	x14,x15,[x3,#8*0]	// n[0..3]
	mov	x26,sp
	ldp	x16,x17,[x3,#8*2]
	adds	x3,x3,#8*4		// clear carry bit
	mov	x0,xzr

.align	4
.Loop_mul4x_reduction:
	mul	x10,x6,x24		// lo(a[0..3]*b[4])
	adc	x0,x0,xzr	// modulo-scheduled
	mul	x11,x7,x24
	add	x28,x28,#8
	mul	x12,x8,x24
	and	x28,x28,#31
	mul	x13,x9,x24
	adds	x19,x19,x10
	umulh	x10,x6,x24		// hi(a[0..3]*b[4])
	adcs	x20,x20,x11
	mul	x25,x19,x4		// t[0]*n0
	adcs	x21,x21,x12
	umulh	x11,x7,x24
	adcs	x22,x22,x13
	umulh	x12,x8,x24
	adc	x23,xzr,xzr
	umulh	x13,x9,x24
	ldr	x24,[x2,x28]		// next b[i]
	adds	x20,x20,x10
	// (*)	mul	x10,x14,x25
	str	x25,[x26],#8		// put aside t[0]*n0 for tail processing
	adcs	x21,x21,x11
	mul	x11,x15,x25		// lo(n[0..3]*t[0]*n0)
	adcs	x22,x22,x12
	mul	x12,x16,x25
	adc	x23,x23,x13		// can't overflow
	mul	x13,x17,x25
	// (*)	adds	xzr,x19,x10
	subs	xzr,x19,#1		// (*)
	umulh	x10,x14,x25		// hi(n[0..3]*t[0]*n0)
	adcs	x19,x20,x11
	umulh	x11,x15,x25
	adcs	x20,x21,x12
	umulh	x12,x16,x25
	adcs	x21,x22,x13
	umulh	x13,x17,x25
	adcs	x22,x23,x0
	adc	x0,xzr,xzr
	adds	x19,x19,x10
	adcs	x20,x20,x11
	adcs	x21,x21,x12
	adcs	x22,x22,x13
	//adc	x0,x0,xzr
	cbnz	x28,.Loop_mul4x_reduction

	adc	x0,x0,xzr
	ldp	x10,x11,[x26,#8*4]	// t[4..7]
	ldp	x12,x13,[x26,#8*6]
	ldp	x6,x7,[x1,#8*0]	// a[4..7]
	ldp	x8,x9,[x1,#8*2]
	add	x1,x1,#8*4
	adds	x19,x19,x10
	adcs	x20,x20,x11
	adcs	x21,x21,x12
	adcs	x22,x22,x13
	//adc	x0,x0,xzr

	ldr	x25,[sp]		// t[0]*n0
	ldp	x14,x15,[x3,#8*0]	// n[4..7]
	ldp	x16,x17,[x3,#8*2]
	add	x3,x3,#8*4

.align	4
.Loop_mul4x_tail:
	mul	x10,x6,x24		// lo(a[4..7]*b[4])
	adc	x0,x0,xzr	// modulo-scheduled
	mul	x11,x7,x24
	add	x28,x28,#8
	mul	x12,x8,x24
	and	x28,x28,#31
	mul	x13,x9,x24
	adds	x19,x19,x10
	umulh	x10,x6,x24		// hi(a[4..7]*b[4])
	adcs	x20,x20,x11
	umulh	x11,x7,x24
	adcs	x21,x21,x12
	umulh	x12,x8,x24
	adcs	x22,x22,x13
	umulh	x13,x9,x24
	adc	x23,xzr,xzr
	ldr	x24,[x2,x28]		// next b[i]
	adds	x20,x20,x10
	mul	x10,x14,x25		// lo(n[4..7]*t[0]*n0)
	adcs	x21,x21,x11
	mul	x11,x15,x25
	adcs	x22,x22,x12
	mul	x12,x16,x25
	adc	x23,x23,x13		// can't overflow
	mul	x13,x17,x25
	adds	x19,x19,x10
	umulh	x10,x14,x25		// hi(n[4..7]*t[0]*n0)
	adcs	x20,x20,x11
	umulh	x11,x15,x25
	adcs	x21,x21,x12
	umulh	x12,x16,x25
	adcs	x22,x22,x13
	umulh	x13,x17,x25
	adcs	x23,x23,x0
	ldr	x25,[sp,x28]		// next t[0]*n0
	adc	x0,xzr,xzr
	str	x19,[x26],#8		// result!!!
	adds	x19,x20,x10
	sub	x10,x27,x1		// done yet?
	adcs	x20,x21,x11
	adcs	x21,x22,x12
	adcs	x22,x23,x13
	//adc	x0,x0,xzr
	cbnz	x28,.Loop_mul4x_tail

	sub	x11,x3,x5		// rewinded np?
	adc	x0,x0,xzr
	cbz	x10,.Loop_mul4x_break

	ldp	x10,x11,[x26,#8*4]
	ldp	x12,x13,[x26,#8*6]
	ldp	x6,x7,[x1,#8*0]
	ldp	x8,x9,[x1,#8*2]
	add	x1,x1,#8*4
	adds	x19,x19,x10
	adcs	x20,x20,x11
	adcs	x21,x21,x12
	adcs	x22,x22,x13
	//adc	x0,x0,xzr
	ldp	x14,x15,[x3,#8*0]
	ldp	x16,x17,[x3,#8*2]
	add	x3,x3,#8*4
	b	.Loop_mul4x_tail

.align	4
.Loop_mul4x_break:
	ldp	x12,x13,[x29,#96]	// pull rp and &b[num]
	adds	x19,x19,x30
	add	x2,x2,#8*4		// bp++
	adcs	x20,x20,xzr
	sub	x1,x1,x5		// rewind ap
	adcs	x21,x21,xzr
	stp	x19,x20,[x26,#8*0]	// result!!!
	adcs	x22,x22,xzr
	ldp	x19,x20,[sp,#8*4]	// t[0..3]
	adc	x30,x0,xzr
	stp	x21,x22,[x26,#8*2]	// result!!!
	cmp	x2,x13			// done yet?
	ldp	x21,x22,[sp,#8*6]
	ldp	x14,x15,[x11,#8*0]	// n[0..3]
	ldp	x16,x17,[x11,#8*2]
	add	x3,x11,#8*4
	b.eq	.Lmul4x_post

	ldr	x24,[x2]
	ldp	x6,x7,[x1,#8*0]	// a[0..3]
	ldp	x8,x9,[x1,#8*2]
	adds	x1,x1,#8*4		// clear carry bit
	mov	x0,xzr
	mov	x26,sp
	b	.Loop_mul4x_reduction

.align	4
.Lmul4x_post:
	// Final step. We see if result is larger than modulus, and
	// if it is, subtract the modulus. But comparison implies
	// subtraction. So we subtract modulus, see if it borrowed,
	// and conditionally copy original value.
	mov	x0,x12
	mov	x27,x12		// x0 copy
	subs	x10,x19,x14
	add	x26,sp,#8*8
	sbcs	x11,x20,x15
	sub	x28,x5,#8*4

.Lmul4x_sub:
	sbcs	x12,x21,x16
	ldp	x14,x15,[x3,#8*0]
	sub	x28,x28,#8*4
	ldp	x19,x20,[x26,#8*0]
	sbcs	x13,x22,x17
	ldp	x16,x17,[x3,#8*2]
	add	x3,x3,#8*4
	ldp	x21,x22,[x26,#8*2]
	add	x26,x26,#8*4
	stp	x10,x11,[x0,#8*0]
	sbcs	x10,x19,x14
	stp	x12,x13,[x0,#8*2]
	add	x0,x0,#8*4
	sbcs	x11,x20,x15
	cbnz	x28,.Lmul4x_sub

	sbcs	x12,x21,x16
	mov	x26,sp
	add	x1,sp,#8*4
	ldp	x6,x7,[x27,#8*0]
	sbcs	x13,x22,x17
	stp	x10,x11,[x0,#8*0]
	ldp	x8,x9,[x27,#8*2]
	stp	x12,x13,[x0,#8*2]
	ldp	x19,x20,[x1,#8*0]
	ldp	x21,x22,[x1,#8*2]
	sbcs	xzr,x30,xzr	// did it borrow?
	ldr	x30,[x29,#8]		// pull return address

	sub	x28,x5,#8*4
// Constant-time select of the reduced result; wipes the stack scratch.
.Lmul4x_cond_copy:
	sub	x28,x28,#8*4
	csel	x10,x19,x6,lo
	stp	xzr,xzr,[x26,#8*0]
	csel	x11,x20,x7,lo
	ldp	x6,x7,[x27,#8*4]
	ldp	x19,x20,[x1,#8*4]
	csel	x12,x21,x8,lo
	stp	xzr,xzr,[x26,#8*2]
	add	x26,x26,#8*4
	csel	x13,x22,x9,lo
	ldp	x8,x9,[x27,#8*6]
	ldp	x21,x22,[x1,#8*6]
	add	x1,x1,#8*4
	stp	x10,x11,[x27,#8*0]
	stp	x12,x13,[x27,#8*2]
	add	x27,x27,#8*4
	cbnz	x28,.Lmul4x_cond_copy

	csel	x10,x19,x6,lo
	stp	xzr,xzr,[x26,#8*0]
	csel	x11,x20,x7,lo
	stp	xzr,xzr,[x26,#8*2]
	csel	x12,x21,x8,lo
	stp	xzr,xzr,[x26,#8*3]
	csel	x13,x22,x9,lo
	stp	xzr,xzr,[x26,#8*4]
	stp	x10,x11,[x27,#8*0]
	stp	x12,x13,[x27,#8*2]

	b	.Lmul4x_done

.align	4
.Lmul4x4_post_condition:
	adc	x0,x0,xzr
	ldr	x1,[x29,#96]		// pull rp
	// x19-3,x0 hold result, x14-7 hold modulus
	subs	x6,x19,x14
	ldr	x30,[x29,#8]		// pull return address
	sbcs	x7,x20,x15
	stp	xzr,xzr,[sp,#8*0]
	sbcs	x8,x21,x16
	stp	xzr,xzr,[sp,#8*2]
	sbcs	x9,x22,x17
	stp	xzr,xzr,[sp,#8*4]
	sbcs	xzr,x0,xzr		// did it borrow?
	stp	xzr,xzr,[sp,#8*6]

	// x6-3 hold result-modulus
	csel	x6,x19,x6,lo
	csel	x7,x20,x7,lo
	csel	x8,x21,x8,lo
	csel	x9,x22,x9,lo
	stp	x6,x7,[x1,#8*0]
	stp	x8,x9,[x1,#8*2]

.Lmul4x_done:
	ldp	x19,x20,[x29,#16]
	mov	sp,x29
	ldp	x21,x22,[x29,#32]
	mov	x0,#1
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldr	x29,[sp],#128
	// x30 is popped earlier
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	bn_mul4x_mont,.-bn_mul4x_mont
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
Prom11111/plumbingtech | 74,317 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/chacha20_poly1305_armv8-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.section .rodata
.align 7
.Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.Linc:
.long 1,2,3,4
.Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.text
//-----------------------------------------------------------------------
// .Lpoly_hash_ad_internal — absorb additional authenticated data (AAD)
// into the Poly1305 accumulator, 16 bytes per full block, with a
// zero-padded partial block handled via v20 at the tail.
//
// Register contract (local helper — callers set this up in this file):
//   x3  = AAD pointer (advanced past consumed bytes)
//   x4  = AAD length in bytes (consumed to 0)
//   x8,x9,x10 = Poly1305 accumulator acc0,acc1,acc2 (updated)
//   x16,x17   = clamped key words r0,r1
//   x15 = high padding bit added to acc2 per block; the seal entry point
//         sets x15=#1 (see "Prepare the Poly1305 state")
// Clobbers: x11-x14, v20, flags. Returns via ret (x30 unmodified).
//
// NOTE(review): the tail loads bytes in reverse (ext+ldrb), building the
// partial block without reading past the AAD buffer.
//-----------------------------------------------------------------------
.type	.Lpoly_hash_ad_internal,%function
.align	6
.Lpoly_hash_ad_internal:
.cfi_startproc
	cbnz	x4, .Lpoly_hash_intro
	ret

.Lpoly_hash_intro:
	cmp	x4, #16
	b.lt	.Lpoly_hash_ad_tail
	ldp	x11, x12, [x3], 16
	adds	x8, x8, x11
	adcs	x9, x9, x12
	adc	x10, x10, x15
	mul	x11, x8, x16     // [t2:t1:t0] = [acc2:acc1:acc0] * r0
	umulh	x12, x8, x16
	mul	x13, x9, x16
	umulh	x14, x9, x16
	adds	x12, x12, x13
	mul	x13, x10, x16
	adc	x13, x13, x14
	mul	x14, x8, x17       // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
	umulh	x8, x8, x17
	adds	x12, x12, x14
	mul	x14, x9, x17
	umulh	x9, x9, x17
	adcs	x14, x14, x8
	mul	x10, x10, x17
	adc	x10, x10, x9
	adds	x13, x13, x14
	adc	x14, x10, xzr
	and	x10, x13, #3         // At this point acc2 is 2 bits at most (value of 3)
	and	x8, x13, #-4
	extr	x13, x14, x13, #2
	adds	x8, x8, x11
	lsr	x11, x14, #2
	adc	x9, x14, x11         // No carry out since t0 is 61 bits and t3 is 63 bits
	adds	x8, x8, x13
	adcs	x9, x9, x12
	adc	x10, x10, xzr        // At this point acc2 has the value of 4 at most
	sub	x4, x4, #16
	b	.Lpoly_hash_ad_internal

.Lpoly_hash_ad_tail:
	cbz	x4, .Lpoly_hash_ad_ret

	eor	v20.16b, v20.16b, v20.16b // Use T0 to load the AAD
	sub	x4, x4, #1

.Lpoly_hash_tail_16_compose:
	ext	v20.16b, v20.16b, v20.16b, #15
	ldrb	w11, [x3, x4]
	mov	v20.b[0], w11
	subs	x4, x4, #1
	b.ge	.Lpoly_hash_tail_16_compose
	mov	x11, v20.d[0]
	mov	x12, v20.d[1]
	adds	x8, x8, x11
	adcs	x9, x9, x12
	adc	x10, x10, x15
	mul	x11, x8, x16     // [t2:t1:t0] = [acc2:acc1:acc0] * r0
	umulh	x12, x8, x16
	mul	x13, x9, x16
	umulh	x14, x9, x16
	adds	x12, x12, x13
	mul	x13, x10, x16
	adc	x13, x13, x14
	mul	x14, x8, x17       // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
	umulh	x8, x8, x17
	adds	x12, x12, x14
	mul	x14, x9, x17
	umulh	x9, x9, x17
	adcs	x14, x14, x8
	mul	x10, x10, x17
	adc	x10, x10, x9
	adds	x13, x13, x14
	adc	x14, x10, xzr
	and	x10, x13, #3         // At this point acc2 is 2 bits at most (value of 3)
	and	x8, x13, #-4
	extr	x13, x14, x13, #2
	adds	x8, x8, x11
	lsr	x11, x14, #2
	adc	x9, x14, x11         // No carry out since t0 is 61 bits and t3 is 63 bits
	adds	x8, x8, x13
	adcs	x9, x9, x12
	adc	x10, x10, xzr        // At this point acc2 has the value of 4 at most

.Lpoly_hash_ad_ret:
	ret
.cfi_endproc
.size	.Lpoly_hash_ad_internal, .-.Lpoly_hash_ad_internal
/////////////////////////////////
//
// void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data);
//
.globl chacha20_poly1305_seal
.hidden chacha20_poly1305_seal
.type chacha20_poly1305_seal,%function
.align 6
// Register roles (AAPCS64):
//   x0 = output buffer, x1 = input buffer, x2 = input length,
//   x3 = aad pointer (later reused as the Poly1305 hash-source pointer),
//   x4 = aad length, x5 = seal_data (key/nonce/counter state; offsets
//        #48/#56 hold extra_in ptr/len — see .Lseal_tail_64).
//   x8:x9:x10 = Poly1305 accumulator (acc0:acc1:acc2), x15 = constant 1,
//   x16:x17  = clamped Poly1305 R key (r0:r1).
//   v24 = ChaCha CONSTS, v25 = INC, v26 = ROL8 table, v27 = CLAMP mask
//        (later reused to hold the Poly1305 S key), v28-v30 = key/nonce,
//   v31 = (aad_len, ct_len) block hashed at finalization.
chacha20_poly1305_seal:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
ldr x12, [x5, #56] // The total cipher text length includes extra_in_len
add x12, x12, x2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x12
cmp x2, #128
b.le .Lseal_128 // Optimization for smaller buffers
// Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext,
// and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically,
// the fifth block (A4-D4) horizontally.
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
sub x5, x5, #32
mov x6, #10
.align 5
// Each loop iteration is one ChaCha20 double-round (column round +
// diagonal round) over all five blocks; 10 iterations = 20 rounds.
.Lseal_init_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
// tbl with the ROL8 table performs the rotate-left-by-8 step.
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.hi .Lseal_init_rounds
add v15.4s, v15.4s, v25.4s
mov x11, #4
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
// De-interleave the four vertically-computed blocks (loaded with ld4r)
// back into whole per-block registers via zip1/zip2 transposes.
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
and v4.16b, v4.16b, v27.16b
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
mov x16, v4.d[0] // Move the R key to GPRs
mov x17, v4.d[1]
mov v27.16b, v9.16b // Store the S key
bl .Lpoly_hash_ad_internal
mov x3, x0
cmp x2, #256
b.le .Lseal_tail
// Encrypt the first 256 bytes with the four prepared blocks.
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #256
mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds
mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256
.Lseal_main_loop:
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
sub x5, x5, #32
.align 5
// One ChaCha20 double-round per iteration, with one 16-byte Poly1305
// block absorbed in the middle to interleave scalar and SIMD work.
.Lseal_main_loop_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
// Poly1305: absorb 16 bytes from [x3] into acc, then multiply by R
// and reduce mod 2^130-5.
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.ge .Lseal_main_loop_rounds
// For the remaining x7 iterations, hash a second 16-byte block per
// double-round (two blocks total per iteration).
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
subs x7, x7, #1
b.gt .Lseal_main_loop_rounds
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
// De-interleave the 4x4-transposed state back into per-block registers.
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
cmp x2, #320
b.le .Lseal_tail
// Encrypt a full 320 bytes with the five prepared blocks.
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #320
mov x6, #0
mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration
b .Lseal_main_loop
.Lseal_tail:
// This part of the function handles the storage and authentication of the last [0,320) bytes
// We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data.
cmp x2, #64
b.lt .Lseal_tail_64
// Store and authenticate 64B blocks per iteration
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
// Hash each of the four 16-byte ciphertext lanes directly from the
// vector registers (avoids a round-trip through memory).
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
// Shift the state left by 64 bytes for the next iteration of the loop
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
mov v1.16b, v2.16b
mov v6.16b, v7.16b
mov v11.16b, v12.16b
mov v16.16b, v17.16b
mov v2.16b, v3.16b
mov v7.16b, v8.16b
mov v12.16b, v13.16b
mov v17.16b, v18.16b
mov v3.16b, v4.16b
mov v8.16b, v9.16b
mov v13.16b, v14.16b
mov v18.16b, v19.16b
b .Lseal_tail
.Lseal_tail_64:
ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr
// Here we handle the last [0,64) bytes of plaintext
cmp x2, #16
b.lt .Lseal_tail_16
// Each iteration encrypt and authenticate a 16B block
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b}, [x0], #16
sub x2, x2, #16
// Shift the state left by 16 bytes for the next iteration of the loop
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
b .Lseal_tail_64
.Lseal_tail_16:
// Here we handle the last [0,16) bytes of ciphertext that require a padded block
cbz x2, .Lseal_hash_extra
eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes
not v22.16b, v20.16b
mov x6, x2
add x1, x1, x2
cbz x4, .Lseal_tail_16_compose // No extra data to pad with, zero padding
mov x7, #16 // We need to load some extra_in first for padding
sub x7, x7, x2
cmp x4, x7
csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register
mov x12, x7
add x3, x3, x7
sub x4, x4, x7
.Lseal_tail16_compose_extra_in:
// Prepend extra_in bytes one at a time (reading backwards from x3).
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x7, x7, #1
b.gt .Lseal_tail16_compose_extra_in
add x3, x3, x12
.Lseal_tail_16_compose:
// Prepend plaintext bytes (backwards) and build the mask in v21.
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x1, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt .Lseal_tail_16_compose
and v0.16b, v0.16b, v21.16b
eor v20.16b, v20.16b, v0.16b
mov v21.16b, v20.16b
.Lseal_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt .Lseal_tail_16_store
// Hash in the final ct block concatenated with extra_in
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lseal_hash_extra:
cbz x4, .Lseal_finalize
.Lseal_hash_extra_loop:
cmp x4, #16
b.lt .Lseal_hash_extra_tail
ld1 {v20.16b}, [x3], #16
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b .Lseal_hash_extra_loop
.Lseal_hash_extra_tail:
cbz x4, .Lseal_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext
add x3, x3, x4
.Lseal_hash_extra_load:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x4, x4, #1
b.gt .Lseal_hash_extra_load
// Hash in the final padded extra_in block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lseal_finalize:
// Hash the length block (aad_len || ct_len) saved in v31.
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
// Compute acc - (2^130 - 5) and keep it if no borrow (acc >= p).
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
// Add the S key (saved in v27) and write the 16-byte tag back to [x5].
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
.Lseal_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
// Three horizontal ChaCha20 blocks: counters 1 and 2 for the keystream,
// counter 0 (block 2) for the Poly1305 R/S keys.
.Lseal_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi .Lseal_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
// Only the first 32 bytes of the third block (counter = 0) are needed,
// so skip updating v12 and v17.
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl .Lpoly_hash_ad_internal
b .Lseal_tail
.cfi_endproc
.size chacha20_poly1305_seal,.-chacha20_poly1305_seal
/////////////////////////////////
//
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data);
//
.globl chacha20_poly1305_open
.hidden chacha20_poly1305_open
.type chacha20_poly1305_open,%function
.align 6
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in,
//                             uint8_t *ad, size_t len_ad, union open_data *aead_data)
// In:  x0 = plaintext out, x1 = ciphertext in, x2 = ciphertext length,
//      x3 = additional data, x4 = additional-data length,
//      x5 = aead_data (key/counter/nonce state; the Poly1305 tag is written
//      back through it at .Lopen_finalize).
chacha20_poly1305_open:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]! // 80-byte frame: x29/x30 plus the low halves of v8-v15
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16] // AAPCS64 requires d8-d15 to be preserved,
stp d10, d11, [sp, #32] // so save them across the call
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5] // 48 bytes of state: ChaCha key (v28,v29) and counter/nonce (v30)
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0 // acc0
mov x9, #0 // acc1
mov x10, #0 // acc2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x2 // (hashed as the final Poly1305 block in .Lopen_finalize)
cmp x2, #128
b.le .Lopen_128 // Optimization for smaller buffers
// Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys
mov v0.16b, v24.16b // rows of the block: consts / key / key / counter+nonce
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
mov x6, #10 // 10 ChaCha double-rounds
.align 5
.Lopen_init_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.hi .Lopen_init_rounds
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
and v0.16b, v0.16b, v27.16b
mov x16, v0.d[0] // Move the R key to GPRs
mov x17, v0.d[1]
mov v27.16b, v5.16b // Store the S key
bl .Lpoly_hash_ad_internal
.Lopen_ad_done:
mov x3, x1
// Each iteration of the loop hashes 320 bytes and prepares keystream for 320 bytes
.Lopen_main_loop:
cmp x2, #192
b.lt .Lopen_tail
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
sub x5, x5, #32
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12
sub x4, x4, #10
// itr2 (x7) = double-rounds interleaved with hashing; itr1 (x6) = rounds left
// with no data to hash.  The original had `subs x6, x7, x4` duplicated; the
// instruction is idempotent (x6 is only a destination, flags recomputed
// identically), so the second copy was dead code and is removed.
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full
cbz x7, .Lopen_main_loop_rounds_short
.align 5
.Lopen_main_loop_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lopen_main_loop_rounds_short:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x7, x7, #1
b.gt .Lopen_main_loop_rounds
subs x6, x6, #1
b.ge .Lopen_main_loop_rounds_short
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
// We can always safely store 192 bytes
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #192
mov v0.16b, v3.16b
mov v5.16b, v8.16b
mov v10.16b, v13.16b
mov v15.16b, v18.16b
cmp x2, #64
b.lt .Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v4.16b
mov v5.16b, v9.16b
mov v10.16b, v14.16b
mov v15.16b, v19.16b
cmp x2, #64
b.lt .Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b .Lopen_main_loop
.Lopen_tail:
cbz x2, .Lopen_finalize
lsr x4, x2, #4 // How many whole blocks we have to hash
cmp x2, #64
b.le .Lopen_tail_64
cmp x2, #128
b.le .Lopen_tail_128
.Lopen_tail_192:
// We need three more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
mov v17.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v21.16b, v21.16b, v21.16b
ins v23.s[0], v25.s[0]
ins v21.d[0], x15
add v22.4s, v23.4s, v21.4s
add v21.4s, v22.4s, v21.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing
sub x4, x4, x7
cbz x7, .Lopen_tail_192_rounds_no_hash
.Lopen_tail_192_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lopen_tail_192_rounds_no_hash:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x7, x7, #1
b.gt .Lopen_tail_192_rounds
subs x6, x6, #1
b.ge .Lopen_tail_192_rounds_no_hash
// We hashed 160 bytes at most, may still have 32 bytes left
.Lopen_tail_192_hash:
cbz x4, .Lopen_tail_192_hash_done
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b .Lopen_tail_192_hash
.Lopen_tail_192_hash_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v12.4s, v12.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #128
b .Lopen_tail_64_store
.Lopen_tail_128:
// We need two more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v22.16b, v22.16b, v22.16b
ins v23.s[0], v25.s[0]
ins v22.d[0], x15
add v22.4s, v22.4s, v23.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
mov x6, #10
sub x6, x6, x4
.Lopen_tail_128_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #4
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #12
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #4
subs x6, x6, #1
b.gt .Lopen_tail_128_rounds
cbz x4, .Lopen_tail_128_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b .Lopen_tail_128_rounds
.Lopen_tail_128_rounds_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b .Lopen_tail_64_store
.Lopen_tail_64:
// We just need a single block
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
ins v23.s[0], v25.s[0]
add v15.4s, v15.4s, v23.4s
mov x6, #10
sub x6, x6, x4
.Lopen_tail_64_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.gt .Lopen_tail_64_rounds
cbz x4, .Lopen_tail_64_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b .Lopen_tail_64_rounds
.Lopen_tail_64_rounds_done:
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v15.4s, v15.4s, v23.4s
// Decrypt the remaining whole 16-byte chunks.  Up to 64 bytes of keystream
// are held in v0, v5, v10, v15; after consuming v0 the next chunk is
// rotated down into it.
.Lopen_tail_64_store:
cmp x2, #16
b.lt .Lopen_tail_16 // fewer than 16 bytes left: padded-block path
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b // plaintext = ciphertext XOR keystream
st1 {v20.16b}, [x0], #16
mov v0.16b, v5.16b // shift the next 16 keystream bytes into v0
mov v5.16b, v10.16b
mov v10.16b, v15.16b
sub x2, x2, #16
b .Lopen_tail_64_store
.Lopen_tail_16:
// Here we handle the last [0,16) bytes that require a padded block
cbz x2, .Lopen_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask
not v22.16b, v20.16b
add x7, x1, x2
mov x6, x2
.Lopen_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x7, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt .Lopen_tail_16_compose
and v20.16b, v20.16b, v21.16b
// Hash in the final padded block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
.Lopen_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt .Lopen_tail_16_store
// Hash the final Poly1305 block (aad_len || ct_len, stashed in v31 at entry),
// reduce the accumulator mod p = 2^130 - 5, add the S key, and write the
// 16-byte tag back through x5 before restoring registers and returning.
.Lopen_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15 // x12 = -1 (x15 holds the constant 1)
orr x13, xzr, #3 // x13 = 3
subs x11, x8, #-5 // [x13:x12:x11] = acc - (2^130 - 5), i.e. acc + 5 with borrows
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs // keep the subtracted value iff acc >= p (no borrow)
csel x9, x12, x9, cs
csel x10, x13, x10, cs
mov x11, v27.d[0] // S key (stashed in v27 after the first ChaCha block)
mov x12, v27.d[1]
adds x8, x8, x11 // tag = (acc + S); only the low 128 bits are kept
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5] // return the 16-byte tag through aead_data
ldp d8, d9, [sp, #16] // restore callee-saved FP registers and frame
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
.Lopen_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
.Lopen_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi .Lopen_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl .Lpoly_hash_ad_internal
.Lopen_128_store:
cmp x2, #64
b.lt .Lopen_128_store_64
ld1 {v20.16b - v23.16b}, [x1], #64
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
.Lopen_128_store_64:
lsr x4, x2, #4
mov x3, x1
.Lopen_128_hash_64:
cbz x4, .Lopen_tail_64_store
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b .Lopen_128_hash_64
.cfi_endproc
.size chacha20_poly1305_open,.-chacha20_poly1305_open
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
Prom11111/plumbingtech | 15,121 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesni-x86-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
// _aesni_encrypt2: encrypt two AES blocks in parallel with AES-NI.
// In:  %edx = AES key schedule, %ecx = round count (callers load it from
//      key+240, the standard AES_KEY "rounds" field),
//      %xmm2,%xmm3 = plaintext blocks.
// Out: %xmm2,%xmm3 = ciphertext blocks.  Clobbers %xmm0,%xmm1,%ecx,%edx,flags.
// The .byte sequences are hand-encoded AES-NI instructions (decoded in the
// per-line comments) so the file assembles on pre-AES-NI toolchains.
.hidden _aesni_encrypt2
.type _aesni_encrypt2,@function
.align 16
_aesni_encrypt2:
movups (%edx),%xmm0	// round-0 key
shll $4,%ecx	// rounds * 16 = byte span of the round keys
movups 16(%edx),%xmm1	// round-1 key
xorps %xmm0,%xmm2	// initial whitening: block ^= round-0 key
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0	// round-2 key
leal 32(%edx,%ecx,1),%edx	// %edx -> just past the last round key
negl %ecx
addl $16,%ecx	// %ecx = negative offset of next key; reaches 0 at the end
.L000enc2_loop:
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
movups (%edx,%ecx,1),%xmm1	// next round key
addl $32,%ecx	// two keys consumed per iteration; ZF set on the last one
.byte 102,15,56,220,208	// aesenc %xmm0,%xmm2
.byte 102,15,56,220,216	// aesenc %xmm0,%xmm3
movups -16(%edx,%ecx,1),%xmm0
jnz .L000enc2_loop
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
.byte 102,15,56,221,208	// aesenclast %xmm0,%xmm2
.byte 102,15,56,221,216	// aesenclast %xmm0,%xmm3
ret
.size _aesni_encrypt2,.-_aesni_encrypt2
// _aesni_encrypt3: encrypt three AES blocks in parallel with AES-NI.
// In:  %edx = AES key schedule, %ecx = round count,
//      %xmm2..%xmm4 = plaintext blocks.
// Out: %xmm2..%xmm4 = ciphertext blocks.  Clobbers %xmm0,%xmm1,%ecx,%edx,flags.
// Same negative-index key walk as _aesni_encrypt2, widened to three pipes.
.hidden _aesni_encrypt3
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
movups (%edx),%xmm0	// round-0 key
shll $4,%ecx	// rounds * 16
movups 16(%edx),%xmm1	// round-1 key
xorps %xmm0,%xmm2	// whitening of all three blocks
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0	// round-2 key
leal 32(%edx,%ecx,1),%edx	// %edx -> just past the last round key
negl %ecx
addl $16,%ecx	// negative running key offset
.L001enc3_loop:
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
.byte 102,15,56,220,225	// aesenc %xmm1,%xmm4
movups (%edx,%ecx,1),%xmm1	// next round key
addl $32,%ecx
.byte 102,15,56,220,208	// aesenc %xmm0,%xmm2
.byte 102,15,56,220,216	// aesenc %xmm0,%xmm3
.byte 102,15,56,220,224	// aesenc %xmm0,%xmm4
movups -16(%edx,%ecx,1),%xmm0
jnz .L001enc3_loop
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
.byte 102,15,56,220,225	// aesenc %xmm1,%xmm4
.byte 102,15,56,221,208	// aesenclast %xmm0,%xmm2
.byte 102,15,56,221,216	// aesenclast %xmm0,%xmm3
.byte 102,15,56,221,224	// aesenclast %xmm0,%xmm4
ret
.size _aesni_encrypt3,.-_aesni_encrypt3
// _aesni_encrypt4: encrypt four AES blocks in parallel with AES-NI.
// In:  %edx = AES key schedule, %ecx = round count,
//      %xmm2..%xmm5 = plaintext blocks.
// Out: %xmm2..%xmm5 = ciphertext blocks.  Clobbers %xmm0,%xmm1,%ecx,%edx,flags.
.hidden _aesni_encrypt4
.type _aesni_encrypt4,@function
.align 16
_aesni_encrypt4:
movups (%edx),%xmm0	// round-0 key
movups 16(%edx),%xmm1	// round-1 key
shll $4,%ecx	// rounds * 16
xorps %xmm0,%xmm2	// whitening of all four blocks
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0	// round-2 key
leal 32(%edx,%ecx,1),%edx	// %edx -> just past the last round key
negl %ecx
.byte 15,31,64,0	// nopl 0(%eax): padding before the loop entry
addl $16,%ecx	// negative running key offset
.L002enc4_loop:
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
.byte 102,15,56,220,225	// aesenc %xmm1,%xmm4
.byte 102,15,56,220,233	// aesenc %xmm1,%xmm5
movups (%edx,%ecx,1),%xmm1	// next round key
addl $32,%ecx
.byte 102,15,56,220,208	// aesenc %xmm0,%xmm2
.byte 102,15,56,220,216	// aesenc %xmm0,%xmm3
.byte 102,15,56,220,224	// aesenc %xmm0,%xmm4
.byte 102,15,56,220,232	// aesenc %xmm0,%xmm5
movups -16(%edx,%ecx,1),%xmm0
jnz .L002enc4_loop
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
.byte 102,15,56,220,225	// aesenc %xmm1,%xmm4
.byte 102,15,56,220,233	// aesenc %xmm1,%xmm5
.byte 102,15,56,221,208	// aesenclast %xmm0,%xmm2
.byte 102,15,56,221,216	// aesenclast %xmm0,%xmm3
.byte 102,15,56,221,224	// aesenclast %xmm0,%xmm4
.byte 102,15,56,221,232	// aesenclast %xmm0,%xmm5
ret
.size _aesni_encrypt4,.-_aesni_encrypt4
// _aesni_encrypt6: encrypt six AES blocks in parallel with AES-NI.
// In:  %edx = AES key schedule, %ecx = round count,
//      %xmm2..%xmm7 = plaintext blocks.
// Out: %xmm2..%xmm7 = ciphertext blocks.  Clobbers %xmm0,%xmm1,%ecx,%edx,flags.
// The first round is partially issued during setup (interleaved with the key
// whitening) and the loop is entered mid-body via .L003_aesni_encrypt6_inner.
// .L_aesni_encrypt6_enter is also used directly as a call target by
// aes_hw_ctr32_encrypt_blocks, which performs its own whitening/first-round
// work before calling in.
.hidden _aesni_encrypt6
.type _aesni_encrypt6,@function
.align 16
_aesni_encrypt6:
movups (%edx),%xmm0	// round-0 key
shll $4,%ecx	// rounds * 16
movups 16(%edx),%xmm1	// round-1 key
xorps %xmm0,%xmm2	// whitening, interleaved with first-round aesenc below
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
leal 32(%edx,%ecx,1),%edx	// %edx -> just past the last round key
negl %ecx
.byte 102,15,56,220,225	// aesenc %xmm1,%xmm4
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx	// negative running key offset
jmp .L003_aesni_encrypt6_inner	// finish round 1 inside the loop body
.align 16
.L004enc6_loop:
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
.byte 102,15,56,220,225	// aesenc %xmm1,%xmm4
.L003_aesni_encrypt6_inner:
.byte 102,15,56,220,233	// aesenc %xmm1,%xmm5
.byte 102,15,56,220,241	// aesenc %xmm1,%xmm6
.byte 102,15,56,220,249	// aesenc %xmm1,%xmm7
.L_aesni_encrypt6_enter:	// external entry (see header comment)
movups (%edx,%ecx,1),%xmm1	// next round key
addl $32,%ecx
.byte 102,15,56,220,208	// aesenc %xmm0,%xmm2
.byte 102,15,56,220,216	// aesenc %xmm0,%xmm3
.byte 102,15,56,220,224	// aesenc %xmm0,%xmm4
.byte 102,15,56,220,232	// aesenc %xmm0,%xmm5
.byte 102,15,56,220,240	// aesenc %xmm0,%xmm6
.byte 102,15,56,220,248	// aesenc %xmm0,%xmm7
movups -16(%edx,%ecx,1),%xmm0
jnz .L004enc6_loop
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
.byte 102,15,56,220,225	// aesenc %xmm1,%xmm4
.byte 102,15,56,220,233	// aesenc %xmm1,%xmm5
.byte 102,15,56,220,241	// aesenc %xmm1,%xmm6
.byte 102,15,56,220,249	// aesenc %xmm1,%xmm7
.byte 102,15,56,221,208	// aesenclast %xmm0,%xmm2
.byte 102,15,56,221,216	// aesenclast %xmm0,%xmm3
.byte 102,15,56,221,224	// aesenclast %xmm0,%xmm4
.byte 102,15,56,221,232	// aesenclast %xmm0,%xmm5
.byte 102,15,56,221,240	// aesenclast %xmm0,%xmm6
.byte 102,15,56,221,248	// aesenclast %xmm0,%xmm7
ret
.size _aesni_encrypt6,.-_aesni_encrypt6
// aes_hw_ctr32_encrypt_blocks(in, out, blocks, key, ivec) — AES-CTR for i386.
// cdecl: args at 20..36(%esp) after the four register pushes below:
//   %esi = in, %edi = out, %eax = block count, %edx = AES_KEY, %ebx = ivec.
// Encrypts `blocks` full 16-byte blocks; the 32-bit big-endian counter lives
// in the last word of the IV.  Processes 6 blocks per iteration in the main
// loop, with 1..5-block tail paths via the _aesni_encryptN helpers.
// Stack frame (16-aligned): 0(%esp) byte-swap pshufb mask, 16/48(%esp)
// counter-increment and counter vectors, 32(%esp) whitened IV, 64(%esp)
// second counter vector, 80(%esp) saved %esp.
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
aes_hw_ctr32_encrypt_blocks:
.L_aes_hw_ctr32_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
// Test-only instrumentation: record that the AES-NI path was taken.
pushl %ebx
pushl %edx
call .L005pic_for_function_hit
.L005pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+0-.L005pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi	// arg1: in
movl 24(%esp),%edi	// arg2: out
movl 28(%esp),%eax	// arg3: number of blocks
movl 32(%esp),%edx	// arg4: key schedule
movl 36(%esp),%ebx	// arg5: ivec
movl %esp,%ebp	// save %esp, then build a 16-aligned frame
subl $88,%esp
andl $-16,%esp
movl %ebp,80(%esp)
cmpl $1,%eax
je .L006ctr32_one_shortcut	// single block: skip counter-vector setup
movdqu (%ebx),%xmm7	// %xmm7 = IV
// 0(%esp..15): full 16-byte byte-reverse pshufb mask
// (0x0F0E0D0C 0x0B0A0908 0x07060504 0x03020100 in memory).
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
// 16(%esp): counter increment vector {6,6,6,0} for paddd.
movl $6,%ecx
xorl %ebp,%ebp
movl %ecx,16(%esp)
movl %ecx,20(%esp)
movl %ecx,24(%esp)
movl %ebp,28(%esp)
.byte 102,15,58,22,251,3	// pextrd $3,%xmm7,%ebx: big-endian counter word
.byte 102,15,58,34,253,3	// pinsrd $3,%ebp,%xmm7: zero it inside the IV
movl 240(%edx),%ecx	// %ecx = rounds (AES_KEY rounds field)
bswap %ebx	// counter to host order for incl
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movdqa (%esp),%xmm2	// byte-swap mask
// Build counters n..n+2 in %xmm0 and n+3..n+5 in %xmm1.
.byte 102,15,58,34,195,0	// pinsrd $0,%ebx,%xmm0
leal 3(%ebx),%ebp
.byte 102,15,58,34,205,0	// pinsrd $0,%ebp,%xmm1
incl %ebx
.byte 102,15,58,34,195,1	// pinsrd $1,%ebx,%xmm0
incl %ebp
.byte 102,15,58,34,205,1	// pinsrd $1,%ebp,%xmm1
incl %ebx
.byte 102,15,58,34,195,2	// pinsrd $2,%ebx,%xmm0
incl %ebp
.byte 102,15,58,34,205,2	// pinsrd $2,%ebp,%xmm1
movdqa %xmm0,48(%esp)	// keep host-order counters for paddd updates
.byte 102,15,56,0,194	// pshufb %xmm2,%xmm0: back to big-endian
movdqu (%edx),%xmm6	// round-0 key
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202	// pshufb %xmm2,%xmm1
pshufd $192,%xmm0,%xmm2	// broadcast individual counters into blocks
pshufd $128,%xmm0,%xmm3
cmpl $6,%eax
jb .L007ctr32_tail	// fewer than 6 blocks: tail dispatch
pxor %xmm6,%xmm7	// pre-whiten the IV part once
shll $4,%ecx
movl $16,%ebx
movdqa %xmm7,32(%esp)	// whitened IV, reused every iteration
movl %edx,%ebp	// %ebp keeps the original key pointer
subl %ecx,%ebx	// %ebx = 16 - rounds*16, the helper's loop bias
leal 32(%edx,%ecx,1),%edx
subl $6,%eax
jmp .L008ctr32_loop6
.align 16
.L008ctr32_loop6:
// Expand the 6 counters into blocks, xor with the whitened IV, and issue
// the first AES round here before tail-calling into _aesni_encrypt6.
pshufd $64,%xmm0,%xmm4
movdqa 32(%esp),%xmm0
pshufd $192,%xmm1,%xmm5
pxor %xmm0,%xmm2
pshufd $128,%xmm1,%xmm6
pxor %xmm0,%xmm3
pshufd $64,%xmm1,%xmm7
movups 16(%ebp),%xmm1	// round-1 key
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
pxor %xmm0,%xmm6
pxor %xmm0,%xmm7
.byte 102,15,56,220,217	// aesenc %xmm1,%xmm3
movups 32(%ebp),%xmm0	// round-2 key
movl %ebx,%ecx	// reset the helper's negative key offset
.byte 102,15,56,220,225	// aesenc %xmm1,%xmm4
.byte 102,15,56,220,233	// aesenc %xmm1,%xmm5
.byte 102,15,56,220,241	// aesenc %xmm1,%xmm6
.byte 102,15,56,220,249	// aesenc %xmm1,%xmm7
call .L_aesni_encrypt6_enter	// finish rounds 2..last
// XOR the 6 keystream blocks with input and store; interleaved with
// computing the next iteration's counters.
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups %xmm2,(%edi)
movdqa 16(%esp),%xmm0	// increment vector {6,6,6,0}
xorps %xmm1,%xmm4
movdqa 64(%esp),%xmm1
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
paddd %xmm0,%xmm1	// counters += 6
paddd 48(%esp),%xmm0
movdqa (%esp),%xmm2	// byte-swap mask
movups 48(%esi),%xmm3
movups 64(%esi),%xmm4
xorps %xmm3,%xmm5
movups 80(%esi),%xmm3
leal 96(%esi),%esi
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194	// pshufb %xmm2,%xmm0
xorps %xmm4,%xmm6
movups %xmm5,48(%edi)
xorps %xmm3,%xmm7
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202	// pshufb %xmm2,%xmm1
movups %xmm6,64(%edi)
pshufd $192,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
pshufd $128,%xmm0,%xmm3
subl $6,%eax
jnc .L008ctr32_loop6
addl $6,%eax	// %eax = 0..5 blocks remaining
jz .L009ctr32_ret
// Re-whiten for the tail: counters there are not pre-xored with the IV.
movdqu (%ebp),%xmm7
movl %ebp,%edx	// restore the original key pointer for the helpers
pxor 32(%esp),%xmm7
movl 240(%ebp),%ecx
.L007ctr32_tail:
// Dispatch on the remaining block count (1..5), merging the IV into
// each counter block with por before calling the matching helper.
por %xmm7,%xmm2
cmpl $2,%eax
jb .L010ctr32_one
pshufd $64,%xmm0,%xmm4
por %xmm7,%xmm3
je .L011ctr32_two
pshufd $192,%xmm1,%xmm5
por %xmm7,%xmm4
cmpl $4,%eax
jb .L012ctr32_three
pshufd $128,%xmm1,%xmm6
por %xmm7,%xmm5
je .L013ctr32_four
por %xmm7,%xmm6	// 5 blocks: encrypt 6, discard the last
call _aesni_encrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups 48(%esi),%xmm0
xorps %xmm1,%xmm4
movups 64(%esi),%xmm1
xorps %xmm0,%xmm5
movups %xmm2,(%edi)
xorps %xmm1,%xmm6
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L009ctr32_ret
.align 16
.L006ctr32_one_shortcut:
movups (%ebx),%xmm2	// use the IV directly as the single counter block
movl 240(%edx),%ecx
.L010ctr32_one:
// Inline single-block encryption (no helper call).
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L014enc1_loop_1:
.byte 102,15,56,220,209	// aesenc %xmm1,%xmm2
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L014enc1_loop_1
.byte 102,15,56,221,209	// aesenclast %xmm1,%xmm2
movups (%esi),%xmm6
xorps %xmm2,%xmm6
movups %xmm6,(%edi)
jmp .L009ctr32_ret
.align 16
.L011ctr32_two:
call _aesni_encrypt2
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L009ctr32_ret
.align 16
.L012ctr32_three:
call _aesni_encrypt3
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
movups 32(%esi),%xmm7
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm7,%xmm4
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L009ctr32_ret
.align 16
.L013ctr32_four:
call _aesni_encrypt4
movups (%esi),%xmm6
movups 16(%esi),%xmm7
movups 32(%esi),%xmm1
xorps %xmm6,%xmm2
movups 48(%esi),%xmm0
xorps %xmm7,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
.L009ctr32_ret:
// Scrub key/keystream material: zero all xmm registers and the stack
// slots that held whitened-IV/counter state before returning.
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movl 80(%esp),%esp	// restore the caller's %esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin
// aes_hw_set_encrypt_key_base(user_key, bits, aes_key) — expand an AES
// encryption key schedule with AESKEYGENASSIST (the "base" variant).
// cdecl: %eax = user key, %ecx = key bits, %edx = output AES_KEY.
// Supports 128-bit (10 rounds) and 256-bit (14 rounds) keys only; any other
// size returns -2.  Returns 0 in %eax on success and stores the round count
// after the last round key.  Clears %xmm0-%xmm5 before returning.
// NOTE(review): %ebx is pointed at .Lkey_const below but this base path never
// dereferences it — presumably kept for symmetry with the _alt variant.
.globl aes_hw_set_encrypt_key_base
.hidden aes_hw_set_encrypt_key_base
.type aes_hw_set_encrypt_key_base,@function
.align 16
aes_hw_set_encrypt_key_base:
.L_aes_hw_set_encrypt_key_base_begin:
#ifdef BORINGSSL_DISPATCH_TEST
// Test-only instrumentation: record that this implementation was taken.
pushl %ebx
pushl %edx
call .L015pic_for_function_hit
.L015pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+3-.L015pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax	// arg1: user key
movl 8(%esp),%ecx	// arg2: key size in bits
movl 12(%esp),%edx	// arg3: output key schedule
pushl %ebx
call .L016pic	// PIC: materialize EIP to address .Lkey_const
.L016pic:
popl %ebx
leal .Lkey_const-.L016pic(%ebx),%ebx
movups (%eax),%xmm0	// first 16 bytes of the user key
xorps %xmm4,%xmm4	// %xmm4 = 0, scratch for the shufps folds
leal 16(%edx),%edx
cmpl $256,%ecx
je .L01714rounds
cmpl $128,%ecx
jne .L018bad_keybits	// no 192-bit path in this build
.align 16
.L01910rounds:
// AES-128: 10 rounds; one aeskeygenassist per round key, with the rcon
// sequence 1,2,4,...,128,27,54 encoded in the immediates.
movl $9,%ecx
movups %xmm0,-16(%edx)	// round-0 key = raw user key
.byte 102,15,58,223,200,1	// aeskeygenassist $1,%xmm0,%xmm1
call .L020key_128_cold
.byte 102,15,58,223,200,2	// aeskeygenassist $2,%xmm0,%xmm1
call .L021key_128
.byte 102,15,58,223,200,4	// aeskeygenassist $4,%xmm0,%xmm1
call .L021key_128
.byte 102,15,58,223,200,8	// aeskeygenassist $8,%xmm0,%xmm1
call .L021key_128
.byte 102,15,58,223,200,16	// aeskeygenassist $16,%xmm0,%xmm1
call .L021key_128
.byte 102,15,58,223,200,32	// aeskeygenassist $32,%xmm0,%xmm1
call .L021key_128
.byte 102,15,58,223,200,64	// aeskeygenassist $64,%xmm0,%xmm1
call .L021key_128
.byte 102,15,58,223,200,128	// aeskeygenassist $128,%xmm0,%xmm1
call .L021key_128
.byte 102,15,58,223,200,27	// aeskeygenassist $27,%xmm0,%xmm1
call .L021key_128
.byte 102,15,58,223,200,54	// aeskeygenassist $54,%xmm0,%xmm1
call .L021key_128
movups %xmm0,(%edx)	// final round key
movl %ecx,80(%edx)	// rounds field (9+1 keys already written)
jmp .L022good_key
.align 16
.L021key_128:
// Store the previous round key, then fall through to derive the next one.
movups %xmm0,(%edx)
leal 16(%edx),%edx
.L020key_128_cold:
// Fold the previous key into itself (the sliding-xor of the AES key
// schedule) and mix in the rot-sub word produced by aeskeygenassist.
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1	// broadcast the keygenassist result word
xorps %xmm1,%xmm0
ret
.align 16
.L01714rounds:
// AES-256: 14 rounds; alternate 256a (derives even keys from %xmm0)
// and 256b (derives odd keys from %xmm2) steps.
movups 16(%eax),%xmm2	// second 16 bytes of the user key
leal 16(%edx),%edx
movl $13,%ecx
movups %xmm0,-32(%edx)
movups %xmm2,-16(%edx)
.byte 102,15,58,223,202,1	// aeskeygenassist $1,%xmm2,%xmm1
call .L023key_256a_cold
.byte 102,15,58,223,200,1	// aeskeygenassist $1,%xmm0,%xmm1
call .L024key_256b
.byte 102,15,58,223,202,2	// aeskeygenassist $2,%xmm2,%xmm1
call .L025key_256a
.byte 102,15,58,223,200,2	// aeskeygenassist $2,%xmm0,%xmm1
call .L024key_256b
.byte 102,15,58,223,202,4	// aeskeygenassist $4,%xmm2,%xmm1
call .L025key_256a
.byte 102,15,58,223,200,4	// aeskeygenassist $4,%xmm0,%xmm1
call .L024key_256b
.byte 102,15,58,223,202,8	// aeskeygenassist $8,%xmm2,%xmm1
call .L025key_256a
.byte 102,15,58,223,200,8	// aeskeygenassist $8,%xmm0,%xmm1
call .L024key_256b
.byte 102,15,58,223,202,16	// aeskeygenassist $16,%xmm2,%xmm1
call .L025key_256a
.byte 102,15,58,223,200,16	// aeskeygenassist $16,%xmm0,%xmm1
call .L024key_256b
.byte 102,15,58,223,202,32	// aeskeygenassist $32,%xmm2,%xmm1
call .L025key_256a
.byte 102,15,58,223,200,32	// aeskeygenassist $32,%xmm0,%xmm1
call .L024key_256b
.byte 102,15,58,223,202,64	// aeskeygenassist $64,%xmm2,%xmm1
call .L025key_256a
movups %xmm0,(%edx)	// final round key
movl %ecx,16(%edx)	// rounds field
xorl %eax,%eax
jmp .L022good_key
.align 16
.L025key_256a:
movups %xmm2,(%edx)
leal 16(%edx),%edx
.L023key_256a_cold:
// Same sliding-xor fold as key_128, applied to %xmm0.
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 16
.L024key_256b:
// Odd round keys: fold %xmm2 and mix in word 2 ($170 broadcast) of the
// keygenassist result (sub-word without rotation).
movups %xmm0,(%edx)
leal 16(%edx),%edx
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.L022good_key:
// Scrub key material from registers and return 0.
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax
popl %ebx
ret
.align 4
.L018bad_keybits:
// Unsupported key size: scrub and return -2.
pxor %xmm0,%xmm0
movl $-2,%eax
popl %ebx
ret
.size aes_hw_set_encrypt_key_base,.-.L_aes_hw_set_encrypt_key_base_begin
// aes_hw_set_encrypt_key_alt(user_key, bits, aes_key) — expand an AES
// encryption key schedule using AESENCLAST + PSHUFB instead of
// AESKEYGENASSIST (the "alt" variant; same interface and output layout as
// aes_hw_set_encrypt_key_base).
// cdecl: %eax = user key, %ecx = key bits, %edx = output AES_KEY.
// Supports 128-bit and 256-bit keys only; returns 0 on success, -2 on a bad
// key size.  Uses the .Lkey_const table (via PIC %ebx): pshufb masks plus the
// {1,...} rcon vector, doubled in-register with pslld $1 each round.
.globl aes_hw_set_encrypt_key_alt
.hidden aes_hw_set_encrypt_key_alt
.type aes_hw_set_encrypt_key_alt,@function
.align 16
aes_hw_set_encrypt_key_alt:
.L_aes_hw_set_encrypt_key_alt_begin:
#ifdef BORINGSSL_DISPATCH_TEST
// Test-only instrumentation: record that this implementation was taken.
pushl %ebx
pushl %edx
call .L026pic_for_function_hit
.L026pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+3-.L026pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax	// arg1: user key
movl 8(%esp),%ecx	// arg2: key size in bits
movl 12(%esp),%edx	// arg3: output key schedule
pushl %ebx
call .L027pic	// PIC: materialize EIP to address .Lkey_const
.L027pic:
popl %ebx
leal .Lkey_const-.L027pic(%ebx),%ebx
movups (%eax),%xmm0	// first 16 bytes of the user key
xorps %xmm4,%xmm4
leal 16(%edx),%edx
cmpl $256,%ecx
je .L02814rounds_alt
cmpl $128,%ecx
jne .L029bad_keybits	// no 192-bit path in this build
.align 16
.L03010rounds_alt:
movdqa (%ebx),%xmm5	// pshufb mask (rot-word layout)
movl $8,%ecx	// 8 looped rounds + 2 unrolled below
movdqa 32(%ebx),%xmm4	// rcon vector {1,1,1,1}
movdqa %xmm0,%xmm2
movdqu %xmm0,-16(%edx)	// round-0 key = raw user key
.L031loop_key128:
.byte 102,15,56,0,197	// pshufb %xmm5,%xmm0: RotWord byte permutation
.byte 102,15,56,221,196	// aesenclast %xmm4,%xmm0: SubWord + rcon xor
pslld $1,%xmm4	// rcon <<= 1 for the next round
leal 16(%edx),%edx
// Sliding-xor fold of the previous round key (three pslldq/pxor steps).
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0	// next round key
movdqu %xmm0,-16(%edx)
movdqa %xmm0,%xmm2
decl %ecx
jnz .L031loop_key128
movdqa 48(%ebx),%xmm4	// rcon vector {27,27,27,27} for round 9
.byte 102,15,56,0,197	// pshufb %xmm5,%xmm0
.byte 102,15,56,221,196	// aesenclast %xmm4,%xmm0
pslld $1,%xmm4	// -> 54 for the final round
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197	// pshufb %xmm5,%xmm0
.byte 102,15,56,221,196	// aesenclast %xmm4,%xmm0
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%edx)
movl $9,%ecx
movl %ecx,96(%edx)	// rounds field
jmp .L032good_key
.align 16
.L02814rounds_alt:
// AES-256: 7 iterations, each producing an even key (from %xmm0, with
// rcon) and — except the last — an odd key (from %xmm2, no rcon).
movups 16(%eax),%xmm2	// second 16 bytes of the user key
leal 16(%edx),%edx
movdqa (%ebx),%xmm5	// pshufb mask
movdqa 32(%ebx),%xmm4	// rcon vector {1,1,1,1}
movl $7,%ecx
movdqu %xmm0,-32(%edx)
movdqa %xmm2,%xmm1
movdqu %xmm2,-16(%edx)
.L033loop_key256:
.byte 102,15,56,0,213	// pshufb %xmm5,%xmm2
.byte 102,15,56,221,212	// aesenclast %xmm4,%xmm2
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4	// rcon <<= 1
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)	// even round key
decl %ecx
jz .L034done_key256
// Odd round key: SubWord only (zero "rcon" via %xmm3) on the broadcast
// top word of the even key, folded into the previous odd key.
pshufd $255,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211	// aesenclast %xmm3,%xmm2
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%edx)	// odd round key
leal 32(%edx),%edx
movdqa %xmm2,%xmm1
jmp .L033loop_key256
.L034done_key256:
movl $13,%ecx
movl %ecx,16(%edx)	// rounds field
.L032good_key:
// Scrub key material from registers and return 0.
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax
popl %ebx
ret
.align 4
.L029bad_keybits:
// Unsupported key size: scrub and return -2.
pxor %xmm0,%xmm0
movl $-2,%eax
popl %ebx
ret
.size aes_hw_set_encrypt_key_alt,.-.L_aes_hw_set_encrypt_key_alt_begin
// Constant table for aes_hw_set_encrypt_key_alt (addressed PC-relatively
// via the .L016pic/.L027pic sequences):
//   +0:  pshufb mask 0x0C0F0E0D x4 (RotWord-style byte permutation)
//   +16: pshufb mask 0x04070605 x4
//   +32: rcon seed {1,1,1,1} (doubled in-register with pslld $1)
//   +48: rcon {27,27,27,27} for the late AES-128 rounds
// Followed by the CRYPTOGAMS attribution string (ASCII, NUL-terminated).
.align 64
.Lkey_const:
.long 202313229,202313229,202313229,202313229
.long 67569157,67569157,67569157,67569157
.long 1,1,1,1
.long 27,27,27,27
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
.byte 115,108,46,111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
Prom11111/plumbingtech | 47,706 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha512-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _sha512_block_data_order_nohw
.private_extern _sha512_block_data_order_nohw
.p2align 4
_sha512_block_data_order_nohw:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $128+32,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
L$prologue:
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp L$loop
.p2align 4
L$loop:
movq %rbx,%rdi
leaq K512(%rip),%rbp
xorq %rcx,%rdi
movq 0(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 8(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 16(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 24(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 32(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 40(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 48(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 56(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
addq %r14,%rax
movq 64(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 72(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 80(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 88(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 96(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 104(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 112(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 120(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
jmp L$rounds_16_xx
.p2align 4
L$rounds_16_xx:
movq 8(%rsp),%r13
movq 112(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 72(%rsp),%r12
addq 0(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 16(%rsp),%r13
movq 120(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 80(%rsp),%r12
addq 8(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 24(%rsp),%r13
movq 0(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 88(%rsp),%r12
addq 16(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 32(%rsp),%r13
movq 8(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 96(%rsp),%r12
addq 24(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 40(%rsp),%r13
movq 16(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 104(%rsp),%r12
addq 32(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 48(%rsp),%r13
movq 24(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 112(%rsp),%r12
addq 40(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 56(%rsp),%r13
movq 32(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 120(%rsp),%r12
addq 48(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 64(%rsp),%r13
movq 40(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 0(%rsp),%r12
addq 56(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
movq 72(%rsp),%r13
movq 48(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 8(%rsp),%r12
addq 64(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 80(%rsp),%r13
movq 56(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 16(%rsp),%r12
addq 72(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 88(%rsp),%r13
movq 64(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 24(%rsp),%r12
addq 80(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 96(%rsp),%r13
movq 72(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 32(%rsp),%r12
addq 88(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 104(%rsp),%r13
movq 80(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 40(%rsp),%r12
addq 96(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 112(%rsp),%r13
movq 88(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 48(%rsp),%r12
addq 104(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 120(%rsp),%r13
movq 96(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 56(%rsp),%r12
addq 112(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 0(%rsp),%r13
movq 104(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 64(%rsp),%r12
addq 120(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
cmpb $0,7(%rbp)
jnz L$rounds_16_xx
movq 128+0(%rsp),%rdi
addq %r14,%rax
leaq 128(%rsi),%rsi
addq 0(%rdi),%rax
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb L$loop
movq 152(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue:
ret
.section __DATA,__const
.p2align 6
// SHA-512 round-constant table K[0..79] (FIPS 180-4).  Each pair of 64-bit
// constants is emitted twice in a row so the SIMD code paths can fetch a
// 16-byte, lane-duplicated copy with one aligned load (vpaddq off %rbp).
// 80 duplicated pairs = 1280 bytes; the pair that follows at K512+1280 is
// NOT a round constant -- it is the vpshufb byte-swap mask used to convert
// the big-endian message words to host (little-endian) order.
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
// K512+1280: vpshufb byte-swap mask (big-endian word -> little-endian),
// duplicated like the constants so it can be loaded as one 16-byte vector.
// Its high byte being zero also serves as the loop-termination sentinel
// for the cmpb $0,135(%rbp) check in the round loops above.
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
// ASCII banner: "SHA512 block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
//-----------------------------------------------------------------------
// sha512_block_data_order_avx(uint64_t state[8], const void *in, size_t num)
//
// AVX implementation of the SHA-512 compression function (generated code).
// In:   %rdi = state: eight 64-bit chaining values a..h
//       %rsi = input message blocks
//       %rdx = number of 128-byte blocks (converted to an end pointer below)
// The working variables a..h live in rax,rbx,rcx,rdx,r8,r9,r10,r11 (with
// two of the rotating "temporaries" T1/Sigma held in r12/r13/r14 and the
// Ch/Maj scratch in r15/rdi).  The 16-entry message schedule, already
// pre-added with the round constants, lives in the 128-byte, 64-byte
// aligned stack frame at 0..120(%rsp).
// NOTE(review): exact C prototype inferred from register use here --
// confirm against the perlasm source (sha512-x86_64.pl).
//-----------------------------------------------------------------------
.globl _sha512_block_data_order_avx
.private_extern _sha512_block_data_order_avx
.p2align 6
_sha512_block_data_order_avx:
_CET_ENDBR
movq %rsp,%rax                  // keep caller rsp for the epilogue
pushq %rbx                      // save callee-saved registers
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx                    // rdx = num * 16
subq $160,%rsp                  // frame: 0..127 W, 128/136/144 ctx/in/end, 152 saved rsp
leaq (%rsi,%rdx,8),%rdx         // rdx = in + num*128 = end of input
andq $-64,%rsp                  // 64-byte align the schedule area
movq %rdi,128+0(%rsp)           // spill ctx pointer
movq %rsi,128+8(%rsp)           // spill input pointer
movq %rdx,128+16(%rsp)          // spill input end pointer
movq %rax,152(%rsp)             // spill caller rsp
L$prologue_avx:
vzeroupper
movq 0(%rdi),%rax               // load chaining values a..h
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp L$loop_avx
.p2align 4
// One 128-byte message block per iteration: byte-swap the 16 message
// words into xmm0..xmm7, pre-add the first 16 round constants, and park
// the result in the stack frame for the scalar rounds.
L$loop_avx:
vmovdqa K512+1280(%rip),%xmm11  // xmm11 = vpshufb byte-swap mask
vmovdqu 0(%rsi),%xmm0
leaq K512+128(%rip),%rbp        // rbp = K512+128 so -128..96(%rbp) spans 16 constants
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vpshufb %xmm11,%xmm0,%xmm0
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm11,%xmm1,%xmm1
vmovdqu 64(%rsi),%xmm4
vpshufb %xmm11,%xmm2,%xmm2
vmovdqu 80(%rsi),%xmm5
vpshufb %xmm11,%xmm3,%xmm3
vmovdqu 96(%rsi),%xmm6
vpshufb %xmm11,%xmm4,%xmm4
vmovdqu 112(%rsi),%xmm7
vpshufb %xmm11,%xmm5,%xmm5
vpaddq -128(%rbp),%xmm0,%xmm8   // W[t] + K[t] for the first 16 rounds
vpshufb %xmm11,%xmm6,%xmm6
vpaddq -96(%rbp),%xmm1,%xmm9
vpshufb %xmm11,%xmm7,%xmm7
vpaddq -64(%rbp),%xmm2,%xmm10
vpaddq -32(%rbp),%xmm3,%xmm11
vmovdqa %xmm8,0(%rsp)
vpaddq 0(%rbp),%xmm4,%xmm8
vmovdqa %xmm9,16(%rsp)
vpaddq 32(%rbp),%xmm5,%xmm9
vmovdqa %xmm10,32(%rsp)
vpaddq 64(%rbp),%xmm6,%xmm10
vmovdqa %xmm11,48(%rsp)
vpaddq 96(%rbp),%xmm7,%xmm11
vmovdqa %xmm8,64(%rsp)
movq %rax,%r14
vmovdqa %xmm9,80(%rsp)
movq %rbx,%rdi
vmovdqa %xmm10,96(%rsp)
xorq %rcx,%rdi
vmovdqa %xmm11,112(%rsp)
movq %r8,%r13
jmp L$avx_00_47
.p2align 4
// Rounds 0..63, 16 rounds per pass (4 passes).  Each group below updates
// one xmm pair of the message schedule (sigma0/sigma1 built from the
// shrd/shift/xor vector ops) interleaved with two scalar rounds, then
// stores the next W[t]+K[t] pair back into the frame.
L$avx_00_47:
addq $256,%rbp                  // advance to the next 16 round constants
// schedule update xmm0 + scalar rounds consuming 0(%rsp), 8(%rsp)
vpalignr $8,%xmm0,%xmm1,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm4,%xmm5,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm0,%xmm0
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 0(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm7,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm7,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm0,%xmm0
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm7,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 8(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm0,%xmm0
xorq %r11,%r14
addq %r13,%r10
vpaddq -128(%rbp),%xmm0,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,0(%rsp)
// schedule update xmm1 + scalar rounds consuming 16(%rsp), 24(%rsp)
vpalignr $8,%xmm1,%xmm2,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm5,%xmm6,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm1,%xmm1
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 16(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm0,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm0,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm1,%xmm1
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm0,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 24(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm1,%xmm1
xorq %r9,%r14
addq %r13,%r8
vpaddq -96(%rbp),%xmm1,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,16(%rsp)
// schedule update xmm2 + scalar rounds consuming 32(%rsp), 40(%rsp)
vpalignr $8,%xmm2,%xmm3,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm6,%xmm7,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm2,%xmm2
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 32(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm1,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm1,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm2,%xmm2
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm1,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 40(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm2,%xmm2
xorq %rdx,%r14
addq %r13,%rcx
vpaddq -64(%rbp),%xmm2,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,32(%rsp)
// schedule update xmm3 + scalar rounds consuming 48(%rsp), 56(%rsp)
vpalignr $8,%xmm3,%xmm4,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm7,%xmm0,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm3,%xmm3
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 48(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm2,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm2,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm3,%xmm3
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm2,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 56(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm3,%xmm3
xorq %rbx,%r14
addq %r13,%rax
vpaddq -32(%rbp),%xmm3,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,48(%rsp)
// schedule update xmm4 + scalar rounds consuming 64(%rsp), 72(%rsp)
vpalignr $8,%xmm4,%xmm5,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm0,%xmm1,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm4,%xmm4
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 64(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm3,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm3,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm4,%xmm4
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm3,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 72(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm4,%xmm4
xorq %r11,%r14
addq %r13,%r10
vpaddq 0(%rbp),%xmm4,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,64(%rsp)
// schedule update xmm5 + scalar rounds consuming 80(%rsp), 88(%rsp)
vpalignr $8,%xmm5,%xmm6,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm1,%xmm2,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm5,%xmm5
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 80(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm4,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm4,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm5,%xmm5
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm4,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 88(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm5,%xmm5
xorq %r9,%r14
addq %r13,%r8
vpaddq 32(%rbp),%xmm5,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,80(%rsp)
// schedule update xmm6 + scalar rounds consuming 96(%rsp), 104(%rsp)
vpalignr $8,%xmm6,%xmm7,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm2,%xmm3,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm6,%xmm6
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 96(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm5,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm5,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm6,%xmm6
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm5,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 104(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm6,%xmm6
xorq %rdx,%r14
addq %r13,%rcx
vpaddq 64(%rbp),%xmm6,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,96(%rsp)
// schedule update xmm7 + scalar rounds consuming 112(%rsp), 120(%rsp)
vpalignr $8,%xmm7,%xmm0,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm3,%xmm4,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm7,%xmm7
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 112(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm6,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm6,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm7,%xmm7
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm6,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 120(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm7,%xmm7
xorq %rbx,%r14
addq %r13,%rax
vpaddq 96(%rbp),%xmm7,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,112(%rsp)
// Loop-termination sentinel: %rbp advances 256 bytes per pass; once it
// has walked past all 80 round constants, 135(%rbp) lands on the top
// byte of the byte-swap mask that follows K512, which is zero.
cmpb $0,135(%rbp)
jne L$avx_00_47
// Rounds 64..79: the message schedule is complete, so these are plain
// scalar SHA-512 rounds consuming the pre-added W+K values at
// 0..120(%rsp).
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 0(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 8(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 16(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 24(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 32(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 40(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 48(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 56(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 64(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 72(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 80(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 88(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 96(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 104(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 112(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 120(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
// End of block: fold this block's result into the chaining values in
// the context, advance the input pointer, and loop while input remains.
movq 128+0(%rsp),%rdi
movq %r14,%rax
addq 0(%rdi),%rax
leaq 128(%rsi),%rsi
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi          // reached the end-of-input pointer?
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb L$loop_avx
movq 152(%rsp),%rsi             // rsi = caller rsp (points just above the pushes)
vzeroupper                      // clear upper YMM state before returning to SSE/C code
movq -48(%rsi),%r15             // restore callee-saved registers
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue_avx:
ret
#endif
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,@function
.align 16
bn_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
negq %r9
movq %rsp,%r11
leaq -16(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul_page_walk
jmp .Lmul_page_walk_done
.align 16
.Lmul_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul_page_walk
.Lmul_page_walk_done:
movq %rax,8(%rsp,%r9,8)
.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08
.Lmul_body:
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%r13
leaq 1(%r15),%r15
jmp .L1st_enter
.align 16
.L1st:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r13
movq %r10,%r11
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
.L1st_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 1(%r15),%r15
movq %rdx,%r10
mulq %rbp
cmpq %r9,%r15
jne .L1st
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
movq %r10,%r11
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
jmp .Louter
.align 16
.Louter:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq %r8,%rbp
movq (%rsp),%r10
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq 8(%rsp),%r10
movq %rdx,%r13
leaq 1(%r15),%r15
jmp .Linner_enter
.align 16
.Linner:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
.Linner_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
leaq 1(%r15),%r15
mulq %rbp
cmpq %r9,%r15
jne .Linner
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
cmpq %r9,%r14
jb .Louter
xorq %r14,%r14
movq (%rsp),%rax
movq %r9,%r15
.align 16
.Lsub: sbbq (%rcx,%r14,8),%rax
movq %rax,(%rdi,%r14,8)
movq 8(%rsp,%r14,8),%rax
leaq 1(%r14),%r14
decq %r15
jnz .Lsub
sbbq $0,%rax
movq $-1,%rbx
xorq %rax,%rbx
xorq %r14,%r14
movq %r9,%r15
.Lcopy:
movq (%rdi,%r14,8),%rcx
movq (%rsp,%r14,8),%rdx
andq %rbx,%rcx
andq %rax,%rdx
movq %r9,(%rsp,%r14,8)
orq %rcx,%rdx
movq %rdx,(%rdi,%r14,8)
leaq 1(%r14),%r14
subq $1,%r15
jnz .Lcopy
movq 8(%rsp,%r9,8),%rsi
.cfi_def_cfa %rsi,8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmul_epilogue:
ret
.cfi_endproc
.size bn_mul_mont_nohw,.-bn_mul_mont_nohw
.globl bn_mul4x_mont
.hidden bn_mul4x_mont
.type bn_mul4x_mont,@function
.align 16
bn_mul4x_mont:
.cfi_startproc
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
negq %r9
movq %rsp,%r11
leaq -32(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul4x_page_walk
jmp .Lmul4x_page_walk_done
.Lmul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul4x_page_walk
.Lmul4x_page_walk_done:
movq %rax,8(%rsp,%r9,8)
.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08
.Lmul4x_body:
movq %rdi,16(%rsp,%r9,8)
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp .L1st4x
.align 16
.L1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb .L1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
leaq 1(%r14),%r14
.align 4
.Louter4x:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq (%rsp),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%rsp),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp .Linner4x
.align 16
.Linner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq 8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb .Linner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 1(%r14),%r14
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%rsp,%r9,8),%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
cmpq %r9,%r14
jb .Louter4x
movq 16(%rsp,%r9,8),%rdi
leaq -4(%r9),%r15
movq 0(%rsp),%rax
movq 8(%rsp),%rdx
shrq $2,%r15
leaq (%rsp),%rsi
xorq %r14,%r14
subq 0(%rcx),%rax
movq 16(%rsi),%rbx
movq 24(%rsi),%rbp
sbbq 8(%rcx),%rdx
.Lsub4x:
movq %rax,0(%rdi,%r14,8)
movq %rdx,8(%rdi,%r14,8)
sbbq 16(%rcx,%r14,8),%rbx
movq 32(%rsi,%r14,8),%rax
movq 40(%rsi,%r14,8),%rdx
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
movq %rbp,24(%rdi,%r14,8)
sbbq 32(%rcx,%r14,8),%rax
movq 48(%rsi,%r14,8),%rbx
movq 56(%rsi,%r14,8),%rbp
sbbq 40(%rcx,%r14,8),%rdx
leaq 4(%r14),%r14
decq %r15
jnz .Lsub4x
movq %rax,0(%rdi,%r14,8)
movq 32(%rsi,%r14,8),%rax
sbbq 16(%rcx,%r14,8),%rbx
movq %rdx,8(%rdi,%r14,8)
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
sbbq $0,%rax
movq %rbp,24(%rdi,%r14,8)
pxor %xmm0,%xmm0
.byte 102,72,15,110,224
pcmpeqd %xmm5,%xmm5
pshufd $0,%xmm4,%xmm4
movq %r9,%r15
pxor %xmm4,%xmm5
shrq $2,%r15
xorl %eax,%eax
jmp .Lcopy4x
.align 16
.Lcopy4x:
movdqa (%rsp,%rax,1),%xmm1
movdqu (%rdi,%rax,1),%xmm2
pand %xmm4,%xmm1
pand %xmm5,%xmm2
movdqa 16(%rsp,%rax,1),%xmm3
movdqa %xmm0,(%rsp,%rax,1)
por %xmm2,%xmm1
movdqu 16(%rdi,%rax,1),%xmm2
movdqu %xmm1,(%rdi,%rax,1)
pand %xmm4,%xmm3
pand %xmm5,%xmm2
movdqa %xmm0,16(%rsp,%rax,1)
por %xmm2,%xmm3
movdqu %xmm3,16(%rdi,%rax,1)
leaq 32(%rax),%rax
decq %r15
jnz .Lcopy4x
movq 8(%rsp,%r9,8),%rsi
.cfi_def_cfa %rsi, 8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmul4x_epilogue:
ret
.cfi_endproc
.size bn_mul4x_mont,.-bn_mul4x_mont
.extern bn_sqrx8x_internal
.hidden bn_sqrx8x_internal
.extern bn_sqr8x_internal
.hidden bn_sqr8x_internal
.globl bn_sqr8x_mont
.hidden bn_sqr8x_mont
.type bn_sqr8x_mont,@function
.align 32
bn_sqr8x_mont:
.cfi_startproc
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lsqr8x_prologue:
movl %r9d,%r10d
shll $3,%r9d
shlq $3+2,%r10
negq %r9
leaq -64(%rsp,%r9,2),%r11
movq %rsp,%rbp
movq (%r8),%r8
subq %rsi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb .Lsqr8x_sp_alt
subq %r11,%rbp
leaq -64(%rbp,%r9,2),%rbp
jmp .Lsqr8x_sp_done
.align 32
.Lsqr8x_sp_alt:
leaq 4096-64(,%r9,2),%r10
leaq -64(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
.Lsqr8x_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lsqr8x_page_walk
jmp .Lsqr8x_page_walk_done
.align 16
.Lsqr8x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lsqr8x_page_walk
.Lsqr8x_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
.Lsqr8x_body:
.byte 102,72,15,110,209
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,73,15,110,218
testq %rdx,%rdx
jz .Lsqr8x_nox
call bn_sqrx8x_internal
leaq (%r8,%rcx,1),%rbx
movq %rcx,%r9
movq %rcx,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp .Lsqr8x_sub
.align 32
.Lsqr8x_nox:
call bn_sqr8x_internal
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
movq %r9,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp .Lsqr8x_sub
.align 32
.Lsqr8x_sub:
movq 0(%rbx),%r12
movq 8(%rbx),%r13
movq 16(%rbx),%r14
movq 24(%rbx),%r15
leaq 32(%rbx),%rbx
sbbq 0(%rbp),%r12
sbbq 8(%rbp),%r13
sbbq 16(%rbp),%r14
sbbq 24(%rbp),%r15
leaq 32(%rbp),%rbp
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz .Lsqr8x_sub
sbbq $0,%rax
leaq (%rbx,%r9,1),%rbx
leaq (%rdi,%r9,1),%rdi
.byte 102,72,15,110,200
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
jmp .Lsqr8x_cond_copy
.align 32
.Lsqr8x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
movdqa %xmm0,-32(%rbx,%rdx,1)
movdqa %xmm0,-16(%rbx,%rdx,1)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
addq $32,%r9
jnz .Lsqr8x_cond_copy
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lsqr8x_epilogue:
ret
.cfi_endproc
.size bn_sqr8x_mont,.-bn_sqr8x_mont
.globl bn_mulx4x_mont
.hidden bn_mulx4x_mont
.type bn_mulx4x_mont,@function
.align 32
bn_mulx4x_mont:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lmulx4x_prologue:
shll $3,%r9d
xorq %r10,%r10
subq %r9,%r10
movq (%r8),%r8
leaq -72(%rsp,%r10,1),%rbp
andq $-128,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmulx4x_page_walk
jmp .Lmulx4x_page_walk_done
.align 16
.Lmulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmulx4x_page_walk
.Lmulx4x_page_walk_done:
leaq (%rdx,%r9,1),%r10
movq %r9,0(%rsp)
shrq $5,%r9
movq %r10,16(%rsp)
subq $1,%r9
movq %r8,24(%rsp)
movq %rdi,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
movq %r9,48(%rsp)
jmp .Lmulx4x_body
.align 32
.Lmulx4x_body:
leaq 8(%rdx),%rdi
movq (%rdx),%rdx
leaq 64+32(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r14
addq %rax,%r11
movq %rdi,8(%rsp)
mulxq 16(%rsi),%r12,%r13
adcq %r14,%r12
adcq $0,%r13
movq %r8,%rdi
imulq 24(%rsp),%r8
xorq %rbp,%rbp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%rdi
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00
movq 48(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp .Lmulx4x_1st
.align 32
.Lmulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz .Lmulx4x_1st
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
addq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
jmp .Lmulx4x_outer
.align 32
.Lmulx4x_outer:
movq (%rdi),%rdx
leaq 8(%rdi),%rdi
subq %rax,%rsi
movq %r15,(%rbx)
leaq 64+32(%rsp),%rbx
subq %rax,%rcx
mulxq 0(%rsi),%r8,%r11
xorl %ebp,%ebp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
adoxq -16(%rbx),%r12
adcxq %rbp,%r13
adoxq %rbp,%r13
movq %rdi,8(%rsp)
movq %r8,%r15
imulq 24(%rsp),%r8
xorl %ebp,%ebp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
adcxq %rax,%r13
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
adoxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
leaq 32(%rcx),%rcx
adcxq %rax,%r12
adoxq %rbp,%r15
movq 48(%rsp),%rdi
movq %r12,-16(%rbx)
jmp .Lmulx4x_inner
.align 32
.Lmulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-32(%rbx)
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz .Lmulx4x_inner
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
subq 0(%rbx),%rbp
adcq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
cmpq 16(%rsp),%rdi
jne .Lmulx4x_outer
leaq 64(%rsp),%rbx
subq %rax,%rcx
negq %r15
movq %rax,%rdx
shrq $3+2,%rax
movq 32(%rsp),%rdi
jmp .Lmulx4x_sub
.align 32
.Lmulx4x_sub:
movq 0(%rbx),%r11
movq 8(%rbx),%r12
movq 16(%rbx),%r13
movq 24(%rbx),%r14
leaq 32(%rbx),%rbx
sbbq 0(%rcx),%r11
sbbq 8(%rcx),%r12
sbbq 16(%rcx),%r13
sbbq 24(%rcx),%r14
leaq 32(%rcx),%rcx
movq %r11,0(%rdi)
movq %r12,8(%rdi)
movq %r13,16(%rdi)
movq %r14,24(%rdi)
leaq 32(%rdi),%rdi
decq %rax
jnz .Lmulx4x_sub
sbbq $0,%r15
leaq 64(%rsp),%rbx
subq %rdx,%rdi
.byte 102,73,15,110,207
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
jmp .Lmulx4x_cond_copy
.align 32
.Lmulx4x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
subq $32,%rdx
jnz .Lmulx4x_cond_copy
movq %rdx,(%rbx)
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmulx4x_epilogue:
ret
.cfi_endproc
.size bn_mulx4x_mont,.-bn_mulx4x_mont
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 16
#endif