repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
rlyzurjf57/sp1
11,854
zkvm/entrypoint/src/memcpy.s
// This is musl-libc commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30: // // src/string/memcpy.c // // This was compiled into assembly with: // // clang-14 -target riscv32 -march=rv32im -O3 -S memcpy.c -nostdlib -fno-builtin -funroll-loops // // and labels manually updated to not conflict. // // musl as a whole is licensed under the following standard MIT license: // // ---------------------------------------------------------------------- // Copyright © 2005-2020 Rich Felker, et al. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // ---------------------------------------------------------------------- // // Authors/contributors include: // // A. Wilcox // Ada Worcester // Alex Dowad // Alex Suykov // Alexander Monakov // Andre McCurdy // Andrew Kelley // Anthony G. 
Basile // Aric Belsito // Arvid Picciani // Bartosz Brachaczek // Benjamin Peterson // Bobby Bingham // Boris Brezillon // Brent Cook // Chris Spiegel // Clément Vasseur // Daniel Micay // Daniel Sabogal // Daurnimator // David Carlier // David Edelsohn // Denys Vlasenko // Dmitry Ivanov // Dmitry V. Levin // Drew DeVault // Emil Renner Berthing // Fangrui Song // Felix Fietkau // Felix Janda // Gianluca Anzolin // Hauke Mehrtens // He X // Hiltjo Posthuma // Isaac Dunham // Jaydeep Patil // Jens Gustedt // Jeremy Huntwork // Jo-Philipp Wich // Joakim Sindholt // John Spencer // Julien Ramseier // Justin Cormack // Kaarle Ritvanen // Khem Raj // Kylie McClain // Leah Neukirchen // Luca Barbato // Luka Perkov // M Farkas-Dyck (Strake) // Mahesh Bodapati // Markus Wichmann // Masanori Ogino // Michael Clark // Michael Forney // Mikhail Kremnyov // Natanael Copa // Nicholas J. Kain // orc // Pascal Cuoq // Patrick Oppenlander // Petr Hosek // Petr Skocik // Pierre Carrier // Reini Urban // Rich Felker // Richard Pennington // Ryan Fairfax // Samuel Holland // Segev Finer // Shiz // sin // Solar Designer // Stefan Kristiansson // Stefan O'Rear // Szabolcs Nagy // Timo Teräs // Trutz Behn // Valentin Ochs // Will Dietz // William Haddon // William Pitcock // // Portions of this software are derived from third-party works licensed // under terms compatible with the above MIT license: // // The TRE regular expression implementation (src/regex/reg* and // src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed // under a 2-clause BSD license (license text in the source files). The // included version has been heavily modified by Rich Felker in 2012, in // the interests of size, simplicity, and namespace cleanliness. // // Much of the math library code (src/math/* and src/complex/*) is // Copyright © 1993,2004 Sun Microsystems or // Copyright © 2003-2011 David Schultz or // Copyright © 2003-2009 Steven G. Kargl or // Copyright © 2003-2009 Bruce D. 
Evans or // Copyright © 2008 Stephen L. Moshier or // Copyright © 2017-2018 Arm Limited // and labelled as such in comments in the individual source files. All // have been licensed under extremely permissive terms. // // The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008 // The Android Open Source Project and is licensed under a two-clause BSD // license. It was taken from Bionic libc, used on Android. // // The AArch64 memcpy and memset code (src/string/aarch64/*) are // Copyright © 1999-2019, Arm Limited. // // The implementation of DES for crypt (src/crypt/crypt_des.c) is // Copyright © 1994 David Burren. It is licensed under a BSD license. // // The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was // originally written by Solar Designer and placed into the public // domain. The code also comes with a fallback permissive license for use // in jurisdictions that may not recognize the public domain. // // The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 // Valentin Ochs and is licensed under an MIT-style license. // // The x86_64 port was written by Nicholas J. Kain and is licensed under // the standard MIT terms. // // The mips and microblaze ports were originally written by Richard // Pennington for use in the ellcc project. The original code was adapted // by Rich Felker for build system and code conventions during upstream // integration. It is licensed under the standard MIT terms. // // The mips64 port was contributed by Imagination Technologies and is // licensed under the standard MIT terms. // // The powerpc port was also originally written by Richard Pennington, // and later supplemented and integrated by John Spencer. It is licensed // under the standard MIT terms. 
// // All other files which have no copyright comments are original works // produced specifically for use as part of this library, written either // by Rich Felker, the main author of the library, or by one or more // contibutors listed above. Details on authorship of individual files // can be found in the git version control history of the project. The // omission of copyright and license comments in each file is in the // interest of source tree size. // // In addition, permission is hereby granted for all public header files // (include/* and arch/* /bits/* ) and crt files intended to be linked into // applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit // the copyright notice and permission notice otherwise required by the // license, and to use these files without any requirement of // attribution. These files include substantial contributions from: // // Bobby Bingham // John Spencer // Nicholas J. Kain // Rich Felker // Richard Pennington // Stefan Kristiansson // Szabolcs Nagy // // all of whom have explicitly granted such permission. // // This file previously contained text expressing a belief that most of // the files covered by the above exception were sufficiently trivial not // to be subject to copyright, resulting in confusion over whether it // negated the permissions granted in the license. In the spirit of // permissive licensing, and of not having licensing issues being an // obstacle to adoption, that text has been removed. 
.text .attribute 4, 16 .attribute 5, "rv32im" .file "musl_memcpy.c" .globl memcpy .p2align 2 .type memcpy,@function memcpy: andi a3, a1, 3 seqz a3, a3 seqz a4, a2 or a3, a3, a4 bnez a3, .LBBmemcpy0_11 addi a5, a1, 1 mv a6, a0 .LBBmemcpy0_2: lb a7, 0(a1) addi a4, a1, 1 addi a3, a6, 1 sb a7, 0(a6) addi a2, a2, -1 andi a1, a5, 3 snez a1, a1 snez a6, a2 and a7, a1, a6 addi a5, a5, 1 mv a1, a4 mv a6, a3 bnez a7, .LBBmemcpy0_2 andi a1, a3, 3 beqz a1, .LBBmemcpy0_12 .LBBmemcpy0_4: li a5, 32 bltu a2, a5, .LBBmemcpy0_26 li a5, 3 beq a1, a5, .LBBmemcpy0_19 li a5, 2 beq a1, a5, .LBBmemcpy0_22 li a5, 1 bne a1, a5, .LBBmemcpy0_26 lw a5, 0(a4) sb a5, 0(a3) srli a1, a5, 8 sb a1, 1(a3) srli a6, a5, 16 addi a1, a3, 3 sb a6, 2(a3) addi a2, a2, -3 addi a3, a4, 16 li a4, 16 .LBBmemcpy0_9: lw a6, -12(a3) srli a5, a5, 24 slli a7, a6, 8 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 24 slli a6, t0, 8 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 24 slli t0, a7, 8 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 24 slli a7, a5, 8 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_9 addi a4, a3, -13 j .LBBmemcpy0_25 .LBBmemcpy0_11: mv a3, a0 mv a4, a1 andi a1, a3, 3 bnez a1, .LBBmemcpy0_4 .LBBmemcpy0_12: li a1, 16 bltu a2, a1, .LBBmemcpy0_15 li a1, 15 .LBBmemcpy0_14: lw a5, 0(a4) lw a6, 4(a4) lw a7, 8(a4) lw t0, 12(a4) sw a5, 0(a3) sw a6, 4(a3) sw a7, 8(a3) sw t0, 12(a3) addi a4, a4, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a1, a2, .LBBmemcpy0_14 .LBBmemcpy0_15: andi a1, a2, 8 beqz a1, .LBBmemcpy0_17 lw a1, 0(a4) lw a5, 4(a4) sw a1, 0(a3) sw a5, 4(a3) addi a3, a3, 8 addi a4, a4, 8 .LBBmemcpy0_17: andi a1, a2, 4 beqz a1, .LBBmemcpy0_30 lw a1, 0(a4) sw a1, 0(a3) addi a3, a3, 4 addi a4, a4, 4 j .LBBmemcpy0_30 .LBBmemcpy0_19: lw a5, 0(a4) addi a1, a3, 1 sb a5, 0(a3) addi a2, a2, -1 addi a3, a4, 16 li a4, 18 .LBBmemcpy0_20: lw a6, -12(a3) srli a5, a5, 8 slli a7, a6, 24 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 8 
slli a6, t0, 24 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 8 slli t0, a7, 24 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 8 slli a7, a5, 24 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_20 addi a4, a3, -15 j .LBBmemcpy0_25 .LBBmemcpy0_22: lw a5, 0(a4) sb a5, 0(a3) srli a6, a5, 8 addi a1, a3, 2 sb a6, 1(a3) addi a2, a2, -2 addi a3, a4, 16 li a4, 17 .LBBmemcpy0_23: lw a6, -12(a3) srli a5, a5, 16 slli a7, a6, 16 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 16 slli a6, t0, 16 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 16 slli t0, a7, 16 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 16 slli a7, a5, 16 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_23 addi a4, a3, -14 .LBBmemcpy0_25: mv a3, a1 .LBBmemcpy0_26: andi a1, a2, 16 bnez a1, .LBBmemcpy0_35 andi a1, a2, 8 bnez a1, .LBBmemcpy0_36 .LBBmemcpy0_28: andi a1, a2, 4 beqz a1, .LBBmemcpy0_30 .LBBmemcpy0_29: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) addi a4, a4, 4 addi a5, a3, 4 sb a1, 3(a3) mv a3, a5 .LBBmemcpy0_30: andi a1, a2, 2 bnez a1, .LBBmemcpy0_33 andi a1, a2, 1 bnez a1, .LBBmemcpy0_34 .LBBmemcpy0_32: ret .LBBmemcpy0_33: lb a1, 0(a4) lb a5, 1(a4) sb a1, 0(a3) addi a4, a4, 2 addi a1, a3, 2 sb a5, 1(a3) mv a3, a1 andi a1, a2, 1 beqz a1, .LBBmemcpy0_32 .LBBmemcpy0_34: lb a1, 0(a4) sb a1, 0(a3) ret .LBBmemcpy0_35: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) lb a5, 4(a4) lb a6, 5(a4) sb a1, 3(a3) lb a1, 6(a4) sb a5, 4(a3) sb a6, 5(a3) lb a5, 7(a4) sb a1, 6(a3) lb a1, 8(a4) lb a6, 9(a4) sb a5, 7(a3) lb a5, 10(a4) sb a1, 8(a3) sb a6, 9(a3) lb a1, 11(a4) sb a5, 10(a3) lb a5, 12(a4) lb a6, 13(a4) sb a1, 11(a3) lb a1, 14(a4) sb a5, 12(a3) sb a6, 13(a3) lb a5, 15(a4) sb a1, 14(a3) addi a4, a4, 16 addi a1, a3, 16 sb a5, 15(a3) mv a3, a1 andi a1, a2, 8 beqz a1, .LBBmemcpy0_28 
.LBBmemcpy0_36: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) lb a5, 4(a4) lb a6, 5(a4) sb a1, 3(a3) lb a1, 6(a4) sb a5, 4(a3) sb a6, 5(a3) lb a5, 7(a4) sb a1, 6(a3) addi a4, a4, 8 addi a1, a3, 8 sb a5, 7(a3) mv a3, a1 andi a1, a2, 4 bnez a1, .LBBmemcpy0_29 j .LBBmemcpy0_30 .Lfuncmemcpy_end0: .size memcpy, .Lfuncmemcpy_end0-memcpy .ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157" .section ".note.GNU-stack","",@progbits .addrsig
rlyzurjf57/sp1
8,449
zkvm/entrypoint/src/memset.s
// This is musl-libc memset commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30: // // src/string/memset.c // // This was compiled into assembly with: // // clang-14 -target riscv32 -march=rv32im -O3 -S memset.c -nostdlib -fno-builtin -funroll-loops // // and labels manually updated to not conflict. // // musl as a whole is licensed under the following standard MIT license: // // ---------------------------------------------------------------------- // Copyright © 2005-2020 Rich Felker, et al. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // ---------------------------------------------------------------------- // // Authors/contributors include: // // A. Wilcox // Ada Worcester // Alex Dowad // Alex Suykov // Alexander Monakov // Andre McCurdy // Andrew Kelley // Anthony G. 
Basile // Aric Belsito // Arvid Picciani // Bartosz Brachaczek // Benjamin Peterson // Bobby Bingham // Boris Brezillon // Brent Cook // Chris Spiegel // Clément Vasseur // Daniel Micay // Daniel Sabogal // Daurnimator // David Carlier // David Edelsohn // Denys Vlasenko // Dmitry Ivanov // Dmitry V. Levin // Drew DeVault // Emil Renner Berthing // Fangrui Song // Felix Fietkau // Felix Janda // Gianluca Anzolin // Hauke Mehrtens // He X // Hiltjo Posthuma // Isaac Dunham // Jaydeep Patil // Jens Gustedt // Jeremy Huntwork // Jo-Philipp Wich // Joakim Sindholt // John Spencer // Julien Ramseier // Justin Cormack // Kaarle Ritvanen // Khem Raj // Kylie McClain // Leah Neukirchen // Luca Barbato // Luka Perkov // M Farkas-Dyck (Strake) // Mahesh Bodapati // Markus Wichmann // Masanori Ogino // Michael Clark // Michael Forney // Mikhail Kremnyov // Natanael Copa // Nicholas J. Kain // orc // Pascal Cuoq // Patrick Oppenlander // Petr Hosek // Petr Skocik // Pierre Carrier // Reini Urban // Rich Felker // Richard Pennington // Ryan Fairfax // Samuel Holland // Segev Finer // Shiz // sin // Solar Designer // Stefan Kristiansson // Stefan O'Rear // Szabolcs Nagy // Timo Teräs // Trutz Behn // Valentin Ochs // Will Dietz // William Haddon // William Pitcock // // Portions of this software are derived from third-party works licensed // under terms compatible with the above MIT license: // // The TRE regular expression implementation (src/regex/reg* and // src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed // under a 2-clause BSD license (license text in the source files). The // included version has been heavily modified by Rich Felker in 2012, in // the interests of size, simplicity, and namespace cleanliness. // // Much of the math library code (src/math/* and src/complex/*) is // Copyright © 1993,2004 Sun Microsystems or // Copyright © 2003-2011 David Schultz or // Copyright © 2003-2009 Steven G. Kargl or // Copyright © 2003-2009 Bruce D. 
Evans or // Copyright © 2008 Stephen L. Moshier or // Copyright © 2017-2018 Arm Limited // and labelled as such in comments in the individual source files. All // have been licensed under extremely permissive terms. // // The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008 // The Android Open Source Project and is licensed under a two-clause BSD // license. It was taken from Bionic libc, used on Android. // // The AArch64 memcpy and memset code (src/string/aarch64/*) are // Copyright © 1999-2019, Arm Limited. // // The implementation of DES for crypt (src/crypt/crypt_des.c) is // Copyright © 1994 David Burren. It is licensed under a BSD license. // // The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was // originally written by Solar Designer and placed into the public // domain. The code also comes with a fallback permissive license for use // in jurisdictions that may not recognize the public domain. // // The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 // Valentin Ochs and is licensed under an MIT-style license. // // The x86_64 port was written by Nicholas J. Kain and is licensed under // the standard MIT terms. // // The mips and microblaze ports were originally written by Richard // Pennington for use in the ellcc project. The original code was adapted // by Rich Felker for build system and code conventions during upstream // integration. It is licensed under the standard MIT terms. // // The mips64 port was contributed by Imagination Technologies and is // licensed under the standard MIT terms. // // The powerpc port was also originally written by Richard Pennington, // and later supplemented and integrated by John Spencer. It is licensed // under the standard MIT terms. 
// // All other files which have no copyright comments are original works // produced specifically for use as part of this library, written either // by Rich Felker, the main author of the library, or by one or more // contibutors listed above. Details on authorship of individual files // can be found in the git version control history of the project. The // omission of copyright and license comments in each file is in the // interest of source tree size. // // In addition, permission is hereby granted for all public header files // (include/* and arch/* /bits/* ) and crt files intended to be linked into // applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit // the copyright notice and permission notice otherwise required by the // license, and to use these files without any requirement of // attribution. These files include substantial contributions from: // // Bobby Bingham // John Spencer // Nicholas J. Kain // Rich Felker // Richard Pennington // Stefan Kristiansson // Szabolcs Nagy // // all of whom have explicitly granted such permission. // // This file previously contained text expressing a belief that most of // the files covered by the above exception were sufficiently trivial not // to be subject to copyright, resulting in confusion over whether it // negated the permissions granted in the license. In the spirit of // permissive licensing, and of not having licensing issues being an // obstacle to adoption, that text has been removed. 
.text .attribute 4, 16 .attribute 5, "rv32im" .file "musl_memset.c" .globl memset .p2align 2 .type memset,@function memset: beqz a2, .LBB0_9memset sb a1, 0(a0) add a3, a2, a0 li a4, 3 sb a1, -1(a3) bltu a2, a4, .LBB0_9memset sb a1, 1(a0) sb a1, 2(a0) sb a1, -2(a3) li a4, 7 sb a1, -3(a3) bltu a2, a4, .LBB0_9memset sb a1, 3(a0) li a5, 9 sb a1, -4(a3) bltu a2, a5, .LBB0_9memset neg a3, a0 andi a4, a3, 3 add a3, a0, a4 sub a2, a2, a4 andi a2, a2, -4 andi a1, a1, 255 lui a4, 4112 addi a4, a4, 257 mul a1, a1, a4 sw a1, 0(a3) add a4, a3, a2 sw a1, -4(a4) bltu a2, a5, .LBB0_9memset sw a1, 4(a3) sw a1, 8(a3) sw a1, -12(a4) li a5, 25 sw a1, -8(a4) bltu a2, a5, .LBB0_9memset sw a1, 12(a3) sw a1, 16(a3) sw a1, 20(a3) sw a1, 24(a3) sw a1, -28(a4) sw a1, -24(a4) sw a1, -20(a4) andi a5, a3, 4 ori a5, a5, 24 sub a2, a2, a5 li a6, 32 sw a1, -16(a4) bltu a2, a6, .LBB0_9memset add a3, a3, a5 li a4, 31 .LBB0_8memset: sw a1, 0(a3) sw a1, 4(a3) sw a1, 8(a3) sw a1, 12(a3) sw a1, 16(a3) sw a1, 20(a3) sw a1, 24(a3) sw a1, 28(a3) addi a2, a2, -32 addi a3, a3, 32 bltu a4, a2, .LBB0_8memset .LBB0_9memset: ret .Lfunc_end0memset: .size memset, .Lfunc_end0memset-memset .ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157" .section ".note.GNU-stack","",@progbits .addrsig
robalar/os
4,530
src/boot.s
/* Declare constants for the multiboot header. */ .set ALIGN, 1<<0 /* align loaded modules on page boundaries */ .set MEMINFO, 1<<1 /* provide memory map */ .set FLAGS, ALIGN | MEMINFO /* this is the Multiboot 'flag' field */ .set MAGIC, 0x1BADB002 /* 'magic number' lets bootloader find the header */ .set CHECKSUM, -(MAGIC + FLAGS) /* checksum of above, to prove we are multiboot */ /* Declare a multiboot header that marks the program as a kernel. These are magic values that are documented in the multiboot standard. The bootloader will search for this signature in the first 8 KiB of the kernel file, aligned at a 32-bit boundary. The signature is in its own section so the header can be forced to be within the first 8 KiB of the kernel file. */ .section .multiboot .align 4 .long MAGIC .long FLAGS .long CHECKSUM /* The multiboot standard does not define the value of the stack pointer register (esp) and it is up to the kernel to provide a stack. This allocates room for a small stack by creating a symbol at the bottom of it, then allocating 16384 bytes for it, and finally creating a symbol at the top. The stack grows downwards on x86. The stack is in its own section so it can be marked nobits, which means the kernel file is smaller because it does not contain an uninitialized stack. The stack on x86 must be 16-byte aligned according to the System V ABI standard and de-facto extensions. The compiler will assume the stack is properly aligned and failure to align the stack will result in undefined behavior. */ .section .bss .align 16 stack_bottom: .skip 16384 # 16 KiB stack_top: /* The linker script specifies _start as the entry point to the kernel and the bootloader will jump to this position once the kernel has been loaded. It doesn't make sense to return from this function as the bootloader is gone. */ .section .text .global _start .type _start, @function _start: /* The bootloader has loaded us into 32-bit protected mode on a x86 machine. Interrupts are disabled. 
Paging is disabled. The processor state is as defined in the multiboot standard. The kernel has full control of the CPU. The kernel can only make use of hardware features and any code it provides as part of itself. There's no printf function, unless the kernel provides its own <stdio.h> header and a printf implementation. There are no security restrictions, no safeguards, no debugging mechanisms, only what the kernel provides itself. It has absolute and complete power over the machine. */ /* To set up a stack, we set the esp register to point to the top of the stack (as it grows downwards on x86 systems). This is necessarily done in assembly as languages such as C cannot function without a stack. */ mov $stack_top, %esp /* This is a good place to initialize crucial processor state before the high-level kernel is entered. It's best to minimize the early environment where crucial features are offline. Note that the processor is not fully initialized yet: Features such as floating point instructions and instruction set extensions are not initialized yet. The GDT should be loaded here. Paging should be enabled here. C++ features such as global constructors and exceptions will require runtime support to work as well. */ /* Enter the high-level kernel. The ABI requires the stack is 16-byte aligned at the time of the call instruction (which afterwards pushes the return pointer of size 4 bytes). The stack was originally 16-byte aligned above and we've pushed a multiple of 16 bytes to the stack since (pushed 0 bytes so far), so the alignment has thus been preserved and the call is well defined. */ call kernel_main /* If the system has nothing more to do, put the computer into an infinite loop. To do that: 1) Disable interrupts with cli (clear interrupt enable in eflags). They are already disabled by the bootloader, so this is not needed. Mind that you might later enable interrupts and return from kernel_main (which is sort of nonsensical to do). 
2) Wait for the next interrupt to arrive with hlt (halt instruction). Since they are disabled, this will lock up the computer. 3) Jump to the hlt instruction if it ever wakes up due to a non-maskable interrupt occurring or due to system management mode. */ cli 1: hlt jmp 1b /* Set the size of the _start symbol to the current location '.' minus its start. This is useful when debugging or when you implement call tracing. */ .size _start, . - _start
rslabbox/arm_rstiny
2,415
src/arch/trap.S
.macro SAVE_REGS sub sp, sp, 34 * 8 stp x0, x1, [sp] stp x2, x3, [sp, 2 * 8] stp x4, x5, [sp, 4 * 8] stp x6, x7, [sp, 6 * 8] stp x8, x9, [sp, 8 * 8] stp x10, x11, [sp, 10 * 8] stp x12, x13, [sp, 12 * 8] stp x14, x15, [sp, 14 * 8] stp x16, x17, [sp, 16 * 8] stp x18, x19, [sp, 18 * 8] stp x20, x21, [sp, 20 * 8] stp x22, x23, [sp, 22 * 8] stp x24, x25, [sp, 24 * 8] stp x26, x27, [sp, 26 * 8] stp x28, x29, [sp, 28 * 8] mrs x9, sp_el0 mrs x10, elr_el1 mrs x11, spsr_el1 stp x30, x9, [sp, 30 * 8] stp x10, x11, [sp, 32 * 8] .endm .macro RESTORE_REGS ldp x10, x11, [sp, 32 * 8] ldp x30, x9, [sp, 30 * 8] msr sp_el0, x9 msr elr_el1, x10 msr spsr_el1, x11 ldp x28, x29, [sp, 28 * 8] ldp x26, x27, [sp, 26 * 8] ldp x24, x25, [sp, 24 * 8] ldp x22, x23, [sp, 22 * 8] ldp x20, x21, [sp, 20 * 8] ldp x18, x19, [sp, 18 * 8] ldp x16, x17, [sp, 16 * 8] ldp x14, x15, [sp, 14 * 8] ldp x12, x13, [sp, 12 * 8] ldp x10, x11, [sp, 10 * 8] ldp x8, x9, [sp, 8 * 8] ldp x6, x7, [sp, 6 * 8] ldp x4, x5, [sp, 4 * 8] ldp x2, x3, [sp, 2 * 8] ldp x0, x1, [sp] add sp, sp, 34 * 8 .endm .macro INVALID_EXCP, kind, source .p2align 7 SAVE_REGS mov x0, sp mov x1, \kind mov x2, \source bl invalid_exception b .Lexception_return .endm .macro HANDLE_SYNC .p2align 7 SAVE_REGS mov x0, sp bl handle_sync_exception b .Lexception_return .endm .macro HANDLE_IRQ .p2align 7 SAVE_REGS mov x0, sp bl handle_irq_exception b .Lexception_return .endm .section .text .p2align 11 .global exception_vector_base exception_vector_base: // current EL, with SP_EL0 INVALID_EXCP 0 0 INVALID_EXCP 1 0 INVALID_EXCP 2 0 INVALID_EXCP 3 0 // current EL, with SP_ELx HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 1 INVALID_EXCP 3 1 // lower EL, aarch64 HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 2 INVALID_EXCP 3 2 // lower EL, aarch32 INVALID_EXCP 0 3 INVALID_EXCP 1 3 INVALID_EXCP 2 3 INVALID_EXCP 3 3 .Lexception_return: RESTORE_REGS eret
rslabbox/arm_rstiny
2,544
tools/chainloader/src/_arch/aarch64/cpu/boot.s
// SPDX-License-Identifier: MIT OR Apache-2.0 // // Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com> //-------------------------------------------------------------------------------------------------- // Definitions //-------------------------------------------------------------------------------------------------- // Load the address of a symbol into a register, PC-relative. // // The symbol must lie within +/- 4 GiB of the Program Counter. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_REL register, symbol adrp \register, \symbol add \register, \register, #:lo12:\symbol .endm // Load the address of a symbol into a register, absolute. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_ABS register, symbol movz \register, #:abs_g2:\symbol movk \register, #:abs_g1_nc:\symbol movk \register, #:abs_g0_nc:\symbol .endm //-------------------------------------------------------------------------------------------------- // Public Code //-------------------------------------------------------------------------------------------------- .section .text._start //------------------------------------------------------------------------------ // fn _start() //------------------------------------------------------------------------------ _start: // Only proceed on the boot core. Park it otherwise. mrs x0, MPIDR_EL1 and x0, x0, {CONST_CORE_ID_MASK} ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs cmp x0, x1 b.ne .L_parking_loop // If execution reaches here, it is the boot core. // Initialize DRAM. ADR_ABS x0, __bss_start ADR_ABS x1, __bss_end_exclusive .L_bss_init_loop: cmp x0, x1 b.eq .L_relocate_binary stp xzr, xzr, [x0], #16 b .L_bss_init_loop // Next, relocate the binary. .L_relocate_binary: ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to. 
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to. ADR_ABS x2, __binary_nonzero_end_exclusive .L_copy_loop: ldr x3, [x0], #8 str x3, [x1], #8 cmp x1, x2 b.lo .L_copy_loop // Prepare the jump to Rust code. // Set the stack pointer. ADR_ABS x0, __boot_core_stack_end_exclusive mov sp, x0 // Jump to the relocated Rust code. ADR_ABS x1, _start_rust br x1 // Infinitely wait for events (aka "park the core"). .L_parking_loop: wfe b .L_parking_loop .size _start, . - _start .type _start, function .global _start
Ruanyx1823/sve
11,809
library/std/src/sys/pal/sgx/abi/entry.S
/* This symbol is used at runtime to figure out the virtual address that the */ /* enclave is loaded at. */ .section absolute .global IMAGE_BASE IMAGE_BASE: .section ".note.x86_64-fortanix-unknown-sgx", "", @note .align 4 .long 1f - 0f /* name length (not including padding) */ .long 3f - 2f /* desc length (not including padding) */ .long 1 /* type = NT_VERSION */ 0: .asciz "toolchain-version" /* name */ 1: .align 4 2: .long 1 /* desc - toolchain version number, 32-bit LE */ 3: .align 4 .section .rodata /* The XSAVE area needs to be a large chunk of readable memory, but since we are */ /* going to restore everything to its initial state (XSTATE_BV=0), only certain */ /* parts need to have a defined value. In particular: */ /* */ /* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */ /* RFBM[2] is set, regardless of the value of XSTATE_BV */ /* * XSAVE header */ .align 64 .Lxsave_clear: .org .+24 .Lxsave_mxcsr: .short 0x1fbf /* We can store a bunch of data in the gap between MXCSR and the XSAVE header */ /* The following symbols point at read-only data that will be filled in by the */ /* post-linker. */ /* When using this macro, don't forget to adjust the linker version script! 
*/ .macro globvar name:req size:req .global \name .protected \name .align \size .size \name , \size \name : .org .+\size .endm /* The base address (relative to enclave start) of the heap area */ globvar HEAP_BASE 8 /* The heap size in bytes */ globvar HEAP_SIZE 8 /* Value of the RELA entry in the dynamic table */ globvar RELA 8 /* Value of the RELACOUNT entry in the dynamic table */ globvar RELACOUNT 8 /* The enclave size in bytes */ globvar ENCLAVE_SIZE 8 /* The base address (relative to enclave start) of the enclave configuration area */ globvar CFGDATA_BASE 8 /* Non-zero if debugging is enabled, zero otherwise */ globvar DEBUG 1 /* The base address (relative to enclave start) of the enclave text section */ globvar TEXT_BASE 8 /* The size in bytes of enclave text section */ globvar TEXT_SIZE 8 /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_OFFSET 8 /* The size in bytes of enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_LEN 8 /* The base address (relative to enclave start) of the enclave .eh_frame section */ globvar EH_FRM_OFFSET 8 /* The size in bytes of enclave .eh_frame section */ globvar EH_FRM_LEN 8 .org .Lxsave_clear+512 .Lxsave_header: .int 0, 0 /* XSTATE_BV */ .int 0, 0 /* XCOMP_BV */ .org .+48 /* reserved bits */ .data .Laborted: .byte 0 /* TCS local storage section */ .equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */ .equ tcsls_flags, 0x08 /* initialized by loader */ .equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */ .equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */ /* 14 unused bits */ .equ tcsls_user_fcw, 0x0a .equ tcsls_user_mxcsr, 0x0c .equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */ .equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */ .equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */ .equ tcsls_user_rsp, 0x28 .equ tcsls_user_retip, 0x30 .equ tcsls_user_rbp, 
0x38 .equ tcsls_user_r12, 0x40 .equ tcsls_user_r13, 0x48 .equ tcsls_user_r14, 0x50 .equ tcsls_user_r15, 0x58 .equ tcsls_tls_ptr, 0x60 .equ tcsls_tcs_addr, 0x68 .macro load_tcsls_flag_secondary_bool reg:req comments:vararg .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */ .abort .endif mov $(1<<tcsls_flag_secondary),%e\reg and %gs:tcsls_flags,%\reg .endm /* We place the ELF entry point in a separate section so it can be removed by elf2sgxs */ .section .text_no_sgx, "ax" .Lelf_entry_error_msg: .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n" .Lelf_entry_error_msg_end: .global elf_entry .type elf_entry,function elf_entry: /* print error message */ movq $2,%rdi /* write to stderr (fd 2) */ lea .Lelf_entry_error_msg(%rip),%rsi movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx .Lelf_entry_call: movq $1,%rax /* write() syscall */ syscall test %rax,%rax jle .Lelf_exit /* exit on error */ add %rax,%rsi sub %rax,%rdx /* all chars written? */ jnz .Lelf_entry_call .Lelf_exit: movq $60,%rax /* exit() syscall */ movq $1,%rdi /* exit code 1 */ syscall ud2 /* should not be reached */ /* end elf_entry */ /* This code needs to be called *after* the enclave stack has been setup. */ /* There are 3 places where this needs to happen, so this is put in a macro. 
*/ .macro entry_sanitize_final /* Sanitize rflags received from user */ /* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ /* - AC flag: AEX on misaligned memory accesses leaks side channel info */ pushfq andq $~0x40400, (%rsp) popfq /* check for abort */ bt $0,.Laborted(%rip) jc .Lreentry_panic .endm .text .global sgx_entry .type sgx_entry,function sgx_entry: /* save user registers */ mov %rcx,%gs:tcsls_user_retip mov %rsp,%gs:tcsls_user_rsp mov %rbp,%gs:tcsls_user_rbp mov %r12,%gs:tcsls_user_r12 mov %r13,%gs:tcsls_user_r13 mov %r14,%gs:tcsls_user_r14 mov %r15,%gs:tcsls_user_r15 mov %rbx,%gs:tcsls_tcs_addr stmxcsr %gs:tcsls_user_mxcsr fnstcw %gs:tcsls_user_fcw /* check for debug buffer pointer */ testb $0xff,DEBUG(%rip) jz .Lskip_debug_init mov %r10,%gs:tcsls_debug_panic_buf_ptr .Lskip_debug_init: /* reset cpu state */ mov %rdx, %r10 mov $-1, %rax mov $-1, %rdx xrstor .Lxsave_clear(%rip) lfence mov %r10, %rdx /* check if returning from usercall */ mov %gs:tcsls_last_rsp,%r11 test %r11,%r11 jnz .Lusercall_ret /* setup stack */ mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */ /* here. This is fixed below under "adjust stack". 
*/ /* check for thread init */ bts $tcsls_flag_init_once,%gs:tcsls_flags jc .Lskip_init /* adjust stack */ lea IMAGE_BASE(%rip),%rax add %rax,%rsp mov %rsp,%gs:tcsls_tos entry_sanitize_final /* call tcs_init */ /* store caller-saved registers in callee-saved registers */ mov %rdi,%rbx mov %rsi,%r12 mov %rdx,%r13 mov %r8,%r14 mov %r9,%r15 load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */ call tcs_init /* reload caller-saved registers */ mov %rbx,%rdi mov %r12,%rsi mov %r13,%rdx mov %r14,%r8 mov %r15,%r9 jmp .Lafter_init .Lskip_init: entry_sanitize_final .Lafter_init: /* call into main entry point */ load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */ call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */ mov %rax,%rsi /* RSI = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ xor %rdi,%rdi /* RDI = normal exit */ .Lexit: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set later */ /* RCX overwritten by ENCLU */ /* RDX contains return value */ /* RSP set later */ /* RBP set later */ /* RDI contains exit mode */ /* RSI contains return value */ xor %r8,%r8 xor %r9,%r9 xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ .Lsgx_exit: /* clear extended register state */ mov %rdx, %rcx /* save RDX */ mov $-1, %rax mov %rax, %rdx xrstor .Lxsave_clear(%rip) mov %rcx, %rdx /* restore RDX */ /* clear flags */ pushq $0 popfq /* restore user registers */ mov %gs:tcsls_user_r12,%r12 mov %gs:tcsls_user_r13,%r13 mov %gs:tcsls_user_r14,%r14 mov %gs:tcsls_user_r15,%r15 mov %gs:tcsls_user_retip,%rbx mov %gs:tcsls_user_rsp,%rsp mov %gs:tcsls_user_rbp,%rbp fldcw %gs:tcsls_user_fcw ldmxcsr %gs:tcsls_user_mxcsr /* exit enclave */ mov $0x4,%eax /* EEXIT */ enclu /* end sgx_entry */ .Lreentry_panic: orq $8,%rsp jmp abort_reentry /* This *MUST* be called with 6 parameters, otherwise register information */ /* might leak! 
*/ .global usercall usercall: test %rcx,%rcx /* check `abort` function argument */ jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */ jmp .Lusercall_save_state /* non-aborting usercall */ .Lusercall_abort: /* set aborted bit */ movb $1,.Laborted(%rip) /* save registers in DEBUG mode, so that debugger can reconstruct the stack */ testb $0xff,DEBUG(%rip) jz .Lusercall_noreturn .Lusercall_save_state: /* save callee-saved state */ push %r15 push %r14 push %r13 push %r12 push %rbp push %rbx sub $8, %rsp fstcw 4(%rsp) stmxcsr (%rsp) movq %rsp,%gs:tcsls_last_rsp .Lusercall_noreturn: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set by sgx_exit */ /* RCX overwritten by ENCLU */ /* RDX contains parameter */ /* RSP set by sgx_exit */ /* RBP set by sgx_exit */ /* RDI contains parameter */ /* RSI contains parameter */ /* R8 contains parameter */ /* R9 contains parameter */ xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ /* extended registers/flags cleared by sgx_exit */ /* exit */ jmp .Lsgx_exit .Lusercall_ret: movq $0,%gs:tcsls_last_rsp /* restore callee-saved state, cf. "save" above */ mov %r11,%rsp /* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */ /* vector instructions is used. We omit the lfence here as one is required before */ /* the jmp instruction anyway. 
*/ ldmxcsr (%rsp) fldcw 4(%rsp) add $8, %rsp entry_sanitize_final pop %rbx pop %rbp pop %r12 pop %r13 pop %r14 pop %r15 /* return */ mov %rsi,%rax /* RAX = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ pop %r11 lfence jmp *%r11 /* The following functions need to be defined externally: ``` // Called by entry code on re-entry after exit extern "C" fn abort_reentry() -> !; // Called once when a TCS is first entered extern "C" fn tcs_init(secondary: bool); // Standard TCS entrypoint extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64); ``` */ .global get_tcs_addr get_tcs_addr: mov %gs:tcsls_tcs_addr,%rax pop %r11 lfence jmp *%r11 .global get_tls_ptr get_tls_ptr: mov %gs:tcsls_tls_ptr,%rax pop %r11 lfence jmp *%r11 .global set_tls_ptr set_tls_ptr: mov %rdi,%gs:tcsls_tls_ptr pop %r11 lfence jmp *%r11 .global take_debug_panic_buf_ptr take_debug_panic_buf_ptr: xor %rax,%rax xchg %gs:tcsls_debug_panic_buf_ptr,%rax pop %r11 lfence jmp *%r11
Ruanyx1823/rust-merge-sve
4,337
library/compiler-builtins/compiler-builtins/src/hexagon/dfsqrt.s
.text .global __hexagon_sqrtdf2 .type __hexagon_sqrtdf2,@function .global __hexagon_sqrt .type __hexagon_sqrt,@function .global __qdsp_sqrtdf2 ; .set __qdsp_sqrtdf2, __hexagon_sqrtdf2; .type __qdsp_sqrtdf2,@function .global __qdsp_sqrt ; .set __qdsp_sqrt, __hexagon_sqrt; .type __qdsp_sqrt,@function .global __hexagon_fast_sqrtdf2 ; .set __hexagon_fast_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast_sqrtdf2,@function .global __hexagon_fast_sqrt ; .set __hexagon_fast_sqrt, __hexagon_sqrt; .type __hexagon_fast_sqrt,@function .global __hexagon_fast2_sqrtdf2 ; .set __hexagon_fast2_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast2_sqrtdf2,@function .global __hexagon_fast2_sqrt ; .set __hexagon_fast2_sqrt, __hexagon_sqrt; .type __hexagon_fast2_sqrt,@function .type sqrt,@function .p2align 5 __hexagon_sqrtdf2: __hexagon_sqrt: { r15:14 = extractu(r1:0,#23 +1,#52 -23) r28 = extractu(r1,#11,#52 -32) r5:4 = combine(##0x3f000004,#1) } { p2 = dfclass(r1:0,#0x02) p2 = cmp.gt(r1,#-1) if (!p2.new) jump:nt .Lsqrt_abnormal r9 = or(r5,r14) } .Ldenormal_restart: { r11:10 = r1:0 r7,p0 = sfinvsqrta(r9) r5 = and(r5,#-16) r3:2 = #0 } { r3 += sfmpy(r7,r9):lib r2 += sfmpy(r7,r5):lib r6 = r5 r9 = and(r28,#1) } { r6 -= sfmpy(r3,r2):lib r11 = insert(r4,#11 +1,#52 -32) p1 = cmp.gtu(r9,#0) } { r3 += sfmpy(r3,r6):lib r2 += sfmpy(r2,r6):lib r6 = r5 r9 = mux(p1,#8,#9) } { r6 -= sfmpy(r3,r2):lib r11:10 = asl(r11:10,r9) r9 = mux(p1,#3,#2) } { r2 += sfmpy(r2,r6):lib r15:14 = asl(r11:10,r9) } { r2 = and(r2,##0x007fffff) } { r2 = add(r2,##0x00800000 - 3) r9 = mux(p1,#7,#8) } { r8 = asl(r2,r9) r9 = mux(p1,#15-(1+1),#15-(1+0)) } { r13:12 = mpyu(r8,r15) } { r1:0 = asl(r11:10,#15) r15:14 = mpyu(r13,r13) p1 = cmp.eq(r0,r0) } { r1:0 -= asl(r15:14,#15) r15:14 = mpyu(r13,r12) p2 = cmp.eq(r0,r0) } { r1:0 -= lsr(r15:14,#16) p3 = cmp.eq(r0,r0) } { r1:0 = mpyu(r1,r8) } { r13:12 += lsr(r1:0,r9) r9 = add(r9,#16) r1:0 = asl(r11:10,#31) } { r15:14 = mpyu(r13,r13) r1:0 -= mpyu(r13,r12) } { r1:0 -= asl(r15:14,#31) r15:14 = 
mpyu(r12,r12) } { r1:0 -= lsr(r15:14,#33) } { r1:0 = mpyu(r1,r8) } { r13:12 += lsr(r1:0,r9) r9 = add(r9,#16) r1:0 = asl(r11:10,#47) } { r15:14 = mpyu(r13,r13) } { r1:0 -= asl(r15:14,#47) r15:14 = mpyu(r13,r12) } { r1:0 -= asl(r15:14,#16) r15:14 = mpyu(r12,r12) } { r1:0 -= lsr(r15:14,#17) } { r1:0 = mpyu(r1,r8) } { r13:12 += lsr(r1:0,r9) } { r3:2 = mpyu(r13,r12) r5:4 = mpyu(r12,r12) r15:14 = #0 r1:0 = #0 } { r3:2 += lsr(r5:4,#33) r5:4 += asl(r3:2,#33) p1 = cmp.eq(r0,r0) } { r7:6 = mpyu(r13,r13) r1:0 = sub(r1:0,r5:4,p1):carry r9:8 = #1 } { r7:6 += lsr(r3:2,#31) r9:8 += asl(r13:12,#1) } { r15:14 = sub(r11:10,r7:6,p1):carry r5:4 = sub(r1:0,r9:8,p2):carry r7:6 = #1 r11:10 = #0 } { r3:2 = sub(r15:14,r11:10,p2):carry r7:6 = add(r13:12,r7:6) r28 = add(r28,#-0x3ff) } { if (p2) r13:12 = r7:6 if (p2) r1:0 = r5:4 if (p2) r15:14 = r3:2 } { r5:4 = sub(r1:0,r9:8,p3):carry r7:6 = #1 r28 = asr(r28,#1) } { r3:2 = sub(r15:14,r11:10,p3):carry r7:6 = add(r13:12,r7:6) } { if (p3) r13:12 = r7:6 if (p3) r1:0 = r5:4 r2 = #1 } { p0 = cmp.eq(r1:0,r11:10) if (!p0.new) r12 = or(r12,r2) r3 = cl0(r13:12) r28 = add(r28,#-63) } { r1:0 = convert_ud2df(r13:12) r28 = add(r28,r3) } { r1 += asl(r28,#52 -32) jumpr r31 } .Lsqrt_abnormal: { p0 = dfclass(r1:0,#0x01) if (p0.new) jumpr:t r31 } { p0 = dfclass(r1:0,#0x10) if (p0.new) jump:nt .Lsqrt_nan } { p0 = cmp.gt(r1,#-1) if (!p0.new) jump:nt .Lsqrt_invalid_neg if (!p0.new) r28 = ##0x7F800001 } { p0 = dfclass(r1:0,#0x08) if (p0.new) jumpr:nt r31 } { r1:0 = extractu(r1:0,#52,#0) } { r28 = add(clb(r1:0),#-11) } { r1:0 = asl(r1:0,r28) r28 = sub(#1,r28) } { r1 = insert(r28,#1,#52 -32) } { r3:2 = extractu(r1:0,#23 +1,#52 -23) r5 = ##0x3f000004 } { r9 = or(r5,r2) r5 = and(r5,#-16) jump .Ldenormal_restart } .Lsqrt_nan: { r28 = convert_df2sf(r1:0) r1:0 = #-1 jumpr r31 } .Lsqrt_invalid_neg: { r1:0 = convert_sf2df(r28) jumpr r31 } .size __hexagon_sqrt,.-__hexagon_sqrt .size __hexagon_sqrtdf2,.-__hexagon_sqrtdf2
Ruanyx1823/rust-merge-sve
3,885
library/compiler-builtins/compiler-builtins/src/hexagon/fastmath2_ldlib_asm.s
.text .global __hexagon_fast2ldadd_asm .type __hexagon_fast2ldadd_asm, @function __hexagon_fast2ldadd_asm: .falign { R4 = memw(r29+#8) R5 = memw(r29+#24) r7 = r0 } { R6 = sub(R4, R5):sat P0 = CMP.GT(R4, R5); if ( P0.new) R8 = add(R4, #1) if (!P0.new) R8 = add(R5, #1) } { R6 = abs(R6):sat if ( P0) R4 = #1 if (!P0) R5 = #1 R9 = #62 } { R6 = MIN(R6, R9) R1:0 = memd(r29+#0) R3:2 = memd(r29+#16) } { if (!P0) R4 = add(R6, #1) if ( P0) R5 = add(R6, #1) } { R1:0 = ASR(R1:0, R4) R3:2 = ASR(R3:2, R5) } { R1:0 = add(R1:0, R3:2) R3:2 = #0 } { R4 = clb(R1:0) R9.L =#0x0001 } { R8 -= add(R4, #-1) R4 = add(R4, #-1) p0 = cmp.gt(R4, #58) R9.H =#0x8000 } { if(!p0)memw(r7+#8) = R8 R1:0 = ASL(R1:0, R4) if(p0) jump .Ldenorma1 } { memd(r7+#0) = R1:0 jumpr r31 } .Ldenorma1: memd(r7+#0) = R3:2 { memw(r7+#8) = R9 jumpr r31 } .text .global __hexagon_fast2ldsub_asm .type __hexagon_fast2ldsub_asm, @function __hexagon_fast2ldsub_asm: .falign { R4 = memw(r29+#8) R5 = memw(r29+#24) r7 = r0 } { R6 = sub(R4, R5):sat P0 = CMP.GT(R4, R5); if ( P0.new) R8 = add(R4, #1) if (!P0.new) R8 = add(R5, #1) } { R6 = abs(R6):sat if ( P0) R4 = #1 if (!P0) R5 = #1 R9 = #62 } { R6 = min(R6, R9) R1:0 = memd(r29+#0) R3:2 = memd(r29+#16) } { if (!P0) R4 = add(R6, #1) if ( P0) R5 = add(R6, #1) } { R1:0 = ASR(R1:0, R4) R3:2 = ASR(R3:2, R5) } { R1:0 = sub(R1:0, R3:2) R3:2 = #0 } { R4 = clb(R1:0) R9.L =#0x0001 } { R8 -= add(R4, #-1) R4 = add(R4, #-1) p0 = cmp.gt(R4, #58) R9.H =#0x8000 } { if(!p0)memw(r7+#8) = R8 R1:0 = asl(R1:0, R4) if(p0) jump .Ldenorma_s } { memd(r7+#0) = R1:0 jumpr r31 } .Ldenorma_s: memd(r7+#0) = R3:2 { memw(r7+#8) = R9 jumpr r31 } .text .global __hexagon_fast2ldmpy_asm .type __hexagon_fast2ldmpy_asm, @function __hexagon_fast2ldmpy_asm: .falign { R15:14 = memd(r29+#0) R3:2 = memd(r29+#16) R13:12 = #0 } { R8= extractu(R2, #31, #1) R9= extractu(R14, #31, #1) R13.H = #0x8000 } { R11:10 = mpy(R15, R3) R7:6 = mpy(R15, R8) R4 = memw(r29+#8) R5 = memw(r29+#24) } { R11:10 = add(R11:10, R11:10) R7:6 += 
mpy(R3, R9) } { R7:6 = asr(R7:6, #30) R8.L = #0x0001 p1 = cmp.eq(R15:14, R3:2) } { R7:6 = add(R7:6, R11:10) R4= add(R4, R5) p2 = cmp.eq(R3:2, R13:12) } { R9 = clb(R7:6) R8.H = #0x8000 p1 = and(p1, p2) } { R4-= add(R9, #-1) R9 = add(R9, #-1) if(p1) jump .Lsat1 } { R7:6 = asl(R7:6, R9) memw(R0+#8) = R4 p0 = cmp.gt(R9, #58) if(p0.new) jump:NT .Ldenorm1 } { memd(R0+#0) = R7:6 jumpr r31 } .Lsat1: { R13:12 = #0 R4+= add(R9, #1) } { R13.H = #0x4000 memw(R0+#8) = R4 } { memd(R0+#0) = R13:12 jumpr r31 } .Ldenorm1: { memw(R0+#8) = R8 R15:14 = #0 } { memd(R0+#0) = R15:14 jumpr r31 }
Ruanyx1823/rust-merge-sve
4,378
library/compiler-builtins/compiler-builtins/src/hexagon/dfmul.s
.text .global __hexagon_muldf3 .type __hexagon_muldf3,@function .global __qdsp_muldf3 ; .set __qdsp_muldf3, __hexagon_muldf3 .global __hexagon_fast_muldf3 ; .set __hexagon_fast_muldf3, __hexagon_muldf3 .global __hexagon_fast2_muldf3 ; .set __hexagon_fast2_muldf3, __hexagon_muldf3 .p2align 5 __hexagon_muldf3: { p0 = dfclass(r1:0,#2) p0 = dfclass(r3:2,#2) r13:12 = combine(##0x40000000,#0) } { r13:12 = insert(r1:0,#52,#11 -1) r5:4 = asl(r3:2,#11 -1) r28 = #-1024 r9:8 = #1 } { r7:6 = mpyu(r4,r13) r5:4 = insert(r9:8,#2,#62) } { r15:14 = mpyu(r12,r4) r7:6 += mpyu(r12,r5) } { r7:6 += lsr(r15:14,#32) r11:10 = mpyu(r13,r5) r5:4 = combine(##1024 +1024 -4,#0) } { r11:10 += lsr(r7:6,#32) if (!p0) jump .Lmul_abnormal p1 = cmp.eq(r14,#0) p1 = cmp.eq(r6,#0) } { if (!p1) r10 = or(r10,r8) r6 = extractu(r1,#11,#20) r7 = extractu(r3,#11,#20) } { r15:14 = neg(r11:10) r6 += add(r28,r7) r28 = xor(r1,r3) } { if (!p2.new) r11:10 = r15:14 p2 = cmp.gt(r28,#-1) p0 = !cmp.gt(r6,r5) p0 = cmp.gt(r6,r4) if (!p0.new) jump:nt .Lmul_ovf_unf } { r1:0 = convert_d2df(r11:10) r6 = add(r6,#-1024 -58) } { r1 += asl(r6,#20) jumpr r31 } .falign .Lpossible_unf1: { p0 = cmp.eq(r0,#0) p0 = bitsclr(r1,r4) if (!p0.new) jumpr:t r31 r5 = #0x7fff } { p0 = bitsset(r13,r5) r4 = USR r5 = #0x030 } { if (p0) r4 = or(r4,r5) } { USR = r4 } { p0 = dfcmp.eq(r1:0,r1:0) jumpr r31 } .falign .Lmul_ovf_unf: { r1:0 = convert_d2df(r11:10) r13:12 = abs(r11:10) r7 = add(r6,#-1024 -58) } { r1 += asl(r7,#20) r7 = extractu(r1,#11,#20) r4 = ##0x7FEFFFFF } { r7 += add(r6,##-1024 -58) r5 = #0 } { p0 = cmp.gt(r7,##1024 +1024 -2) if (p0.new) jump:nt .Lmul_ovf } { p0 = cmp.gt(r7,#0) if (p0.new) jump:nt .Lpossible_unf1 r5 = sub(r6,r5) r28 = #63 } { r4 = #0 r5 = sub(#5,r5) } { p3 = cmp.gt(r11,#-1) r5 = min(r5,r28) r11:10 = r13:12 } { r28 = USR r15:14 = extractu(r11:10,r5:4) } { r11:10 = asr(r11:10,r5) r4 = #0x0030 r1 = insert(r9,#11,#20) } { p0 = cmp.gtu(r9:8,r15:14) if (!p0.new) r10 = or(r10,r8) r11 = setbit(r11,#20 +3) } { r15:14 = 
neg(r11:10) p1 = bitsclr(r10,#0x7) if (!p1.new) r28 = or(r4,r28) } { if (!p3) r11:10 = r15:14 USR = r28 } { r1:0 = convert_d2df(r11:10) p0 = dfcmp.eq(r1:0,r1:0) } { r1 = insert(r9,#11 -1,#20 +1) jumpr r31 } .falign .Lmul_ovf: { r28 = USR r13:12 = combine(##0x7fefffff,#-1) r1:0 = r11:10 } { r14 = extractu(r28,#2,#22) r28 = or(r28,#0x28) r5:4 = combine(##0x7ff00000,#0) } { USR = r28 r14 ^= lsr(r1,#31) r28 = r14 } { p0 = !cmp.eq(r28,#1) p0 = !cmp.eq(r14,#2) if (p0.new) r13:12 = r5:4 p0 = dfcmp.eq(r1:0,r1:0) } { r1:0 = insert(r13:12,#63,#0) jumpr r31 } .Lmul_abnormal: { r13:12 = extractu(r1:0,#63,#0) r5:4 = extractu(r3:2,#63,#0) } { p3 = cmp.gtu(r13:12,r5:4) if (!p3.new) r1:0 = r3:2 if (!p3.new) r3:2 = r1:0 } { p0 = dfclass(r1:0,#0x0f) if (!p0.new) jump:nt .Linvalid_nan if (!p3) r13:12 = r5:4 if (!p3) r5:4 = r13:12 } { p1 = dfclass(r1:0,#0x08) p1 = dfclass(r3:2,#0x0e) } { p0 = dfclass(r1:0,#0x08) p0 = dfclass(r3:2,#0x01) } { if (p1) jump .Ltrue_inf p2 = dfclass(r3:2,#0x01) } { if (p0) jump .Linvalid_zeroinf if (p2) jump .Ltrue_zero r28 = ##0x7c000000 } { p0 = bitsclr(r1,r28) if (p0.new) jump:nt .Lmul_tiny } { r28 = cl0(r5:4) } { r28 = add(r28,#-11) } { r5:4 = asl(r5:4,r28) } { r3:2 = insert(r5:4,#63,#0) r1 -= asl(r28,#20) } jump __hexagon_muldf3 .Lmul_tiny: { r28 = USR r1:0 = xor(r1:0,r3:2) } { r28 = or(r28,#0x30) r1:0 = insert(r9:8,#63,#0) r5 = extractu(r28,#2,#22) } { USR = r28 p0 = cmp.gt(r5,#1) if (!p0.new) r0 = #0 r5 ^= lsr(r1,#31) } { p0 = cmp.eq(r5,#3) if (!p0.new) r0 = #0 jumpr r31 } .Linvalid_zeroinf: { r28 = USR } { r1:0 = #-1 r28 = or(r28,#2) } { USR = r28 } { p0 = dfcmp.uo(r1:0,r1:0) jumpr r31 } .Linvalid_nan: { p0 = dfclass(r3:2,#0x0f) r28 = convert_df2sf(r1:0) if (p0.new) r3:2 = r1:0 } { r2 = convert_df2sf(r3:2) r1:0 = #-1 jumpr r31 } .falign .Ltrue_zero: { r1:0 = r3:2 r3:2 = r1:0 } .Ltrue_inf: { r3 = extract(r3,#1,#31) } { r1 ^= asl(r3,#31) jumpr r31 } .size __hexagon_muldf3,.-__hexagon_muldf3
Ruanyx1823/rust-merge-sve
7,236
library/compiler-builtins/compiler-builtins/src/hexagon/dffma.s
.text .global __hexagon_fmadf4 .type __hexagon_fmadf4,@function .global __hexagon_fmadf5 .type __hexagon_fmadf5,@function .global __qdsp_fmadf5 ; .set __qdsp_fmadf5, __hexagon_fmadf5 .p2align 5 __hexagon_fmadf4: __hexagon_fmadf5: fma: { p0 = dfclass(r1:0,#2) p0 = dfclass(r3:2,#2) r13:12 = #0 r15:14 = #0 } { r13:12 = insert(r1:0,#52,#11 -3) r15:14 = insert(r3:2,#52,#11 -3) r7 = ##0x10000000 allocframe(#32) } { r9:8 = mpyu(r12,r14) if (!p0) jump .Lfma_abnormal_ab r13 = or(r13,r7) r15 = or(r15,r7) } { p0 = dfclass(r5:4,#2) if (!p0.new) jump:nt .Lfma_abnormal_c r11:10 = combine(r7,#0) r7:6 = combine(#0,r9) } .Lfma_abnormal_c_restart: { r7:6 += mpyu(r14,r13) r11:10 = insert(r5:4,#52,#11 -3) memd(r29+#0) = r17:16 memd(r29+#8) = r19:18 } { r7:6 += mpyu(r12,r15) r19:18 = neg(r11:10) p0 = cmp.gt(r5,#-1) r28 = xor(r1,r3) } { r18 = extractu(r1,#11,#20) r19 = extractu(r3,#11,#20) r17:16 = combine(#0,r7) if (!p0) r11:10 = r19:18 } { r17:16 += mpyu(r13,r15) r9:8 = combine(r6,r8) r18 = add(r18,r19) r19 = extractu(r5,#11,#20) } { r18 = add(r18,#-1023 +(4)) p3 = !cmp.gt(r28,#-1) r7:6 = #0 r15:14 = #0 } { r7:6 = sub(r7:6,r9:8,p3):carry p0 = !cmp.gt(r28,#-1) p1 = cmp.gt(r19,r18) if (p1.new) r19:18 = combine(r18,r19) } { r15:14 = sub(r15:14,r17:16,p3):carry if (p0) r9:8 = r7:6 r7:6 = #0 r19 = sub(r18,r19) } { if (p0) r17:16 = r15:14 p0 = cmp.gt(r19,#63) if (p1) r9:8 = r7:6 if (p1) r7:6 = r9:8 } { if (p1) r17:16 = r11:10 if (p1) r11:10 = r17:16 if (p0) r19 = add(r19,#-64) r28 = #63 } { if (p0) r7:6 = r11:10 r28 = asr(r11,#31) r13 = min(r19,r28) r12 = #0 } { if (p0) r11:10 = combine(r28,r28) r5:4 = extract(r7:6,r13:12) r7:6 = lsr(r7:6,r13) r12 = sub(#64,r13) } { r15:14 = #0 r28 = #-2 r7:6 |= lsl(r11:10,r12) r11:10 = asr(r11:10,r13) } { p3 = cmp.gtu(r5:4,r15:14) if (p3.new) r6 = and(r6,r28) r15:14 = #1 r5:4 = #0 } { r9:8 = add(r7:6,r9:8,p3):carry } { r17:16 = add(r11:10,r17:16,p3):carry r28 = #62 } { r12 = add(clb(r17:16),#-2) if (!cmp.eq(r12.new,r28)) jump:t 1f } { r11:10 = 
extractu(r9:8,#62,#2) r9:8 = asl(r9:8,#62) r18 = add(r18,#-62) } { r17:16 = insert(r11:10,#62,#0) } { r12 = add(clb(r17:16),#-2) } .falign 1: { r11:10 = asl(r17:16,r12) r5:4 |= asl(r9:8,r12) r13 = sub(#64,r12) r18 = sub(r18,r12) } { r11:10 |= lsr(r9:8,r13) p2 = cmp.gtu(r15:14,r5:4) r28 = #1023 +1023 -2 } { if (!p2) r10 = or(r10,r14) p0 = !cmp.gt(r18,r28) p0 = cmp.gt(r18,#1) if (!p0.new) jump:nt .Lfma_ovf_unf } { p0 = cmp.gtu(r15:14,r11:10) r1:0 = convert_d2df(r11:10) r18 = add(r18,#-1023 -60) r17:16 = memd(r29+#0) } { r1 += asl(r18,#20) r19:18 = memd(r29+#8) if (!p0) dealloc_return } .Ladd_yields_zero: { r28 = USR r1:0 = #0 } { r28 = extractu(r28,#2,#22) r17:16 = memd(r29+#0) r19:18 = memd(r29+#8) } { p0 = cmp.eq(r28,#2) if (p0.new) r1 = ##0x80000000 dealloc_return } .Lfma_ovf_unf: { p0 = cmp.gtu(r15:14,r11:10) if (p0.new) jump:nt .Ladd_yields_zero } { r1:0 = convert_d2df(r11:10) r18 = add(r18,#-1023 -60) r28 = r18 } { r1 += asl(r18,#20) r7 = extractu(r1,#11,#20) } { r6 = add(r18,r7) r17:16 = memd(r29+#0) r19:18 = memd(r29+#8) r9:8 = abs(r11:10) } { p0 = cmp.gt(r6,##1023 +1023) if (p0.new) jump:nt .Lfma_ovf } { p0 = cmp.gt(r6,#0) if (p0.new) jump:nt .Lpossible_unf0 } { r7 = add(clb(r9:8),#-2) r6 = sub(#1+5,r28) p3 = cmp.gt(r11,#-1) } { r6 = add(r6,r7) r9:8 = asl(r9:8,r7) r1 = USR r28 = #63 } { r7 = min(r6,r28) r6 = #0 r0 = #0x0030 } { r3:2 = extractu(r9:8,r7:6) r9:8 = asr(r9:8,r7) } { p0 = cmp.gtu(r15:14,r3:2) if (!p0.new) r8 = or(r8,r14) r9 = setbit(r9,#20 +3) } { r11:10 = neg(r9:8) p1 = bitsclr(r8,#(1<<3)-1) if (!p1.new) r1 = or(r1,r0) r3:2 = #0 } { if (p3) r11:10 = r9:8 USR = r1 r28 = #-1023 -(52 +3) } { r1:0 = convert_d2df(r11:10) } { r1 += asl(r28,#20) dealloc_return } .Lpossible_unf0: { r28 = ##0x7fefffff r9:8 = abs(r11:10) } { p0 = cmp.eq(r0,#0) p0 = bitsclr(r1,r28) if (!p0.new) dealloc_return:t r28 = #0x7fff } { p0 = bitsset(r9,r28) r3 = USR r2 = #0x0030 } { if (p0) r3 = or(r3,r2) } { USR = r3 } { p0 = dfcmp.eq(r1:0,r1:0) dealloc_return } .Lfma_ovf: { r28 = 
USR r11:10 = combine(##0x7fefffff,#-1) r1:0 = r11:10 } { r9:8 = combine(##0x7ff00000,#0) r3 = extractu(r28,#2,#22) r28 = or(r28,#0x28) } { USR = r28 r3 ^= lsr(r1,#31) r2 = r3 } { p0 = !cmp.eq(r2,#1) p0 = !cmp.eq(r3,#2) } { p0 = dfcmp.eq(r9:8,r9:8) if (p0.new) r11:10 = r9:8 } { r1:0 = insert(r11:10,#63,#0) dealloc_return } .Lfma_abnormal_ab: { r9:8 = extractu(r1:0,#63,#0) r11:10 = extractu(r3:2,#63,#0) deallocframe } { p3 = cmp.gtu(r9:8,r11:10) if (!p3.new) r1:0 = r3:2 if (!p3.new) r3:2 = r1:0 } { p0 = dfclass(r1:0,#0x0f) if (!p0.new) jump:nt .Lnan if (!p3) r9:8 = r11:10 if (!p3) r11:10 = r9:8 } { p1 = dfclass(r1:0,#0x08) p1 = dfclass(r3:2,#0x0e) } { p0 = dfclass(r1:0,#0x08) p0 = dfclass(r3:2,#0x01) } { if (p1) jump .Lab_inf p2 = dfclass(r3:2,#0x01) } { if (p0) jump .Linvalid if (p2) jump .Lab_true_zero r28 = ##0x7c000000 } { p0 = bitsclr(r1,r28) if (p0.new) jump:nt .Lfma_ab_tiny } { r28 = add(clb(r11:10),#-11) } { r11:10 = asl(r11:10,r28) } { r3:2 = insert(r11:10,#63,#0) r1 -= asl(r28,#20) } jump fma .Lfma_ab_tiny: r9:8 = combine(##0x00100000,#0) { r1:0 = insert(r9:8,#63,#0) r3:2 = insert(r9:8,#63,#0) } jump fma .Lab_inf: { r3:2 = lsr(r3:2,#63) p0 = dfclass(r5:4,#0x10) } { r1:0 ^= asl(r3:2,#63) if (p0) jump .Lnan } { p1 = dfclass(r5:4,#0x08) if (p1.new) jump:nt .Lfma_inf_plus_inf } { jumpr r31 } .falign .Lfma_inf_plus_inf: { p0 = dfcmp.eq(r1:0,r5:4) if (!p0.new) jump:nt .Linvalid } { jumpr r31 } .Lnan: { p0 = dfclass(r3:2,#0x10) p1 = dfclass(r5:4,#0x10) if (!p0.new) r3:2 = r1:0 if (!p1.new) r5:4 = r1:0 } { r3 = convert_df2sf(r3:2) r2 = convert_df2sf(r5:4) } { r3 = convert_df2sf(r1:0) r1:0 = #-1 jumpr r31 } .Linvalid: { r28 = ##0x7f800001 } { r1:0 = convert_sf2df(r28) jumpr r31 } .Lab_true_zero: { p0 = dfclass(r5:4,#0x10) if (p0.new) jump:nt .Lnan if (p0.new) r1:0 = r5:4 } { p0 = dfcmp.eq(r3:2,r5:4) r1 = lsr(r1,#31) } { r3 ^= asl(r1,#31) if (!p0) r1:0 = r5:4 if (!p0) jumpr r31 } { p0 = cmp.eq(r3:2,r5:4) if (p0.new) jumpr:t r31 r1:0 = r3:2 } { r28 = USR } { r28 = 
extractu(r28,#2,#22) r1:0 = #0 } { p0 = cmp.eq(r28,#2) if (p0.new) r1 = ##0x80000000 jumpr r31 } .falign .Lfma_abnormal_c: { p0 = dfclass(r5:4,#0x10) if (p0.new) jump:nt .Lnan if (p0.new) r1:0 = r5:4 deallocframe } { p0 = dfclass(r5:4,#0x08) if (p0.new) r1:0 = r5:4 if (p0.new) jumpr:nt r31 } { p0 = dfclass(r5:4,#0x01) if (p0.new) jump:nt __hexagon_muldf3 r28 = #1 } { allocframe(#32) r11:10 = #0 r5 = insert(r28,#11,#20) jump .Lfma_abnormal_c_restart } .size fma,.-fma
Ruanyx1823/rust-merge-sve
4,801
library/compiler-builtins/compiler-builtins/src/hexagon/dfaddsub.s
.text .global __hexagon_adddf3 .global __hexagon_subdf3 .type __hexagon_adddf3, @function .type __hexagon_subdf3, @function .global __qdsp_adddf3 ; .set __qdsp_adddf3, __hexagon_adddf3 .global __hexagon_fast_adddf3 ; .set __hexagon_fast_adddf3, __hexagon_adddf3 .global __hexagon_fast2_adddf3 ; .set __hexagon_fast2_adddf3, __hexagon_adddf3 .global __qdsp_subdf3 ; .set __qdsp_subdf3, __hexagon_subdf3 .global __hexagon_fast_subdf3 ; .set __hexagon_fast_subdf3, __hexagon_subdf3 .global __hexagon_fast2_subdf3 ; .set __hexagon_fast2_subdf3, __hexagon_subdf3 .p2align 5 __hexagon_adddf3: { r4 = extractu(r1,#11,#20) r5 = extractu(r3,#11,#20) r13:12 = combine(##0x20000000,#0) } { p3 = dfclass(r1:0,#2) p3 = dfclass(r3:2,#2) r9:8 = r13:12 p2 = cmp.gtu(r5,r4) } { if (!p3) jump .Ladd_abnormal if (p2) r1:0 = r3:2 if (p2) r3:2 = r1:0 if (p2) r5:4 = combine(r4,r5) } { r13:12 = insert(r1:0,#52,#11 -2) r9:8 = insert(r3:2,#52,#11 -2) r15 = sub(r4,r5) r7:6 = combine(#62,#1) } .Ladd_continue: { r15 = min(r15,r7) r11:10 = neg(r13:12) p2 = cmp.gt(r1,#-1) r14 = #0 } { if (!p2) r13:12 = r11:10 r11:10 = extractu(r9:8,r15:14) r9:8 = ASR(r9:8,r15) r15:14 = #0 } { p1 = cmp.eq(r11:10,r15:14) if (!p1.new) r8 = or(r8,r6) r5 = add(r4,#-1024 -60) p3 = cmp.gt(r3,#-1) } { r13:12 = add(r13:12,r9:8) r11:10 = sub(r13:12,r9:8) r7:6 = combine(#54,##2045) } { p0 = cmp.gtu(r4,r7) p0 = !cmp.gtu(r4,r6) if (!p0.new) jump:nt .Ladd_ovf_unf if (!p3) r13:12 = r11:10 } { r1:0 = convert_d2df(r13:12) p0 = cmp.eq(r13,#0) p0 = cmp.eq(r12,#0) if (p0.new) jump:nt .Ladd_zero } { r1 += asl(r5,#20) jumpr r31 } .falign __hexagon_subdf3: { r3 = togglebit(r3,#31) jump __qdsp_adddf3 } .falign .Ladd_zero: { r28 = USR r1:0 = #0 r3 = #1 } { r28 = extractu(r28,#2,#22) r3 = asl(r3,#31) } { p0 = cmp.eq(r28,#2) if (p0.new) r1 = xor(r1,r3) jumpr r31 } .falign .Ladd_ovf_unf: { r1:0 = convert_d2df(r13:12) p0 = cmp.eq(r13,#0) p0 = cmp.eq(r12,#0) if (p0.new) jump:nt .Ladd_zero } { r28 = extractu(r1,#11,#20) r1 += asl(r5,#20) } { r5 = 
add(r5,r28) r3:2 = combine(##0x00100000,#0) } { p0 = cmp.gt(r5,##1024 +1024 -2) if (p0.new) jump:nt .Ladd_ovf } { p0 = cmp.gt(r5,#0) if (p0.new) jumpr:t r31 r28 = sub(#1,r5) } { r3:2 = insert(r1:0,#52,#0) r1:0 = r13:12 } { r3:2 = lsr(r3:2,r28) } { r1:0 = insert(r3:2,#63,#0) jumpr r31 } .falign .Ladd_ovf: { r1:0 = r13:12 r28 = USR r13:12 = combine(##0x7fefffff,#-1) } { r5 = extractu(r28,#2,#22) r28 = or(r28,#0x28) r9:8 = combine(##0x7ff00000,#0) } { USR = r28 r5 ^= lsr(r1,#31) r28 = r5 } { p0 = !cmp.eq(r28,#1) p0 = !cmp.eq(r5,#2) if (p0.new) r13:12 = r9:8 } { r1:0 = insert(r13:12,#63,#0) } { p0 = dfcmp.eq(r1:0,r1:0) jumpr r31 } .Ladd_abnormal: { r13:12 = extractu(r1:0,#63,#0) r9:8 = extractu(r3:2,#63,#0) } { p3 = cmp.gtu(r13:12,r9:8) if (!p3.new) r1:0 = r3:2 if (!p3.new) r3:2 = r1:0 } { p0 = dfclass(r1:0,#0x0f) if (!p0.new) jump:nt .Linvalid_nan_add if (!p3) r13:12 = r9:8 if (!p3) r9:8 = r13:12 } { p1 = dfclass(r1:0,#0x08) if (p1.new) jump:nt .Linf_add } { p2 = dfclass(r3:2,#0x01) if (p2.new) jump:nt .LB_zero r13:12 = #0 } { p0 = dfclass(r1:0,#4) if (p0.new) jump:nt .Ladd_two_subnormal r13:12 = combine(##0x20000000,#0) } { r4 = extractu(r1,#11,#20) r5 = #1 r9:8 = asl(r9:8,#11 -2) } { r13:12 = insert(r1:0,#52,#11 -2) r15 = sub(r4,r5) r7:6 = combine(#62,#1) jump .Ladd_continue } .Ladd_two_subnormal: { r13:12 = extractu(r1:0,#63,#0) r9:8 = extractu(r3:2,#63,#0) } { r13:12 = neg(r13:12) r9:8 = neg(r9:8) p0 = cmp.gt(r1,#-1) p1 = cmp.gt(r3,#-1) } { if (p0) r13:12 = r1:0 if (p1) r9:8 = r3:2 } { r13:12 = add(r13:12,r9:8) } { r9:8 = neg(r13:12) p0 = cmp.gt(r13,#-1) r3:2 = #0 } { if (!p0) r1:0 = r9:8 if (p0) r1:0 = r13:12 r3 = ##0x80000000 } { if (!p0) r1 = or(r1,r3) p0 = dfcmp.eq(r1:0,r3:2) if (p0.new) jump:nt .Lzero_plus_zero } { jumpr r31 } .Linvalid_nan_add: { r28 = convert_df2sf(r1:0) p0 = dfclass(r3:2,#0x0f) if (p0.new) r3:2 = r1:0 } { r2 = convert_df2sf(r3:2) r1:0 = #-1 jumpr r31 } .falign .LB_zero: { p0 = dfcmp.eq(r13:12,r1:0) if (!p0.new) jumpr:t r31 } 
.Lzero_plus_zero: { p0 = cmp.eq(r1:0,r3:2) if (p0.new) jumpr:t r31 } { r28 = USR } { r28 = extractu(r28,#2,#22) r1:0 = #0 } { p0 = cmp.eq(r28,#2) if (p0.new) r1 = ##0x80000000 jumpr r31 } .Linf_add: { p0 = !cmp.eq(r1,r3) p0 = dfclass(r3:2,#8) if (!p0.new) jumpr:t r31 } { r2 = ##0x7f800001 } { r1:0 = convert_sf2df(r2) jumpr r31 } .size __hexagon_adddf3,.-__hexagon_adddf3
Ruanyx1823/rust-merge-sve
1,295
library/compiler-builtins/compiler-builtins/src/hexagon/memcpy_forward_vp4cp4n2.s
.text .globl hexagon_memcpy_forward_vp4cp4n2 .balign 32 .type hexagon_memcpy_forward_vp4cp4n2,@function hexagon_memcpy_forward_vp4cp4n2: { r3 = sub(##4096, r1) r5 = lsr(r2, #3) } { r3 = extractu(r3, #10, #2) r4 = extractu(r3, #7, #5) } { r3 = minu(r2, r3) r4 = minu(r5, r4) } { r4 = or(r4, ##2105344) p0 = cmp.eq(r3, #0) if (p0.new) jump:nt .Lskipprolog } l2fetch(r1, r4) { loop0(.Lprolog, r3) r2 = sub(r2, r3) } .falign .Lprolog: { r4 = memw(r1++#4) memw(r0++#4) = r4.new } :endloop0 .Lskipprolog: { r3 = lsr(r2, #10) if (cmp.eq(r3.new, #0)) jump:nt .Lskipmain } { loop1(.Lout, r3) r2 = extractu(r2, #10, #0) r3 = ##2105472 } .falign .Lout: l2fetch(r1, r3) loop0(.Lpage, #512) .falign .Lpage: r5:4 = memd(r1++#8) { memw(r0++#8) = r4 memw(r0+#4) = r5 } :endloop0:endloop1 .Lskipmain: { r3 = ##2105344 r4 = lsr(r2, #3) p0 = cmp.eq(r2, #0) if (p0.new) jumpr:nt r31 } { r3 = or(r3, r4) loop0(.Lepilog, r2) } l2fetch(r1, r3) .falign .Lepilog: { r4 = memw(r1++#4) memw(r0++#4) = r4.new } :endloop0 jumpr r31 .size hexagon_memcpy_forward_vp4cp4n2, . - hexagon_memcpy_forward_vp4cp4n2
Ruanyx1823/rust-merge-sve
5,659
library/compiler-builtins/compiler-builtins/src/hexagon/dfdiv.s
.text .global __hexagon_divdf3 .type __hexagon_divdf3,@function .global __qdsp_divdf3 ; .set __qdsp_divdf3, __hexagon_divdf3 .global __hexagon_fast_divdf3 ; .set __hexagon_fast_divdf3, __hexagon_divdf3 .global __hexagon_fast2_divdf3 ; .set __hexagon_fast2_divdf3, __hexagon_divdf3 .p2align 5 __hexagon_divdf3: { p2 = dfclass(r1:0,#0x02) p2 = dfclass(r3:2,#0x02) r13:12 = combine(r3,r1) r28 = xor(r1,r3) } { if (!p2) jump .Ldiv_abnormal r7:6 = extractu(r3:2,#23,#52 -23) r8 = ##0x3f800001 } { r9 = or(r8,r6) r13 = extractu(r13,#11,#52 -32) r12 = extractu(r12,#11,#52 -32) p3 = cmp.gt(r28,#-1) } .Ldenorm_continue: { r11,p0 = sfrecipa(r8,r9) r10 = and(r8,#-2) r28 = #1 r12 = sub(r12,r13) } { r10 -= sfmpy(r11,r9):lib r1 = insert(r28,#11 +1,#52 -32) r13 = ##0x00800000 << 3 } { r11 += sfmpy(r11,r10):lib r3 = insert(r28,#11 +1,#52 -32) r10 = and(r8,#-2) } { r10 -= sfmpy(r11,r9):lib r5 = #-0x3ff +1 r4 = #0x3ff -1 } { r11 += sfmpy(r11,r10):lib p1 = cmp.gt(r12,r5) p1 = !cmp.gt(r12,r4) } { r13 = insert(r11,#23,#3) r5:4 = #0 r12 = add(r12,#-61) } { r13 = add(r13,#((-3) << 3)) } { r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASL(r7:6, # ( 14 )); r1:0 -= asl(r15:14, # 32); } { r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 1 )); r1:0 -= asl(r15:14, # 32); } { r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 16 )); r1:0 -= asl(r15:14, # 32); } { r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 31 )); r1:0 -= asl(r15:14, # 32); r7:6=# ( 0 ); } { r15:14 = sub(r1:0,r3:2) p0 = cmp.gtu(r3:2,r1:0) if (!p0.new) r6 = #2 } { r5:4 = add(r5:4,r7:6) if (!p0) r1:0 = r15:14 r15:14 = #0 } { p0 = cmp.eq(r1:0,r15:14) if (!p0.new) r4 = or(r4,r28) } { r7:6 = neg(r5:4) } { if (!p3) 
r5:4 = r7:6 } { r1:0 = convert_d2df(r5:4) if (!p1) jump .Ldiv_ovf_unf } { r1 += asl(r12,#52 -32) jumpr r31 } .Ldiv_ovf_unf: { r1 += asl(r12,#52 -32) r13 = extractu(r1,#11,#52 -32) } { r7:6 = abs(r5:4) r12 = add(r12,r13) } { p0 = cmp.gt(r12,##0x3ff +0x3ff) if (p0.new) jump:nt .Ldiv_ovf } { p0 = cmp.gt(r12,#0) if (p0.new) jump:nt .Lpossible_unf2 } { r13 = add(clb(r7:6),#-1) r12 = sub(#7,r12) r10 = USR r11 = #63 } { r13 = min(r12,r11) r11 = or(r10,#0x030) r7:6 = asl(r7:6,r13) r12 = #0 } { r15:14 = extractu(r7:6,r13:12) r7:6 = lsr(r7:6,r13) r3:2 = #1 } { p0 = cmp.gtu(r3:2,r15:14) if (!p0.new) r6 = or(r2,r6) r7 = setbit(r7,#52 -32+4) } { r5:4 = neg(r7:6) p0 = bitsclr(r6,#(1<<4)-1) if (!p0.new) r10 = r11 } { USR = r10 if (p3) r5:4 = r7:6 r10 = #-0x3ff -(52 +4) } { r1:0 = convert_d2df(r5:4) } { r1 += asl(r10,#52 -32) jumpr r31 } .Lpossible_unf2: { r3:2 = extractu(r1:0,#63,#0) r15:14 = combine(##0x00100000,#0) r10 = #0x7FFF } { p0 = dfcmp.eq(r15:14,r3:2) p0 = bitsset(r7,r10) } { if (!p0) jumpr r31 r10 = USR } { r10 = or(r10,#0x30) } { USR = r10 } { p0 = dfcmp.eq(r1:0,r1:0) jumpr r31 } .Ldiv_ovf: { r10 = USR r3:2 = combine(##0x7fefffff,#-1) r1 = mux(p3,#0,#-1) } { r7:6 = combine(##0x7ff00000,#0) r5 = extractu(r10,#2,#22) r10 = or(r10,#0x28) } { USR = r10 r5 ^= lsr(r1,#31) r4 = r5 } { p0 = !cmp.eq(r4,#1) p0 = !cmp.eq(r5,#2) if (p0.new) r3:2 = r7:6 p0 = dfcmp.eq(r3:2,r3:2) } { r1:0 = insert(r3:2,#63,#0) jumpr r31 } .Ldiv_abnormal: { p0 = dfclass(r1:0,#0x0F) p0 = dfclass(r3:2,#0x0F) p3 = cmp.gt(r28,#-1) } { p1 = dfclass(r1:0,#0x08) p1 = dfclass(r3:2,#0x08) } { p2 = dfclass(r1:0,#0x01) p2 = dfclass(r3:2,#0x01) } { if (!p0) jump .Ldiv_nan if (p1) jump .Ldiv_invalid } { if (p2) jump .Ldiv_invalid } { p2 = dfclass(r1:0,#(0x0F ^ 0x01)) p2 = dfclass(r3:2,#(0x0F ^ 0x08)) } { p1 = dfclass(r1:0,#(0x0F ^ 0x08)) p1 = dfclass(r3:2,#(0x0F ^ 0x01)) } { if (!p2) jump .Ldiv_zero_result if (!p1) jump .Ldiv_inf_result } { p0 = dfclass(r1:0,#0x02) p1 = dfclass(r3:2,#0x02) r10 = ##0x00100000 } { 
r13:12 = combine(r3,r1) r1 = insert(r10,#11 +1,#52 -32) r3 = insert(r10,#11 +1,#52 -32) } { if (p0) r1 = or(r1,r10) if (p1) r3 = or(r3,r10) } { r5 = add(clb(r1:0),#-11) r4 = add(clb(r3:2),#-11) r10 = #1 } { r12 = extractu(r12,#11,#52 -32) r13 = extractu(r13,#11,#52 -32) } { r1:0 = asl(r1:0,r5) r3:2 = asl(r3:2,r4) if (!p0) r12 = sub(r10,r5) if (!p1) r13 = sub(r10,r4) } { r7:6 = extractu(r3:2,#23,#52 -23) } { r9 = or(r8,r6) jump .Ldenorm_continue } .Ldiv_zero_result: { r1 = xor(r1,r3) r3:2 = #0 } { r1:0 = insert(r3:2,#63,#0) jumpr r31 } .Ldiv_inf_result: { p2 = dfclass(r3:2,#0x01) p2 = dfclass(r1:0,#(0x0F ^ 0x08)) } { r10 = USR if (!p2) jump 1f r1 = xor(r1,r3) } { r10 = or(r10,#0x04) } { USR = r10 } 1: { r3:2 = combine(##0x7ff00000,#0) p0 = dfcmp.uo(r3:2,r3:2) } { r1:0 = insert(r3:2,#63,#0) jumpr r31 } .Ldiv_nan: { p0 = dfclass(r1:0,#0x10) p1 = dfclass(r3:2,#0x10) if (!p0.new) r1:0 = r3:2 if (!p1.new) r3:2 = r1:0 } { r5 = convert_df2sf(r1:0) r4 = convert_df2sf(r3:2) } { r1:0 = #-1 jumpr r31 } .Ldiv_invalid: { r10 = ##0x7f800001 } { r1:0 = convert_sf2df(r10) jumpr r31 } .size __hexagon_divdf3,.-__hexagon_divdf3
Ruanyx1823/rust-merge-sve
5,120
library/compiler-builtins/compiler-builtins/src/hexagon/fastmath2_dlib_asm.s
.text .global __hexagon_fast2_dadd_asm .type __hexagon_fast2_dadd_asm, @function __hexagon_fast2_dadd_asm: .falign { R7:6 = VABSDIFFH(R1:0, R3:2) R9 = #62 R4 = SXTH(R0) R5 = SXTH(R2) } { R6 = SXTH(R6) P0 = CMP.GT(R4, R5); if ( P0.new) R8 = add(R4, #1) if (!P0.new) R8 = add(R5, #1) } { if ( P0) R4 = #1 if (!P0) R5 = #1 R0.L = #0 R6 = MIN(R6, R9) } { if (!P0) R4 = add(R6, #1) if ( P0) R5 = add(R6, #1) R2.L = #0 R11:10 = #0 } { R1:0 = ASR(R1:0, R4) R3:2 = ASR(R3:2, R5) } { R1:0 = add(R1:0, R3:2) R10.L = #0x8001 } { R4 = clb(R1:0) R9 = #58 } { R4 = add(R4, #-1) p0 = cmp.gt(R4, R9) } { R1:0 = ASL(R1:0, R4) R8 = SUB(R8, R4) if(p0) jump .Ldenorma } { R0 = insert(R8, #16, #0) jumpr r31 } .Ldenorma: { R1:0 = R11:10 jumpr r31 } .text .global __hexagon_fast2_dsub_asm .type __hexagon_fast2_dsub_asm, @function __hexagon_fast2_dsub_asm: .falign { R7:6 = VABSDIFFH(R1:0, R3:2) R9 = #62 R4 = SXTH(R0) R5 = SXTH(R2) } { R6 = SXTH(R6) P0 = CMP.GT(R4, R5); if ( P0.new) R8 = add(R4, #1) if (!P0.new) R8 = add(R5, #1) } { if ( P0) R4 = #1 if (!P0) R5 = #1 R0.L = #0 R6 = MIN(R6, R9) } { if (!P0) R4 = add(R6, #1) if ( P0) R5 = add(R6, #1) R2.L = #0 R11:10 = #0 } { R1:0 = ASR(R1:0, R4) R3:2 = ASR(R3:2, R5) } { R1:0 = sub(R1:0, R3:2) R10.L = #0x8001 } { R4 = clb(R1:0) R9 = #58 } { R4 = add(R4, #-1) p0 = cmp.gt(R4, R9) } { R1:0 = ASL(R1:0, R4) R8 = SUB(R8, R4) if(p0) jump .Ldenorm } { R0 = insert(R8, #16, #0) jumpr r31 } .Ldenorm: { R1:0 = R11:10 jumpr r31 } .text .global __hexagon_fast2_dmpy_asm .type __hexagon_fast2_dmpy_asm, @function __hexagon_fast2_dmpy_asm: .falign { R13= lsr(R2, #16) R5 = sxth(R2) R4 = sxth(R0) R12= lsr(R0, #16) } { R11:10 = mpy(R1, R3) R7:6 = mpy(R1, R13) R0.L = #0x0 R15:14 = #0 } { R11:10 = add(R11:10, R11:10) R7:6 += mpy(R3, R12) R2.L = #0x0 R15.H = #0x8000 } { R7:6 = asr(R7:6, #15) R12.L = #0x8001 p1 = cmp.eq(R1:0, R3:2) } { R7:6 = add(R7:6, R11:10) R8 = add(R4, R5) p2 = cmp.eq(R1:0, R15:14) } { R9 = clb(R7:6) R3:2 = abs(R7:6) R11 = #58 } { p1 = and(p1, p2) R8 = 
sub(R8, R9) R9 = add(R9, #-1) p0 = cmp.gt(R9, R11) } { R8 = add(R8, #1) R1:0 = asl(R7:6, R9) if(p1) jump .Lsat } { R0 = insert(R8,#16, #0) if(!p0) jumpr r31 } { R0 = insert(R12,#16, #0) jumpr r31 } .Lsat: { R1:0 = #-1 } { R1:0 = lsr(R1:0, #1) } { R0 = insert(R8,#16, #0) jumpr r31 } .text .global __hexagon_fast2_qd2f_asm .type __hexagon_fast2_qd2f_asm, @function __hexagon_fast2_qd2f_asm: .falign { R3 = abs(R1):sat R4 = sxth(R0) R5 = #0x40 R6.L = #0xffc0 } { R0 = extractu(R3, #8, #0) p2 = cmp.gt(R4, #126) p3 = cmp.ge(R4, #-126) R6.H = #0x7fff } { p1 = cmp.eq(R0,#0x40) if(p1.new) R5 = #0 R4 = add(R4, #126) if(!p3) jump .Lmin } { p0 = bitsset(R3, R6) R0.L = #0x0000 R2 = add(R3, R5) R7 = lsr(R6, #8) } { if(p0) R4 = add(R4, #1) if(p0) R3 = #0 R2 = lsr(R2, #7) R0.H = #0x8000 } { R0 = and(R0, R1) R6 &= asl(R4, #23) if(!p0) R3 = and(R2, R7) if(p2) jump .Lmax } { R0 += add(R6, R3) jumpr r31 } .Lmax: { R0.L = #0xffff; } { R0.H = #0x7f7f; jumpr r31 } .Lmin: { R0 = #0x0 jumpr r31 } .text .global __hexagon_fast2_f2qd_asm .type __hexagon_fast2_f2qd_asm, @function __hexagon_fast2_f2qd_asm: .falign { R1 = asl(R0, #7) p0 = tstbit(R0, #31) R5:4 = #0 R3 = add(R0,R0) } { R1 = setbit(R1, #30) R0= extractu(R0,#8,#23) R4.L = #0x8001 p1 = cmp.eq(R3, #0) } { R1= extractu(R1, #31, #0) R0= add(R0, #-126) R2 = #0 if(p1) jump .Lminqd } { R0 = zxth(R0) if(p0) R1= sub(R2, R1) jumpr r31 } .Lminqd: { R1:0 = R5:4 jumpr r31 }
Ruanyx1823/rust-merge-sve
11,809
library/std/src/sys/pal/sgx/abi/entry.S
/* This symbol is used at runtime to figure out the virtual address that the */ /* enclave is loaded at. */ .section absolute .global IMAGE_BASE IMAGE_BASE: .section ".note.x86_64-fortanix-unknown-sgx", "", @note .align 4 .long 1f - 0f /* name length (not including padding) */ .long 3f - 2f /* desc length (not including padding) */ .long 1 /* type = NT_VERSION */ 0: .asciz "toolchain-version" /* name */ 1: .align 4 2: .long 1 /* desc - toolchain version number, 32-bit LE */ 3: .align 4 .section .rodata /* The XSAVE area needs to be a large chunk of readable memory, but since we are */ /* going to restore everything to its initial state (XSTATE_BV=0), only certain */ /* parts need to have a defined value. In particular: */ /* */ /* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */ /* RFBM[2] is set, regardless of the value of XSTATE_BV */ /* * XSAVE header */ .align 64 .Lxsave_clear: .org .+24 .Lxsave_mxcsr: .short 0x1fbf /* We can store a bunch of data in the gap between MXCSR and the XSAVE header */ /* The following symbols point at read-only data that will be filled in by the */ /* post-linker. */ /* When using this macro, don't forget to adjust the linker version script! 
*/ .macro globvar name:req size:req .global \name .protected \name .align \size .size \name , \size \name : .org .+\size .endm /* The base address (relative to enclave start) of the heap area */ globvar HEAP_BASE 8 /* The heap size in bytes */ globvar HEAP_SIZE 8 /* Value of the RELA entry in the dynamic table */ globvar RELA 8 /* Value of the RELACOUNT entry in the dynamic table */ globvar RELACOUNT 8 /* The enclave size in bytes */ globvar ENCLAVE_SIZE 8 /* The base address (relative to enclave start) of the enclave configuration area */ globvar CFGDATA_BASE 8 /* Non-zero if debugging is enabled, zero otherwise */ globvar DEBUG 1 /* The base address (relative to enclave start) of the enclave text section */ globvar TEXT_BASE 8 /* The size in bytes of enclave text section */ globvar TEXT_SIZE 8 /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_OFFSET 8 /* The size in bytes of enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_LEN 8 /* The base address (relative to enclave start) of the enclave .eh_frame section */ globvar EH_FRM_OFFSET 8 /* The size in bytes of enclave .eh_frame section */ globvar EH_FRM_LEN 8 .org .Lxsave_clear+512 .Lxsave_header: .int 0, 0 /* XSTATE_BV */ .int 0, 0 /* XCOMP_BV */ .org .+48 /* reserved bits */ .data .Laborted: .byte 0 /* TCS local storage section */ .equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */ .equ tcsls_flags, 0x08 /* initialized by loader */ .equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */ .equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */ /* 14 unused bits */ .equ tcsls_user_fcw, 0x0a .equ tcsls_user_mxcsr, 0x0c .equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */ .equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */ .equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */ .equ tcsls_user_rsp, 0x28 .equ tcsls_user_retip, 0x30 .equ tcsls_user_rbp, 
0x38 .equ tcsls_user_r12, 0x40 .equ tcsls_user_r13, 0x48 .equ tcsls_user_r14, 0x50 .equ tcsls_user_r15, 0x58 .equ tcsls_tls_ptr, 0x60 .equ tcsls_tcs_addr, 0x68 .macro load_tcsls_flag_secondary_bool reg:req comments:vararg .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */ .abort .endif mov $(1<<tcsls_flag_secondary),%e\reg and %gs:tcsls_flags,%\reg .endm /* We place the ELF entry point in a separate section so it can be removed by elf2sgxs */ .section .text_no_sgx, "ax" .Lelf_entry_error_msg: .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n" .Lelf_entry_error_msg_end: .global elf_entry .type elf_entry,function elf_entry: /* print error message */ movq $2,%rdi /* write to stderr (fd 2) */ lea .Lelf_entry_error_msg(%rip),%rsi movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx .Lelf_entry_call: movq $1,%rax /* write() syscall */ syscall test %rax,%rax jle .Lelf_exit /* exit on error */ add %rax,%rsi sub %rax,%rdx /* all chars written? */ jnz .Lelf_entry_call .Lelf_exit: movq $60,%rax /* exit() syscall */ movq $1,%rdi /* exit code 1 */ syscall ud2 /* should not be reached */ /* end elf_entry */ /* This code needs to be called *after* the enclave stack has been setup. */ /* There are 3 places where this needs to happen, so this is put in a macro. 
*/ .macro entry_sanitize_final /* Sanitize rflags received from user */ /* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ /* - AC flag: AEX on misaligned memory accesses leaks side channel info */ pushfq andq $~0x40400, (%rsp) popfq /* check for abort */ bt $0,.Laborted(%rip) jc .Lreentry_panic .endm .text .global sgx_entry .type sgx_entry,function sgx_entry: /* save user registers */ mov %rcx,%gs:tcsls_user_retip mov %rsp,%gs:tcsls_user_rsp mov %rbp,%gs:tcsls_user_rbp mov %r12,%gs:tcsls_user_r12 mov %r13,%gs:tcsls_user_r13 mov %r14,%gs:tcsls_user_r14 mov %r15,%gs:tcsls_user_r15 mov %rbx,%gs:tcsls_tcs_addr stmxcsr %gs:tcsls_user_mxcsr fnstcw %gs:tcsls_user_fcw /* check for debug buffer pointer */ testb $0xff,DEBUG(%rip) jz .Lskip_debug_init mov %r10,%gs:tcsls_debug_panic_buf_ptr .Lskip_debug_init: /* reset cpu state */ mov %rdx, %r10 mov $-1, %rax mov $-1, %rdx xrstor .Lxsave_clear(%rip) lfence mov %r10, %rdx /* check if returning from usercall */ mov %gs:tcsls_last_rsp,%r11 test %r11,%r11 jnz .Lusercall_ret /* setup stack */ mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */ /* here. This is fixed below under "adjust stack". 
*/ /* check for thread init */ bts $tcsls_flag_init_once,%gs:tcsls_flags jc .Lskip_init /* adjust stack */ lea IMAGE_BASE(%rip),%rax add %rax,%rsp mov %rsp,%gs:tcsls_tos entry_sanitize_final /* call tcs_init */ /* store caller-saved registers in callee-saved registers */ mov %rdi,%rbx mov %rsi,%r12 mov %rdx,%r13 mov %r8,%r14 mov %r9,%r15 load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */ call tcs_init /* reload caller-saved registers */ mov %rbx,%rdi mov %r12,%rsi mov %r13,%rdx mov %r14,%r8 mov %r15,%r9 jmp .Lafter_init .Lskip_init: entry_sanitize_final .Lafter_init: /* call into main entry point */ load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */ call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */ mov %rax,%rsi /* RSI = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ xor %rdi,%rdi /* RDI = normal exit */ .Lexit: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set later */ /* RCX overwritten by ENCLU */ /* RDX contains return value */ /* RSP set later */ /* RBP set later */ /* RDI contains exit mode */ /* RSI contains return value */ xor %r8,%r8 xor %r9,%r9 xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ .Lsgx_exit: /* clear extended register state */ mov %rdx, %rcx /* save RDX */ mov $-1, %rax mov %rax, %rdx xrstor .Lxsave_clear(%rip) mov %rcx, %rdx /* restore RDX */ /* clear flags */ pushq $0 popfq /* restore user registers */ mov %gs:tcsls_user_r12,%r12 mov %gs:tcsls_user_r13,%r13 mov %gs:tcsls_user_r14,%r14 mov %gs:tcsls_user_r15,%r15 mov %gs:tcsls_user_retip,%rbx mov %gs:tcsls_user_rsp,%rsp mov %gs:tcsls_user_rbp,%rbp fldcw %gs:tcsls_user_fcw ldmxcsr %gs:tcsls_user_mxcsr /* exit enclave */ mov $0x4,%eax /* EEXIT */ enclu /* end sgx_entry */ .Lreentry_panic: orq $8,%rsp jmp abort_reentry /* This *MUST* be called with 6 parameters, otherwise register information */ /* might leak! 
*/ .global usercall usercall: test %rcx,%rcx /* check `abort` function argument */ jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */ jmp .Lusercall_save_state /* non-aborting usercall */ .Lusercall_abort: /* set aborted bit */ movb $1,.Laborted(%rip) /* save registers in DEBUG mode, so that debugger can reconstruct the stack */ testb $0xff,DEBUG(%rip) jz .Lusercall_noreturn .Lusercall_save_state: /* save callee-saved state */ push %r15 push %r14 push %r13 push %r12 push %rbp push %rbx sub $8, %rsp fstcw 4(%rsp) stmxcsr (%rsp) movq %rsp,%gs:tcsls_last_rsp .Lusercall_noreturn: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set by sgx_exit */ /* RCX overwritten by ENCLU */ /* RDX contains parameter */ /* RSP set by sgx_exit */ /* RBP set by sgx_exit */ /* RDI contains parameter */ /* RSI contains parameter */ /* R8 contains parameter */ /* R9 contains parameter */ xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ /* extended registers/flags cleared by sgx_exit */ /* exit */ jmp .Lsgx_exit .Lusercall_ret: movq $0,%gs:tcsls_last_rsp /* restore callee-saved state, cf. "save" above */ mov %r11,%rsp /* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */ /* vector instructions is used. We omit the lfence here as one is required before */ /* the jmp instruction anyway. 
*/ ldmxcsr (%rsp) fldcw 4(%rsp) add $8, %rsp entry_sanitize_final pop %rbx pop %rbp pop %r12 pop %r13 pop %r14 pop %r15 /* return */ mov %rsi,%rax /* RAX = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ pop %r11 lfence jmp *%r11 /* The following functions need to be defined externally: ``` // Called by entry code on re-entry after exit extern "C" fn abort_reentry() -> !; // Called once when a TCS is first entered extern "C" fn tcs_init(secondary: bool); // Standard TCS entrypoint extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64); ``` */ .global get_tcs_addr get_tcs_addr: mov %gs:tcsls_tcs_addr,%rax pop %r11 lfence jmp *%r11 .global get_tls_ptr get_tls_ptr: mov %gs:tcsls_tls_ptr,%rax pop %r11 lfence jmp *%r11 .global set_tls_ptr set_tls_ptr: mov %rdi,%gs:tcsls_tls_ptr pop %r11 lfence jmp *%r11 .global take_debug_panic_buf_ptr take_debug_panic_buf_ptr: xor %rax,%rax xchg %gs:tcsls_debug_panic_buf_ptr,%rax pop %r11 lfence jmp *%r11
rugo/OpenSK
3,594
third_party/libtock-rs/runtime/asm/asm_arm.S
/* rt_header is defined by the general linker script (libtock_layout.ld). It has * the following layout: * * Field | Offset * ------------------------------------ * Address of the start symbol | 0 * Initial process break | 4 * Top of the stack | 8 * Size of .data | 12 * Start of .data in flash | 16 * Start of .data in ram | 20 * Size of .bss | 24 * Start of .bss in ram | 28 */ /* start is the entry point -- the first code executed by the kernel. The kernel * passes arguments through 4 registers: * * r0 Pointer to beginning of the process binary's code. The linker script * locates rt_header at this address. * * r1 Address of the beginning of the process's usable memory region. * r2 Size of the process' allocated memory region (including grant region) * r3 Process break provided by the kernel. * * We currently only use the value in r0. It is copied into r5 early on because * r0 is needed to invoke system calls. */ .section .start, "ax" .global start .thumb_func start: /* First, verify the process binary was loaded at the correct address. The * check is performed by comparing the program counter at the start to the * address of `start`, which is stored in rt_header. */ mov r4, pc /* r4 = address of .start + 4 (Thumb bit unset) */ mov r5, r0 /* Save rt_header; we use r0 for syscalls */ ldr r0, [r5, #0] /* r0 = rt_header.start */ add r0, #3 /* r0 = rt_header.start + 4 - 1 (for Thumb bit) */ cmp r0, r4 beq .Lset_brk /* Skip error handling if pc correct */ /* If the beq on the previous line did not jump, then the binary is not at * the correct location. Report the error via LowLevelDebug then exit. 
*/ mov r0, #8 /* LowLevelDebug driver number */ mov r1, #1 /* Command: print alert code */ mov r2, #2 /* Alert code 2 (incorrect location */ svc 2 /* Execute `command` */ mov r0, #0 /* Operation: exit-terminate */ svc 6 /* Execute `exit` */ .Lset_brk: /* memop(): set brk to rt_header's initial break value */ mov r0, #0 /* operation: set break */ ldr r1, [r5, #4] /* rt_header`s initial process break */ svc 5 /* call `memop` */ /* Set the stack pointer */ ldr r0, [r5, #8] /* r0 = rt_header._stack_top */ mov sp, r0 /* Copy .data into place */ ldr r0, [r5, #12] /* remaining = rt_header.data_size */ cbz r0, .Lzero_bss /* Jump to zero_bss if remaining == 0 */ ldr r1, [r5, #16] /* src = rt_header.data_flash_start */ ldr r2, [r5, #20] /* dest = rt_header.data_ram_start */ .Ldata_loop_body: ldr r3, [r1] /* r3 = *src */ str r3, [r2] /* *(dest) = r3 */ sub r0, #4 /* remaining -= 4 */ add r1, #4 /* src += 4 */ add r2, #4 /* dest += 4 */ cmp r0, #0 bne .Ldata_loop_body /* Iterate again if remaining != 0 */ .Lzero_bss: ldr r0, [r5, #24] /* remaining = rt_header.bss_size */ cbz r0, .Lcall_rust_start /* Jump to call_rust_start if remaining == 0 */ ldr r1, [r5, #28] /* dest = rt_header.bss_start */ mov r2, #0 /* r2 = 0 */ .Lbss_loop_body: strb r2, [r1] /* *(dest) = r2 = 0 */ sub r0, #1 /* remaining -= 1 */ add r1, #1 /* dest += 1 */ cmp r0, #0 bne .Lbss_loop_body /* Iterate again if remaining != 0 */ .Lcall_rust_start: bl rust_start
rugo/OpenSK
3,548
third_party/libtock-rs/runtime/asm/asm_riscv32.S
/* rt_header is defined by the general linker script (libtock_layout.ld). It has * the following layout: * * Field | Offset * ------------------------------------ * Address of the start symbol | 0 * Initial process break | 4 * Top of the stack | 8 * Size of .data | 12 * Start of .data in flash | 16 * Start of .data in ram | 20 * Size of .bss | 24 * Start of .bss in ram | 28 */ /* start is the entry point -- the first code executed by the kernel. The kernel * passes arguments through 4 registers: * * a0 Pointer to beginning of the process binary's code. The linker script * locates rt_header at this address. * * a1 Address of the beginning of the process's usable memory region. * a2 Size of the process' allocated memory region (including grant region) * a3 Process break provided by the kernel. * * We currently only use the value in a0. It is copied into a5 early on because * a0-a4 are needed to invoke system calls. */ .section .start, "ax" .globl start start: /* First, verify the process binary was loaded at the correct address. The * check is performed by comparing the program counter at the start to the * address of `start`, which is stored in rt_header. */ auipc s0, 0 /* s0 = pc */ mv a5, a0 /* Save rt_header so syscalls don't overwrite it */ lw s1, 0(a5) /* s1 = rt_header.start */ beq s0, s1, .Lset_brk /* Skip error handling code if pc is correct */ /* If the beq on the previous line did not jump, then the binary is not at * the correct location. Report the error via LowLevelDebug then exit. 
*/ li a0, 8 /* LowLevelDebug driver number */ li a1, 1 /* Command: Print alert code */ li a2, 2 /* Alert code 2 (incorrect location) */ li a4, 2 /* `command` class */ ecall li a0, 0 /* exit-terminate */ /* TODO: Set a completion code, once completion codes are decided */ li a4, 6 /* `exit` class */ ecall .Lset_brk: /* memop(): set brk to rt_header's initial break value */ li a0, 0 /* operation: set break */ lw a1, 4(a5) /* rt_header's initial process break */ li a4, 5 /* `memop` class */ ecall /* Set the stack pointer */ lw sp, 8(a5) /* sp = rt_header._stack_top */ /* Copy .data into place. */ lw a0, 12(a5) /* remaining = rt_header.data_size */ beqz a0, .Lzero_bss /* Jump to zero_bss if remaining is zero */ lw a1, 16(a5) /* src = rt_header.data_flash_start */ lw a2, 20(a5) /* dest = rt_header.data_ram_start */ .Ldata_loop_body: lw a3, 0(a1) /* a3 = *src */ sw a3, 0(a2) /* *dest = a3 */ addi a0, a0, -4 /* remaining -= 4 */ addi a1, a1, 4 /* src += 4 */ addi a2, a2, 4 /* dest += 4 */ bnez a0, .Ldata_loop_body /* Iterate again if remaining != 0 */ .Lzero_bss: lw a0, 24(a5) /* remaining = rt_header.bss_size */ beqz a0, .Lcall_rust_start /* Jump to call_Main if remaining is zero */ lw a1, 28(a5) /* dest = rt_header.bss_start */ .Lbss_loop_body: sb zero, 0(a1) /* *dest = zero */ addi a0, a0, -1 /* remaining -= 1 */ addi a1, a1, 1 /* dest += 1 */ bnez a0, .Lbss_loop_body /* Iterate again if remaining != 0 */ .Lcall_rust_start: /* Note: rust_start must be a diverging function (i.e. return `!`) */ jal rust_start
Rudy-Orozco/RudyProjects
2,963
Past_Projects/Operating_Sys/xv6-encrypted-filesystem-emus-main/kernel/kernelvec.S
# # interrupts and exceptions while in supervisor # mode come here. # # the current stack is a kernel stack. # push all registers, call kerneltrap(). # when kerneltrap() returns, restore registers, return. # .globl kerneltrap .globl kernelvec .align 4 kernelvec: # make room to save registers. addi sp, sp, -256 # save the registers. sd ra, 0(sp) sd sp, 8(sp) sd gp, 16(sp) sd tp, 24(sp) sd t0, 32(sp) sd t1, 40(sp) sd t2, 48(sp) sd s0, 56(sp) sd s1, 64(sp) sd a0, 72(sp) sd a1, 80(sp) sd a2, 88(sp) sd a3, 96(sp) sd a4, 104(sp) sd a5, 112(sp) sd a6, 120(sp) sd a7, 128(sp) sd s2, 136(sp) sd s3, 144(sp) sd s4, 152(sp) sd s5, 160(sp) sd s6, 168(sp) sd s7, 176(sp) sd s8, 184(sp) sd s9, 192(sp) sd s10, 200(sp) sd s11, 208(sp) sd t3, 216(sp) sd t4, 224(sp) sd t5, 232(sp) sd t6, 240(sp) # call the C trap handler in trap.c call kerneltrap # restore registers. ld ra, 0(sp) ld sp, 8(sp) ld gp, 16(sp) # not tp (contains hartid), in case we moved CPUs ld t0, 32(sp) ld t1, 40(sp) ld t2, 48(sp) ld s0, 56(sp) ld s1, 64(sp) ld a0, 72(sp) ld a1, 80(sp) ld a2, 88(sp) ld a3, 96(sp) ld a4, 104(sp) ld a5, 112(sp) ld a6, 120(sp) ld a7, 128(sp) ld s2, 136(sp) ld s3, 144(sp) ld s4, 152(sp) ld s5, 160(sp) ld s6, 168(sp) ld s7, 176(sp) ld s8, 184(sp) ld s9, 192(sp) ld s10, 200(sp) ld s11, 208(sp) ld t3, 216(sp) ld t4, 224(sp) ld t5, 232(sp) ld t6, 240(sp) addi sp, sp, 256 # return to whatever we were doing in the kernel. sret # # machine-mode timer interrupt. # .globl timervec .align 4 timervec: # start.c has set up the memory that mscratch points to: # scratch[0,8,16] : register save area. # scratch[24] : address of CLINT's MTIMECMP register. # scratch[32] : desired interval between interrupts. csrrw a0, mscratch, a0 sd a1, 0(a0) sd a2, 8(a0) sd a3, 16(a0) # schedule the next timer interrupt # by adding interval to mtimecmp. 
ld a1, 24(a0) # CLINT_MTIMECMP(hart) ld a2, 32(a0) # interval ld a3, 0(a1) add a3, a3, a2 sd a3, 0(a1) # arrange for a supervisor software interrupt # after this handler returns. li a1, 2 csrw sip, a1 ld a3, 16(a0) ld a2, 8(a0) ld a1, 0(a0) csrrw a0, mscratch, a0 mret
Rudy-Orozco/RudyProjects
3,839
Past_Projects/Operating_Sys/xv6-encrypted-filesystem-emus-main/kernel/trampoline.S
# # low-level code to handle traps from user space into # the kernel, and returns from kernel to user. # # the kernel maps the page holding this code # at the same virtual address (TRAMPOLINE) # in user and kernel space so that it continues # to work when it switches page tables. # kernel.ld causes this code to start at # a page boundary. # #include "riscv.h" #include "memlayout.h" .section trampsec .globl trampoline trampoline: .align 4 .globl uservec uservec: # # trap.c sets stvec to point here, so # traps from user space start here, # in supervisor mode, but with a # user page table. # # save user a0 in sscratch so # a0 can be used to get at TRAPFRAME. csrw sscratch, a0 # each process has a separate p->trapframe memory area, # but it's mapped to the same virtual address # (TRAPFRAME) in every process's user page table. li a0, TRAPFRAME # save the user registers in TRAPFRAME sd ra, 40(a0) sd sp, 48(a0) sd gp, 56(a0) sd tp, 64(a0) sd t0, 72(a0) sd t1, 80(a0) sd t2, 88(a0) sd s0, 96(a0) sd s1, 104(a0) sd a1, 120(a0) sd a2, 128(a0) sd a3, 136(a0) sd a4, 144(a0) sd a5, 152(a0) sd a6, 160(a0) sd a7, 168(a0) sd s2, 176(a0) sd s3, 184(a0) sd s4, 192(a0) sd s5, 200(a0) sd s6, 208(a0) sd s7, 216(a0) sd s8, 224(a0) sd s9, 232(a0) sd s10, 240(a0) sd s11, 248(a0) sd t3, 256(a0) sd t4, 264(a0) sd t5, 272(a0) sd t6, 280(a0) # save the user a0 in p->trapframe->a0 csrr t0, sscratch sd t0, 112(a0) # initialize kernel stack pointer, from p->trapframe->kernel_sp ld sp, 8(a0) # make tp hold the current hartid, from p->trapframe->kernel_hartid ld tp, 32(a0) # load the address of usertrap(), from p->trapframe->kernel_trap ld t0, 16(a0) # fetch the kernel page table address, from p->trapframe->kernel_satp. ld t1, 0(a0) # wait for any previous memory operations to complete, so that # they use the user page table. sfence.vma zero, zero # install the kernel page table. csrw satp, t1 # flush now-stale user entries from the TLB. 
sfence.vma zero, zero # jump to usertrap(), which does not return jr t0 .globl userret userret: # userret(pagetable) # called by usertrapret() in trap.c to # switch from kernel to user. # a0: user page table, for satp. # switch to the user page table. sfence.vma zero, zero csrw satp, a0 sfence.vma zero, zero li a0, TRAPFRAME # restore all but a0 from TRAPFRAME ld ra, 40(a0) ld sp, 48(a0) ld gp, 56(a0) ld tp, 64(a0) ld t0, 72(a0) ld t1, 80(a0) ld t2, 88(a0) ld s0, 96(a0) ld s1, 104(a0) ld a1, 120(a0) ld a2, 128(a0) ld a3, 136(a0) ld a4, 144(a0) ld a5, 152(a0) ld a6, 160(a0) ld a7, 168(a0) ld s2, 176(a0) ld s3, 184(a0) ld s4, 192(a0) ld s5, 200(a0) ld s6, 208(a0) ld s7, 216(a0) ld s8, 224(a0) ld s9, 232(a0) ld s10, 240(a0) ld s11, 248(a0) ld t3, 256(a0) ld t4, 264(a0) ld t5, 272(a0) ld t6, 280(a0) # restore user a0 ld a0, 112(a0) # return to user mode and user pc. # usertrapret() set up sstatus and sepc. sret
Rudy-Orozco/RudyProjects
2,963
Past_Projects/Operating_Sys/scheduler-Rudy-Orozco-main/kernel/kernelvec.S
# # interrupts and exceptions while in supervisor # mode come here. # # the current stack is a kernel stack. # push all registers, call kerneltrap(). # when kerneltrap() returns, restore registers, return. # .globl kerneltrap .globl kernelvec .align 4 kernelvec: # make room to save registers. addi sp, sp, -256 # save the registers. sd ra, 0(sp) sd sp, 8(sp) sd gp, 16(sp) sd tp, 24(sp) sd t0, 32(sp) sd t1, 40(sp) sd t2, 48(sp) sd s0, 56(sp) sd s1, 64(sp) sd a0, 72(sp) sd a1, 80(sp) sd a2, 88(sp) sd a3, 96(sp) sd a4, 104(sp) sd a5, 112(sp) sd a6, 120(sp) sd a7, 128(sp) sd s2, 136(sp) sd s3, 144(sp) sd s4, 152(sp) sd s5, 160(sp) sd s6, 168(sp) sd s7, 176(sp) sd s8, 184(sp) sd s9, 192(sp) sd s10, 200(sp) sd s11, 208(sp) sd t3, 216(sp) sd t4, 224(sp) sd t5, 232(sp) sd t6, 240(sp) # call the C trap handler in trap.c call kerneltrap # restore registers. ld ra, 0(sp) ld sp, 8(sp) ld gp, 16(sp) # not tp (contains hartid), in case we moved CPUs ld t0, 32(sp) ld t1, 40(sp) ld t2, 48(sp) ld s0, 56(sp) ld s1, 64(sp) ld a0, 72(sp) ld a1, 80(sp) ld a2, 88(sp) ld a3, 96(sp) ld a4, 104(sp) ld a5, 112(sp) ld a6, 120(sp) ld a7, 128(sp) ld s2, 136(sp) ld s3, 144(sp) ld s4, 152(sp) ld s5, 160(sp) ld s6, 168(sp) ld s7, 176(sp) ld s8, 184(sp) ld s9, 192(sp) ld s10, 200(sp) ld s11, 208(sp) ld t3, 216(sp) ld t4, 224(sp) ld t5, 232(sp) ld t6, 240(sp) addi sp, sp, 256 # return to whatever we were doing in the kernel. sret # # machine-mode timer interrupt. # .globl timervec .align 4 timervec: # start.c has set up the memory that mscratch points to: # scratch[0,8,16] : register save area. # scratch[24] : address of CLINT's MTIMECMP register. # scratch[32] : desired interval between interrupts. csrrw a0, mscratch, a0 sd a1, 0(a0) sd a2, 8(a0) sd a3, 16(a0) # schedule the next timer interrupt # by adding interval to mtimecmp. 
ld a1, 24(a0) # CLINT_MTIMECMP(hart) ld a2, 32(a0) # interval ld a3, 0(a1) add a3, a3, a2 sd a3, 0(a1) # arrange for a supervisor software interrupt # after this handler returns. li a1, 2 csrw sip, a1 ld a3, 16(a0) ld a2, 8(a0) ld a1, 0(a0) csrrw a0, mscratch, a0 mret
Rudy-Orozco/RudyProjects
3,839
Past_Projects/Operating_Sys/scheduler-Rudy-Orozco-main/kernel/trampoline.S
# # low-level code to handle traps from user space into # the kernel, and returns from kernel to user. # # the kernel maps the page holding this code # at the same virtual address (TRAMPOLINE) # in user and kernel space so that it continues # to work when it switches page tables. # kernel.ld causes this code to start at # a page boundary. # #include "riscv.h" #include "memlayout.h" .section trampsec .globl trampoline trampoline: .align 4 .globl uservec uservec: # # trap.c sets stvec to point here, so # traps from user space start here, # in supervisor mode, but with a # user page table. # # save user a0 in sscratch so # a0 can be used to get at TRAPFRAME. csrw sscratch, a0 # each process has a separate p->trapframe memory area, # but it's mapped to the same virtual address # (TRAPFRAME) in every process's user page table. li a0, TRAPFRAME # save the user registers in TRAPFRAME sd ra, 40(a0) sd sp, 48(a0) sd gp, 56(a0) sd tp, 64(a0) sd t0, 72(a0) sd t1, 80(a0) sd t2, 88(a0) sd s0, 96(a0) sd s1, 104(a0) sd a1, 120(a0) sd a2, 128(a0) sd a3, 136(a0) sd a4, 144(a0) sd a5, 152(a0) sd a6, 160(a0) sd a7, 168(a0) sd s2, 176(a0) sd s3, 184(a0) sd s4, 192(a0) sd s5, 200(a0) sd s6, 208(a0) sd s7, 216(a0) sd s8, 224(a0) sd s9, 232(a0) sd s10, 240(a0) sd s11, 248(a0) sd t3, 256(a0) sd t4, 264(a0) sd t5, 272(a0) sd t6, 280(a0) # save the user a0 in p->trapframe->a0 csrr t0, sscratch sd t0, 112(a0) # initialize kernel stack pointer, from p->trapframe->kernel_sp ld sp, 8(a0) # make tp hold the current hartid, from p->trapframe->kernel_hartid ld tp, 32(a0) # load the address of usertrap(), from p->trapframe->kernel_trap ld t0, 16(a0) # fetch the kernel page table address, from p->trapframe->kernel_satp. ld t1, 0(a0) # wait for any previous memory operations to complete, so that # they use the user page table. sfence.vma zero, zero # install the kernel page table. csrw satp, t1 # flush now-stale user entries from the TLB. 
sfence.vma zero, zero # jump to usertrap(), which does not return jr t0 .globl userret userret: # userret(pagetable) # called by usertrapret() in trap.c to # switch from kernel to user. # a0: user page table, for satp. # switch to the user page table. sfence.vma zero, zero csrw satp, a0 sfence.vma zero, zero li a0, TRAPFRAME # restore all but a0 from TRAPFRAME ld ra, 40(a0) ld sp, 48(a0) ld gp, 56(a0) ld tp, 64(a0) ld t0, 72(a0) ld t1, 80(a0) ld t2, 88(a0) ld s0, 96(a0) ld s1, 104(a0) ld a1, 120(a0) ld a2, 128(a0) ld a3, 136(a0) ld a4, 144(a0) ld a5, 152(a0) ld a6, 160(a0) ld a7, 168(a0) ld s2, 176(a0) ld s3, 184(a0) ld s4, 192(a0) ld s5, 200(a0) ld s6, 208(a0) ld s7, 216(a0) ld s8, 224(a0) ld s9, 232(a0) ld s10, 240(a0) ld s11, 248(a0) ld t3, 256(a0) ld t4, 264(a0) ld t5, 272(a0) ld t6, 280(a0) # restore user a0 ld a0, 112(a0) # return to user mode and user pc. # usertrapret() set up sstatus and sepc. sret
rust-bots/rust-dummp
11,809
library/std/src/sys/pal/sgx/abi/entry.S
/* This symbol is used at runtime to figure out the virtual address that the */ /* enclave is loaded at. */ .section absolute .global IMAGE_BASE IMAGE_BASE: .section ".note.x86_64-fortanix-unknown-sgx", "", @note .align 4 .long 1f - 0f /* name length (not including padding) */ .long 3f - 2f /* desc length (not including padding) */ .long 1 /* type = NT_VERSION */ 0: .asciz "toolchain-version" /* name */ 1: .align 4 2: .long 1 /* desc - toolchain version number, 32-bit LE */ 3: .align 4 .section .rodata /* The XSAVE area needs to be a large chunk of readable memory, but since we are */ /* going to restore everything to its initial state (XSTATE_BV=0), only certain */ /* parts need to have a defined value. In particular: */ /* */ /* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */ /* RFBM[2] is set, regardless of the value of XSTATE_BV */ /* * XSAVE header */ .align 64 .Lxsave_clear: .org .+24 .Lxsave_mxcsr: .short 0x1fbf /* We can store a bunch of data in the gap between MXCSR and the XSAVE header */ /* The following symbols point at read-only data that will be filled in by the */ /* post-linker. */ /* When using this macro, don't forget to adjust the linker version script! 
*/ .macro globvar name:req size:req .global \name .protected \name .align \size .size \name , \size \name : .org .+\size .endm /* The base address (relative to enclave start) of the heap area */ globvar HEAP_BASE 8 /* The heap size in bytes */ globvar HEAP_SIZE 8 /* Value of the RELA entry in the dynamic table */ globvar RELA 8 /* Value of the RELACOUNT entry in the dynamic table */ globvar RELACOUNT 8 /* The enclave size in bytes */ globvar ENCLAVE_SIZE 8 /* The base address (relative to enclave start) of the enclave configuration area */ globvar CFGDATA_BASE 8 /* Non-zero if debugging is enabled, zero otherwise */ globvar DEBUG 1 /* The base address (relative to enclave start) of the enclave text section */ globvar TEXT_BASE 8 /* The size in bytes of enclave text section */ globvar TEXT_SIZE 8 /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_OFFSET 8 /* The size in bytes of enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_LEN 8 /* The base address (relative to enclave start) of the enclave .eh_frame section */ globvar EH_FRM_OFFSET 8 /* The size in bytes of enclave .eh_frame section */ globvar EH_FRM_LEN 8 .org .Lxsave_clear+512 .Lxsave_header: .int 0, 0 /* XSTATE_BV */ .int 0, 0 /* XCOMP_BV */ .org .+48 /* reserved bits */ .data .Laborted: .byte 0 /* TCS local storage section */ .equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */ .equ tcsls_flags, 0x08 /* initialized by loader */ .equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */ .equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */ /* 14 unused bits */ .equ tcsls_user_fcw, 0x0a .equ tcsls_user_mxcsr, 0x0c .equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */ .equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */ .equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */ .equ tcsls_user_rsp, 0x28 .equ tcsls_user_retip, 0x30 .equ tcsls_user_rbp, 
0x38 .equ tcsls_user_r12, 0x40 .equ tcsls_user_r13, 0x48 .equ tcsls_user_r14, 0x50 .equ tcsls_user_r15, 0x58 .equ tcsls_tls_ptr, 0x60 .equ tcsls_tcs_addr, 0x68 .macro load_tcsls_flag_secondary_bool reg:req comments:vararg .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */ .abort .endif mov $(1<<tcsls_flag_secondary),%e\reg and %gs:tcsls_flags,%\reg .endm /* We place the ELF entry point in a separate section so it can be removed by elf2sgxs */ .section .text_no_sgx, "ax" .Lelf_entry_error_msg: .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n" .Lelf_entry_error_msg_end: .global elf_entry .type elf_entry,function elf_entry: /* print error message */ movq $2,%rdi /* write to stderr (fd 2) */ lea .Lelf_entry_error_msg(%rip),%rsi movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx .Lelf_entry_call: movq $1,%rax /* write() syscall */ syscall test %rax,%rax jle .Lelf_exit /* exit on error */ add %rax,%rsi sub %rax,%rdx /* all chars written? */ jnz .Lelf_entry_call .Lelf_exit: movq $60,%rax /* exit() syscall */ movq $1,%rdi /* exit code 1 */ syscall ud2 /* should not be reached */ /* end elf_entry */ /* This code needs to be called *after* the enclave stack has been setup. */ /* There are 3 places where this needs to happen, so this is put in a macro. 
*/ .macro entry_sanitize_final /* Sanitize rflags received from user */ /* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ /* - AC flag: AEX on misaligned memory accesses leaks side channel info */ pushfq andq $~0x40400, (%rsp) popfq /* check for abort */ bt $0,.Laborted(%rip) jc .Lreentry_panic .endm .text .global sgx_entry .type sgx_entry,function sgx_entry: /* save user registers */ mov %rcx,%gs:tcsls_user_retip mov %rsp,%gs:tcsls_user_rsp mov %rbp,%gs:tcsls_user_rbp mov %r12,%gs:tcsls_user_r12 mov %r13,%gs:tcsls_user_r13 mov %r14,%gs:tcsls_user_r14 mov %r15,%gs:tcsls_user_r15 mov %rbx,%gs:tcsls_tcs_addr stmxcsr %gs:tcsls_user_mxcsr fnstcw %gs:tcsls_user_fcw /* check for debug buffer pointer */ testb $0xff,DEBUG(%rip) jz .Lskip_debug_init mov %r10,%gs:tcsls_debug_panic_buf_ptr .Lskip_debug_init: /* reset cpu state */ mov %rdx, %r10 mov $-1, %rax mov $-1, %rdx xrstor .Lxsave_clear(%rip) lfence mov %r10, %rdx /* check if returning from usercall */ mov %gs:tcsls_last_rsp,%r11 test %r11,%r11 jnz .Lusercall_ret /* setup stack */ mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */ /* here. This is fixed below under "adjust stack". 
*/ /* check for thread init */ bts $tcsls_flag_init_once,%gs:tcsls_flags jc .Lskip_init /* adjust stack */ lea IMAGE_BASE(%rip),%rax add %rax,%rsp mov %rsp,%gs:tcsls_tos entry_sanitize_final /* call tcs_init */ /* store caller-saved registers in callee-saved registers */ mov %rdi,%rbx mov %rsi,%r12 mov %rdx,%r13 mov %r8,%r14 mov %r9,%r15 load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */ call tcs_init /* reload caller-saved registers */ mov %rbx,%rdi mov %r12,%rsi mov %r13,%rdx mov %r14,%r8 mov %r15,%r9 jmp .Lafter_init .Lskip_init: entry_sanitize_final .Lafter_init: /* call into main entry point */ load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */ call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */ mov %rax,%rsi /* RSI = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ xor %rdi,%rdi /* RDI = normal exit */ .Lexit: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set later */ /* RCX overwritten by ENCLU */ /* RDX contains return value */ /* RSP set later */ /* RBP set later */ /* RDI contains exit mode */ /* RSI contains return value */ xor %r8,%r8 xor %r9,%r9 xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ .Lsgx_exit: /* clear extended register state */ mov %rdx, %rcx /* save RDX */ mov $-1, %rax mov %rax, %rdx xrstor .Lxsave_clear(%rip) mov %rcx, %rdx /* restore RDX */ /* clear flags */ pushq $0 popfq /* restore user registers */ mov %gs:tcsls_user_r12,%r12 mov %gs:tcsls_user_r13,%r13 mov %gs:tcsls_user_r14,%r14 mov %gs:tcsls_user_r15,%r15 mov %gs:tcsls_user_retip,%rbx mov %gs:tcsls_user_rsp,%rsp mov %gs:tcsls_user_rbp,%rbp fldcw %gs:tcsls_user_fcw ldmxcsr %gs:tcsls_user_mxcsr /* exit enclave */ mov $0x4,%eax /* EEXIT */ enclu /* end sgx_entry */ .Lreentry_panic: orq $8,%rsp jmp abort_reentry /* This *MUST* be called with 6 parameters, otherwise register information */ /* might leak! 
*/ .global usercall usercall: test %rcx,%rcx /* check `abort` function argument */ jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */ jmp .Lusercall_save_state /* non-aborting usercall */ .Lusercall_abort: /* set aborted bit */ movb $1,.Laborted(%rip) /* save registers in DEBUG mode, so that debugger can reconstruct the stack */ testb $0xff,DEBUG(%rip) jz .Lusercall_noreturn .Lusercall_save_state: /* save callee-saved state */ push %r15 push %r14 push %r13 push %r12 push %rbp push %rbx sub $8, %rsp fstcw 4(%rsp) stmxcsr (%rsp) movq %rsp,%gs:tcsls_last_rsp .Lusercall_noreturn: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set by sgx_exit */ /* RCX overwritten by ENCLU */ /* RDX contains parameter */ /* RSP set by sgx_exit */ /* RBP set by sgx_exit */ /* RDI contains parameter */ /* RSI contains parameter */ /* R8 contains parameter */ /* R9 contains parameter */ xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ /* extended registers/flags cleared by sgx_exit */ /* exit */ jmp .Lsgx_exit .Lusercall_ret: movq $0,%gs:tcsls_last_rsp /* restore callee-saved state, cf. "save" above */ mov %r11,%rsp /* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */ /* vector instructions is used. We omit the lfence here as one is required before */ /* the jmp instruction anyway. 
*/ ldmxcsr (%rsp) fldcw 4(%rsp) add $8, %rsp entry_sanitize_final pop %rbx pop %rbp pop %r12 pop %r13 pop %r14 pop %r15 /* return */ mov %rsi,%rax /* RAX = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ pop %r11 lfence jmp *%r11 /* The following functions need to be defined externally: ``` // Called by entry code on re-entry after exit extern "C" fn abort_reentry() -> !; // Called once when a TCS is first entered extern "C" fn tcs_init(secondary: bool); // Standard TCS entrypoint extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64); ``` */ .global get_tcs_addr get_tcs_addr: mov %gs:tcsls_tcs_addr,%rax pop %r11 lfence jmp *%r11 .global get_tls_ptr get_tls_ptr: mov %gs:tcsls_tls_ptr,%rax pop %r11 lfence jmp *%r11 .global set_tls_ptr set_tls_ptr: mov %rdi,%gs:tcsls_tls_ptr pop %r11 lfence jmp *%r11 .global take_debug_panic_buf_ptr take_debug_panic_buf_ptr: xor %rax,%rax xchg %gs:tcsls_debug_panic_buf_ptr,%rax pop %r11 lfence jmp *%r11
rust-lagit1/rust
11,809
library/std/src/sys/pal/sgx/abi/entry.S
/* This symbol is used at runtime to figure out the virtual address that the */ /* enclave is loaded at. */ .section absolute .global IMAGE_BASE IMAGE_BASE: .section ".note.x86_64-fortanix-unknown-sgx", "", @note .align 4 .long 1f - 0f /* name length (not including padding) */ .long 3f - 2f /* desc length (not including padding) */ .long 1 /* type = NT_VERSION */ 0: .asciz "toolchain-version" /* name */ 1: .align 4 2: .long 1 /* desc - toolchain version number, 32-bit LE */ 3: .align 4 .section .rodata /* The XSAVE area needs to be a large chunk of readable memory, but since we are */ /* going to restore everything to its initial state (XSTATE_BV=0), only certain */ /* parts need to have a defined value. In particular: */ /* */ /* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */ /* RFBM[2] is set, regardless of the value of XSTATE_BV */ /* * XSAVE header */ .align 64 .Lxsave_clear: .org .+24 .Lxsave_mxcsr: .short 0x1fbf /* We can store a bunch of data in the gap between MXCSR and the XSAVE header */ /* The following symbols point at read-only data that will be filled in by the */ /* post-linker. */ /* When using this macro, don't forget to adjust the linker version script! 
*/ .macro globvar name:req size:req .global \name .protected \name .align \size .size \name , \size \name : .org .+\size .endm /* The base address (relative to enclave start) of the heap area */ globvar HEAP_BASE 8 /* The heap size in bytes */ globvar HEAP_SIZE 8 /* Value of the RELA entry in the dynamic table */ globvar RELA 8 /* Value of the RELACOUNT entry in the dynamic table */ globvar RELACOUNT 8 /* The enclave size in bytes */ globvar ENCLAVE_SIZE 8 /* The base address (relative to enclave start) of the enclave configuration area */ globvar CFGDATA_BASE 8 /* Non-zero if debugging is enabled, zero otherwise */ globvar DEBUG 1 /* The base address (relative to enclave start) of the enclave text section */ globvar TEXT_BASE 8 /* The size in bytes of enclave text section */ globvar TEXT_SIZE 8 /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_OFFSET 8 /* The size in bytes of enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_LEN 8 /* The base address (relative to enclave start) of the enclave .eh_frame section */ globvar EH_FRM_OFFSET 8 /* The size in bytes of enclave .eh_frame section */ globvar EH_FRM_LEN 8 .org .Lxsave_clear+512 .Lxsave_header: .int 0, 0 /* XSTATE_BV */ .int 0, 0 /* XCOMP_BV */ .org .+48 /* reserved bits */ .data .Laborted: .byte 0 /* TCS local storage section */ .equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */ .equ tcsls_flags, 0x08 /* initialized by loader */ .equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */ .equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */ /* 14 unused bits */ .equ tcsls_user_fcw, 0x0a .equ tcsls_user_mxcsr, 0x0c .equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */ .equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */ .equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */ .equ tcsls_user_rsp, 0x28 .equ tcsls_user_retip, 0x30 .equ tcsls_user_rbp, 
0x38 .equ tcsls_user_r12, 0x40 .equ tcsls_user_r13, 0x48 .equ tcsls_user_r14, 0x50 .equ tcsls_user_r15, 0x58 .equ tcsls_tls_ptr, 0x60 .equ tcsls_tcs_addr, 0x68 .macro load_tcsls_flag_secondary_bool reg:req comments:vararg .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */ .abort .endif mov $(1<<tcsls_flag_secondary),%e\reg and %gs:tcsls_flags,%\reg .endm /* We place the ELF entry point in a separate section so it can be removed by elf2sgxs */ .section .text_no_sgx, "ax" .Lelf_entry_error_msg: .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n" .Lelf_entry_error_msg_end: .global elf_entry .type elf_entry,function elf_entry: /* print error message */ movq $2,%rdi /* write to stderr (fd 2) */ lea .Lelf_entry_error_msg(%rip),%rsi movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx .Lelf_entry_call: movq $1,%rax /* write() syscall */ syscall test %rax,%rax jle .Lelf_exit /* exit on error */ add %rax,%rsi sub %rax,%rdx /* all chars written? */ jnz .Lelf_entry_call .Lelf_exit: movq $60,%rax /* exit() syscall */ movq $1,%rdi /* exit code 1 */ syscall ud2 /* should not be reached */ /* end elf_entry */ /* This code needs to be called *after* the enclave stack has been setup. */ /* There are 3 places where this needs to happen, so this is put in a macro. 
*/ .macro entry_sanitize_final /* Sanitize rflags received from user */ /* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ /* - AC flag: AEX on misaligned memory accesses leaks side channel info */ pushfq andq $~0x40400, (%rsp) popfq /* check for abort */ bt $0,.Laborted(%rip) jc .Lreentry_panic .endm .text .global sgx_entry .type sgx_entry,function sgx_entry: /* save user registers */ mov %rcx,%gs:tcsls_user_retip mov %rsp,%gs:tcsls_user_rsp mov %rbp,%gs:tcsls_user_rbp mov %r12,%gs:tcsls_user_r12 mov %r13,%gs:tcsls_user_r13 mov %r14,%gs:tcsls_user_r14 mov %r15,%gs:tcsls_user_r15 mov %rbx,%gs:tcsls_tcs_addr stmxcsr %gs:tcsls_user_mxcsr fnstcw %gs:tcsls_user_fcw /* check for debug buffer pointer */ testb $0xff,DEBUG(%rip) jz .Lskip_debug_init mov %r10,%gs:tcsls_debug_panic_buf_ptr .Lskip_debug_init: /* reset cpu state */ mov %rdx, %r10 mov $-1, %rax mov $-1, %rdx xrstor .Lxsave_clear(%rip) lfence mov %r10, %rdx /* check if returning from usercall */ mov %gs:tcsls_last_rsp,%r11 test %r11,%r11 jnz .Lusercall_ret /* setup stack */ mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */ /* here. This is fixed below under "adjust stack". 
*/ /* check for thread init */ bts $tcsls_flag_init_once,%gs:tcsls_flags jc .Lskip_init /* adjust stack */ lea IMAGE_BASE(%rip),%rax add %rax,%rsp mov %rsp,%gs:tcsls_tos entry_sanitize_final /* call tcs_init */ /* store caller-saved registers in callee-saved registers */ mov %rdi,%rbx mov %rsi,%r12 mov %rdx,%r13 mov %r8,%r14 mov %r9,%r15 load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */ call tcs_init /* reload caller-saved registers */ mov %rbx,%rdi mov %r12,%rsi mov %r13,%rdx mov %r14,%r8 mov %r15,%r9 jmp .Lafter_init .Lskip_init: entry_sanitize_final .Lafter_init: /* call into main entry point */ load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */ call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */ mov %rax,%rsi /* RSI = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ xor %rdi,%rdi /* RDI = normal exit */ .Lexit: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set later */ /* RCX overwritten by ENCLU */ /* RDX contains return value */ /* RSP set later */ /* RBP set later */ /* RDI contains exit mode */ /* RSI contains return value */ xor %r8,%r8 xor %r9,%r9 xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ .Lsgx_exit: /* clear extended register state */ mov %rdx, %rcx /* save RDX */ mov $-1, %rax mov %rax, %rdx xrstor .Lxsave_clear(%rip) mov %rcx, %rdx /* restore RDX */ /* clear flags */ pushq $0 popfq /* restore user registers */ mov %gs:tcsls_user_r12,%r12 mov %gs:tcsls_user_r13,%r13 mov %gs:tcsls_user_r14,%r14 mov %gs:tcsls_user_r15,%r15 mov %gs:tcsls_user_retip,%rbx mov %gs:tcsls_user_rsp,%rsp mov %gs:tcsls_user_rbp,%rbp fldcw %gs:tcsls_user_fcw ldmxcsr %gs:tcsls_user_mxcsr /* exit enclave */ mov $0x4,%eax /* EEXIT */ enclu /* end sgx_entry */ .Lreentry_panic: orq $8,%rsp jmp abort_reentry /* This *MUST* be called with 6 parameters, otherwise register information */ /* might leak! 
*/ .global usercall usercall: test %rcx,%rcx /* check `abort` function argument */ jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */ jmp .Lusercall_save_state /* non-aborting usercall */ .Lusercall_abort: /* set aborted bit */ movb $1,.Laborted(%rip) /* save registers in DEBUG mode, so that debugger can reconstruct the stack */ testb $0xff,DEBUG(%rip) jz .Lusercall_noreturn .Lusercall_save_state: /* save callee-saved state */ push %r15 push %r14 push %r13 push %r12 push %rbp push %rbx sub $8, %rsp fstcw 4(%rsp) stmxcsr (%rsp) movq %rsp,%gs:tcsls_last_rsp .Lusercall_noreturn: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set by sgx_exit */ /* RCX overwritten by ENCLU */ /* RDX contains parameter */ /* RSP set by sgx_exit */ /* RBP set by sgx_exit */ /* RDI contains parameter */ /* RSI contains parameter */ /* R8 contains parameter */ /* R9 contains parameter */ xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ /* extended registers/flags cleared by sgx_exit */ /* exit */ jmp .Lsgx_exit .Lusercall_ret: movq $0,%gs:tcsls_last_rsp /* restore callee-saved state, cf. "save" above */ mov %r11,%rsp /* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */ /* vector instructions is used. We omit the lfence here as one is required before */ /* the jmp instruction anyway. 
*/ ldmxcsr (%rsp) fldcw 4(%rsp) add $8, %rsp entry_sanitize_final pop %rbx pop %rbp pop %r12 pop %r13 pop %r14 pop %r15 /* return */ mov %rsi,%rax /* RAX = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ pop %r11 lfence jmp *%r11 /* The following functions need to be defined externally: ``` // Called by entry code on re-entry after exit extern "C" fn abort_reentry() -> !; // Called once when a TCS is first entered extern "C" fn tcs_init(secondary: bool); // Standard TCS entrypoint extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64); ``` */ .global get_tcs_addr get_tcs_addr: mov %gs:tcsls_tcs_addr,%rax pop %r11 lfence jmp *%r11 .global get_tls_ptr get_tls_ptr: mov %gs:tcsls_tls_ptr,%rax pop %r11 lfence jmp *%r11 .global set_tls_ptr set_tls_ptr: mov %rdi,%gs:tcsls_tls_ptr pop %r11 lfence jmp *%r11 .global take_debug_panic_buf_ptr take_debug_panic_buf_ptr: xor %rax,%rax xchg %gs:tcsls_debug_panic_buf_ptr,%rax pop %r11 lfence jmp *%r11
RyoJerryYu/rCore-tutorial-learn
1,640
os/src/trap/trap.S
.altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm .section .text.trampoline .globl __alltraps .globl __restore .align 2 __alltraps: csrrw sp, sscratch, sp # now sp->*TrapContext in user space, sscratch->user stack # save other general purpose registers sd x1, 1*8(sp) # skip sp(x2), we will save it later sd x3, 3*8(sp) # skip tp(x4), application does not use it # save x5~x31 .set n, 5 .rept 27 SAVE_GP %n .set n, n+1 .endr # we can use t0/t1/t2 freely, because they have been saved in TrapContext csrr t0, sstatus csrr t1, sepc sd t0, 32*8(sp) sd t1, 33*8(sp) # read user stack from sscratch and save it in TrapContext csrr t2, sscratch sd t2, 2*8(sp) # load kernel_satp into t0 ld t0, 34*8(sp) # load trap_handler into t1 ld t1, 36*8(sp) # move to kernel_sp ld sp, 35*8(sp) # switch to kernel space csrw satp, t0 sfence.vma # jump to trap_handler jr t1 __restore: # a0: *TrapContext in user space(Constant); a1: user space token # switch to user space csrw satp, a1 sfence.vma csrw sscratch, a0 mv sp, a0 # now sp points to TrapContext in user space, start restoring based on it # restore sstatus/sepc ld t0, 32*8(sp) ld t1, 33*8(sp) csrw sstatus, t0 csrw sepc, t1 # restore general purpose registers except x0/sp/tp ld x1, 1*8(sp) ld x3, 3*8(sp) .set n, 5 .rept 27 LOAD_GP %n .set n, n+1 .endr # back to user stack ld sp, 2*8(sp) sret
Sakura176/mini-rCore
1,613
os/src/trap/trap.S
# os/src/trap/trap.S .altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm .section .text .globl __alltraps .globl __restore .align 2 __alltraps: csrrw sp, sscratch, sp #now sp->kernel stack, sscratch->user stack # allocate a TrapContext on kernel stack addi sp, sp, -34*8 # save general-purpose registers sd x1, 1*8(sp) # skip sp(x2), we will save it later sd x3, 3*8(sp) # skip tp(x4), application does not use it # save x5~x31 .set n, 5 .rept 27 SAVE_GP %n .set n, n+1 .endr # we can use t0/t1/t2 freely, because they were saved on kernel stack csrr t0, sstatus csrr t1, sepc sd t0, 32*8(sp) sd t1, 33*8(sp) # read user stack from sscratch and save it on the kernel stack csrr t2, sscratch sd t2, 2*8(sp) # set input argument of trap_handler(cx: &mut TrapContext) mv a0, sp call trap_handler __restore: # case1: start running app by __restore # case2: back to U after handling trap # mv sp, a0 # now sp->kernel stck(after allocated), sscratch->user stack # restore sstatus/spec ld t0, 32*8(sp) ld t1, 33*8(sp) ld t2, 2*8(sp) csrw sstatus, t0 csrw sepc, t1 csrw sscratch, t2 # restore general-purpuse registers except sp/tp ld x1, 1*8(sp) ld x3, 3*8(sp) .set n, 5 .rept 27 LOAD_GP %n .set n, n+ 1 .endr # release TrapContext on kernel stack addi sp, sp, 34*8 # now sp->kernel stack, sscratch->user stack csrrw sp, sscratch, sp sret
salfel/fenix
1,535
kernel/src/asm/setup.S
.global setup_modes .global setup_caches setup_modes: @ IRQ mode mov r0, #0xD2 msr cpsr_c, r0 ldr sp, =irq_stack_end @ Supervisor mode mov r0, #0xD3 msr cpsr_c, r0 ldr sp, =stack_end @ Enable IRQ mov r0, #0x53 msr cpsr_c, r0 bx lr setup_caches: push {lr} bl disable_l1_caches bl invalidate_l1_caches bl invalidate_data_caches bl branch_prediction_enable bl enable_d_side_prefetch pop {pc} disable_l1_caches: mrc p15, 0, r1, c1, c0, 0 bic r1, r1, #(0x1 << 12) bic r1, r1, #(0x1 << 2) mcr p15, 0, r1, c1, c0, 0 bx lr invalidate_l1_caches: mov r1, #0 mcr p15, 0, r1, c7, c5, 0 bx lr invalidate_data_caches: mrc p15, 1, r0, c0, c0, 0 ldr r3, =0x1ff and r0, r3, r0, lsr #13 mov r1, #0 way_loop: mov r3, #0 set_loop: mov r2, r1, lsl #30 orr r2, r3, lsl #5 mcr p15, 0, r2, c7, c6, 2 add r3, r3, #1 cmp r0, r3 bgt set_loop add r1, r1, #1 cmp r1, #4 bne way_loop bx lr invalidate_tlb: mcr p15, 0, r1, c8, c7, 0 bx lr branch_prediction_enable: mov r1, #0 mrc p15, 0, r1, c1, c0, 0 orr r1, r1, #(0x1 << 11) mcr p15, 0, r1, c1, c0, 0 bx lr enable_d_side_prefetch: mrc p15, 0, r1, c1, c0, 1 orr r1, r1, #(0x1 <<2) mcr p15, 0, r1, c1, c0, 1 dsb isb bx lr write_pte: orr r2, r0, r3, lsl #20 str r2, [r1, r3, lsl #2] subs r3, r3, #1 bge write_pte bx lr
salfel/fenix
1,060
kernel/src/asm/interrupts.S
.global irq_handler .global yield_task .global restore_context irq_handler: sub lr, lr, #4 stmfd sp!, {r0-r12, lr} mrs r11, spsr push {r11} bl handle_interrupt mov r0, #0 mcr p15, #0, r0, c7, c10, #4 pop {r11} msr spsr, r11 and r11, r11, #0b11111 cmp r11, #0b10011 beq return_interrupt ldr r0, yielded cmp r0, #0x1 beq store_context return_interrupt: ldmfd sp!, {r0-r12, pc}^ store_context: ldmfd sp!, {r0-r12, lr} str lr, next_pc push {r0} mrs r0, spsr str r0, temp_spsr pop {r0} msr cpsr_c, #0xDF stmfd sp!, {r0-r12, lr} mov r0, #0x0 str r0, yielded ldr r0, temp_spsr push {r0} mov r0, sp ldr r1, next_pc mov r2, #0x0 svc #0x1 restore_context: ldmfd r0!, {r2} msr spsr, r2 push {r1} msr cpsr_c, #0xDF mov sp, r0 pop {r0-r12, lr} msr cpsr_c, #0xD3 ldmfd sp!, {pc}^ yield_task: mov r0, #0x1 str r0, yielded bx lr yielded: .word 0 next_pc: .word 0 temp_spsr: .word 0
samaniheo/galonga
10,572
src/asm/keccakf1600_x86-64-win64.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .def __KeccakF1600; .scl 3; .type 32; .endef .p2align 5 __KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp .Loop .p2align 5 .Loop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 
xorq %rax,%r11 rolq $25,%r10 xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz .Loop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .globl KeccakF1600 .def KeccakF1600; .scl 2; .type 32; .endef .p2align 5 KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_KeccakF1600: movq %rcx,%rdi pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $200,%rsp .LSEH_body_KeccakF1600: notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq 
-4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_KeccakF1600: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_KeccakF1600: .globl SHA3_absorb .def SHA3_absorb; .scl 2; .type 32; .endef .p2align 5 SHA3_absorb: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_absorb: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $232,%rsp .LSEH_body_SHA3_absorb: movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) .Loop_absorb: cmpq %rcx,%rdx jc .Ldone_absorb shrq $3,%rcx leaq -100(%rdi),%r8 .Lblock_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz .Lblock_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp .Loop_absorb .p2align 5 .Ldone_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_SHA3_absorb: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_absorb: .globl SHA3_squeeze .def SHA3_squeeze; .scl 2; .type 32; .endef .p2align 5 SHA3_squeeze: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_squeeze: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %r12 pushq %r13 pushq %r14 subq $32,%rsp .LSEH_body_SHA3_squeeze: shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 
movq %rcx,%r14 jmp .Loop_squeeze .p2align 5 .Loop_squeeze: cmpq $8,%r13 jb .Ltail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz .Ldone_squeeze subq $1,%rcx jnz .Loop_squeeze movq %rdi,%rcx call KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp .Loop_squeeze .Ltail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 .Ldone_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .LSEH_epilogue_SHA3_squeeze: mov 8(%rsp),%rdi mov 16(%rsp),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_squeeze: .p2align 8 .quad 0,0,0,0,0,0,0,0 iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .section .pdata .p2align 2 .rva .LSEH_begin_KeccakF1600 .rva .LSEH_body_KeccakF1600 .rva .LSEH_info_KeccakF1600_prologue .rva .LSEH_body_KeccakF1600 .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_info_KeccakF1600_body .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_end_KeccakF1600 .rva .LSEH_info_KeccakF1600_epilogue .rva .LSEH_begin_SHA3_absorb .rva .LSEH_body_SHA3_absorb .rva .LSEH_info_SHA3_absorb_prologue .rva .LSEH_body_SHA3_absorb .rva .LSEH_epilogue_SHA3_absorb .rva .LSEH_info_SHA3_absorb_body .rva 
.LSEH_epilogue_SHA3_absorb .rva .LSEH_end_SHA3_absorb .rva .LSEH_info_SHA3_absorb_epilogue .rva .LSEH_begin_SHA3_squeeze .rva .LSEH_body_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_prologue .rva .LSEH_body_SHA3_squeeze .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_body .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_end_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_epilogue .section .xdata .p2align 3 .LSEH_info_KeccakF1600_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_KeccakF1600_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x19,0x00 .byte 0x00,0xe4,0x1a,0x00 .byte 0x00,0xd4,0x1b,0x00 .byte 0x00,0xc4,0x1c,0x00 .byte 0x00,0x54,0x1d,0x00 .byte 0x00,0x34,0x1e,0x00 .byte 0x00,0x74,0x20,0x00 .byte 0x00,0x64,0x21,0x00 .byte 0x00,0x01,0x1f,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_KeccakF1600_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_absorb_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x1d,0x00 .byte 0x00,0xe4,0x1e,0x00 .byte 0x00,0xd4,0x1f,0x00 .byte 0x00,0xc4,0x20,0x00 .byte 0x00,0x54,0x21,0x00 .byte 0x00,0x34,0x22,0x00 .byte 0x00,0x74,0x24,0x00 .byte 0x00,0x64,0x25,0x00 .byte 0x00,0x01,0x23,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_squeeze_body: .byte 1,0,11,0 .byte 0x00,0xe4,0x04,0x00 .byte 0x00,0xd4,0x05,0x00 .byte 0x00,0xc4,0x06,0x00 .byte 0x00,0x74,0x08,0x00 .byte 0x00,0x64,0x09,0x00 .byte 0x00,0x62 .byte 
0x00,0x00,0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_epilogue: .byte 1,0,4,0 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0x00,0x00,0x00
samaniheo/galonga
8,238
src/asm/keccakf1600_x86-64-osx.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .p2align 5 __KeccakF1600: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp L$oop .p2align 5 L$oop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 xorq %rax,%r11 rolq $25,%r10 
xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz L$oop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .cfi_endproc .globl _KeccakF1600 .p2align 5 _KeccakF1600: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 leaq 100(%rdi),%rdi subq $200,%rsp .cfi_adjust_cfa_offset 200 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) 
notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 .cfi_def_cfa %r11,8 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .cfi_restore %rbp .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .globl _SHA3_absorb .p2align 5 _SHA3_absorb: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 leaq 100(%rdi),%rdi subq $232,%rsp .cfi_adjust_cfa_offset 232 movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) L$oop_absorb: cmpq %rcx,%rdx jc L$done_absorb shrq $3,%rcx leaq -100(%rdi),%r8 L$block_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz L$block_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp L$oop_absorb .p2align 5 L$done_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 .cfi_def_cfa %r11,8 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .cfi_restore %rbp .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .globl 
_SHA3_squeeze .p2align 5 _SHA3_squeeze: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-24 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-32 subq $32,%rsp .cfi_adjust_cfa_offset 32 shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 movq %rcx,%r14 jmp L$oop_squeeze .p2align 5 L$oop_squeeze: cmpq $8,%r13 jb L$tail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz L$done_squeeze subq $1,%rcx jnz L$oop_squeeze movq %rdi,%rcx call _KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp L$oop_squeeze L$tail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 L$done_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .cfi_adjust_cfa_offset -56 .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .byte 0xf3,0xc3 .cfi_endproc .p2align 8 .quad 0,0,0,0,0,0,0,0 iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
samaniheo/galonga
8,619
src/asm/keccakf1600_x86-64-elf.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .type __KeccakF1600,@function .align 32 __KeccakF1600: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp .Loop .align 32 .Loop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 
xorq %rax,%r11 rolq $25,%r10 xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz .Loop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .cfi_endproc .size __KeccakF1600,.-__KeccakF1600 .globl KeccakF1600 .type KeccakF1600,@function .align 32 KeccakF1600: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 leaq 100(%rdi),%rdi subq 
$200,%rsp .cfi_adjust_cfa_offset 200 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 .cfi_def_cfa %r11,8 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .cfi_restore %rbp .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .size KeccakF1600,.-KeccakF1600 .globl SHA3_absorb .type SHA3_absorb,@function .align 32 SHA3_absorb: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 leaq 100(%rdi),%rdi subq $232,%rsp .cfi_adjust_cfa_offset 232 movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) .Loop_absorb: cmpq %rcx,%rdx jc .Ldone_absorb shrq $3,%rcx leaq -100(%rdi),%r8 .Lblock_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz .Lblock_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp .Loop_absorb .align 32 .Ldone_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 .cfi_def_cfa %r11,8 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp 
.cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .cfi_restore %rbp .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .size SHA3_absorb,.-SHA3_absorb .globl SHA3_squeeze .type SHA3_squeeze,@function .align 32 SHA3_squeeze: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-24 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-32 subq $32,%rsp .cfi_adjust_cfa_offset 32 shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 movq %rcx,%r14 jmp .Loop_squeeze .align 32 .Loop_squeeze: cmpq $8,%r13 jb .Ltail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz .Ldone_squeeze subq $1,%rcx jnz .Loop_squeeze movq %rdi,%rcx call KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp .Loop_squeeze .Ltail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 .Ldone_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .cfi_adjust_cfa_offset -56 .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .byte 0xf3,0xc3 .cfi_endproc .size SHA3_squeeze,.-SHA3_squeeze .align 256 .quad 0,0,0,0,0,0,0,0 .type iotas,@object iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .size iotas,.-iotas .byte 
75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .section .note.gnu.property,"a",@note .long 4,2f-1f,5 .byte 0x47,0x4E,0x55,0 1: .long 0xc0000002,4,3 .align 8 2:
samaniheo/galonga
10,572
src/asm/keccakf1600_x86-64-mingw64.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .def __KeccakF1600; .scl 3; .type 32; .endef .p2align 5 __KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp .Loop .p2align 5 .Loop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 
xorq %rax,%r11 rolq $25,%r10 xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz .Loop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .globl KeccakF1600 .def KeccakF1600; .scl 2; .type 32; .endef .p2align 5 KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_KeccakF1600: movq %rcx,%rdi pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $200,%rsp .LSEH_body_KeccakF1600: notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq 
-4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_KeccakF1600: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_KeccakF1600: .globl SHA3_absorb .def SHA3_absorb; .scl 2; .type 32; .endef .p2align 5 SHA3_absorb: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_absorb: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $232,%rsp .LSEH_body_SHA3_absorb: movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) .Loop_absorb: cmpq %rcx,%rdx jc .Ldone_absorb shrq $3,%rcx leaq -100(%rdi),%r8 .Lblock_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz .Lblock_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp .Loop_absorb .p2align 5 .Ldone_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_SHA3_absorb: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_absorb: .globl SHA3_squeeze .def SHA3_squeeze; .scl 2; .type 32; .endef .p2align 5 SHA3_squeeze: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_squeeze: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %r12 pushq %r13 pushq %r14 subq $32,%rsp .LSEH_body_SHA3_squeeze: shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 
movq %rcx,%r14 jmp .Loop_squeeze .p2align 5 .Loop_squeeze: cmpq $8,%r13 jb .Ltail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz .Ldone_squeeze subq $1,%rcx jnz .Loop_squeeze movq %rdi,%rcx call KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp .Loop_squeeze .Ltail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 .Ldone_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .LSEH_epilogue_SHA3_squeeze: mov 8(%rsp),%rdi mov 16(%rsp),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_squeeze: .p2align 8 .quad 0,0,0,0,0,0,0,0 iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .section .pdata .p2align 2 .rva .LSEH_begin_KeccakF1600 .rva .LSEH_body_KeccakF1600 .rva .LSEH_info_KeccakF1600_prologue .rva .LSEH_body_KeccakF1600 .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_info_KeccakF1600_body .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_end_KeccakF1600 .rva .LSEH_info_KeccakF1600_epilogue .rva .LSEH_begin_SHA3_absorb .rva .LSEH_body_SHA3_absorb .rva .LSEH_info_SHA3_absorb_prologue .rva .LSEH_body_SHA3_absorb .rva .LSEH_epilogue_SHA3_absorb .rva .LSEH_info_SHA3_absorb_body .rva 
.LSEH_epilogue_SHA3_absorb .rva .LSEH_end_SHA3_absorb .rva .LSEH_info_SHA3_absorb_epilogue .rva .LSEH_begin_SHA3_squeeze .rva .LSEH_body_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_prologue .rva .LSEH_body_SHA3_squeeze .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_body .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_end_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_epilogue .section .xdata .p2align 3 .LSEH_info_KeccakF1600_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_KeccakF1600_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x19,0x00 .byte 0x00,0xe4,0x1a,0x00 .byte 0x00,0xd4,0x1b,0x00 .byte 0x00,0xc4,0x1c,0x00 .byte 0x00,0x54,0x1d,0x00 .byte 0x00,0x34,0x1e,0x00 .byte 0x00,0x74,0x20,0x00 .byte 0x00,0x64,0x21,0x00 .byte 0x00,0x01,0x1f,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_KeccakF1600_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_absorb_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x1d,0x00 .byte 0x00,0xe4,0x1e,0x00 .byte 0x00,0xd4,0x1f,0x00 .byte 0x00,0xc4,0x20,0x00 .byte 0x00,0x54,0x21,0x00 .byte 0x00,0x34,0x22,0x00 .byte 0x00,0x74,0x24,0x00 .byte 0x00,0x64,0x25,0x00 .byte 0x00,0x01,0x23,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_squeeze_body: .byte 1,0,11,0 .byte 0x00,0xe4,0x04,0x00 .byte 0x00,0xd4,0x05,0x00 .byte 0x00,0xc4,0x06,0x00 .byte 0x00,0x74,0x08,0x00 .byte 0x00,0x64,0x09,0x00 .byte 0x00,0x62 .byte 
0x00,0x00,0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_epilogue: .byte 1,0,4,0 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0x00,0x00,0x00
samaniheo/santika
10,572
src/asm/keccakf1600_x86-64-win64.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .def __KeccakF1600; .scl 3; .type 32; .endef .p2align 5 __KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp .Loop .p2align 5 .Loop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 
xorq %rax,%r11 rolq $25,%r10 xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz .Loop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .globl KeccakF1600 .def KeccakF1600; .scl 2; .type 32; .endef .p2align 5 KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_KeccakF1600: movq %rcx,%rdi pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $200,%rsp .LSEH_body_KeccakF1600: notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq 
-4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_KeccakF1600: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_KeccakF1600: .globl SHA3_absorb .def SHA3_absorb; .scl 2; .type 32; .endef .p2align 5 SHA3_absorb: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_absorb: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $232,%rsp .LSEH_body_SHA3_absorb: movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) .Loop_absorb: cmpq %rcx,%rdx jc .Ldone_absorb shrq $3,%rcx leaq -100(%rdi),%r8 .Lblock_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz .Lblock_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp .Loop_absorb .p2align 5 .Ldone_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_SHA3_absorb: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_absorb: .globl SHA3_squeeze .def SHA3_squeeze; .scl 2; .type 32; .endef .p2align 5 SHA3_squeeze: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_squeeze: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %r12 pushq %r13 pushq %r14 subq $32,%rsp .LSEH_body_SHA3_squeeze: shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 
movq %rcx,%r14 jmp .Loop_squeeze .p2align 5 .Loop_squeeze: cmpq $8,%r13 jb .Ltail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz .Ldone_squeeze subq $1,%rcx jnz .Loop_squeeze movq %rdi,%rcx call KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp .Loop_squeeze .Ltail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 .Ldone_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .LSEH_epilogue_SHA3_squeeze: mov 8(%rsp),%rdi mov 16(%rsp),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_squeeze: .p2align 8 .quad 0,0,0,0,0,0,0,0 iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .section .pdata .p2align 2 .rva .LSEH_begin_KeccakF1600 .rva .LSEH_body_KeccakF1600 .rva .LSEH_info_KeccakF1600_prologue .rva .LSEH_body_KeccakF1600 .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_info_KeccakF1600_body .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_end_KeccakF1600 .rva .LSEH_info_KeccakF1600_epilogue .rva .LSEH_begin_SHA3_absorb .rva .LSEH_body_SHA3_absorb .rva .LSEH_info_SHA3_absorb_prologue .rva .LSEH_body_SHA3_absorb .rva .LSEH_epilogue_SHA3_absorb .rva .LSEH_info_SHA3_absorb_body .rva 
.LSEH_epilogue_SHA3_absorb .rva .LSEH_end_SHA3_absorb .rva .LSEH_info_SHA3_absorb_epilogue .rva .LSEH_begin_SHA3_squeeze .rva .LSEH_body_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_prologue .rva .LSEH_body_SHA3_squeeze .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_body .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_end_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_epilogue .section .xdata .p2align 3 .LSEH_info_KeccakF1600_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_KeccakF1600_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x19,0x00 .byte 0x00,0xe4,0x1a,0x00 .byte 0x00,0xd4,0x1b,0x00 .byte 0x00,0xc4,0x1c,0x00 .byte 0x00,0x54,0x1d,0x00 .byte 0x00,0x34,0x1e,0x00 .byte 0x00,0x74,0x20,0x00 .byte 0x00,0x64,0x21,0x00 .byte 0x00,0x01,0x1f,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_KeccakF1600_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_absorb_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x1d,0x00 .byte 0x00,0xe4,0x1e,0x00 .byte 0x00,0xd4,0x1f,0x00 .byte 0x00,0xc4,0x20,0x00 .byte 0x00,0x54,0x21,0x00 .byte 0x00,0x34,0x22,0x00 .byte 0x00,0x74,0x24,0x00 .byte 0x00,0x64,0x25,0x00 .byte 0x00,0x01,0x23,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_squeeze_body: .byte 1,0,11,0 .byte 0x00,0xe4,0x04,0x00 .byte 0x00,0xd4,0x05,0x00 .byte 0x00,0xc4,0x06,0x00 .byte 0x00,0x74,0x08,0x00 .byte 0x00,0x64,0x09,0x00 .byte 0x00,0x62 .byte 
0x00,0x00,0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_epilogue: .byte 1,0,4,0 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0x00,0x00,0x00
samaniheo/santika
8,238
src/asm/keccakf1600_x86-64-osx.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .p2align 5 __KeccakF1600: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp L$oop .p2align 5 L$oop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 xorq %rax,%r11 rolq $25,%r10 
xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz L$oop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .cfi_endproc .globl _KeccakF1600 .p2align 5 _KeccakF1600: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 leaq 100(%rdi),%rdi subq $200,%rsp .cfi_adjust_cfa_offset 200 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) 
notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 .cfi_def_cfa %r11,8 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .cfi_restore %rbp .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .globl _SHA3_absorb .p2align 5 _SHA3_absorb: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 leaq 100(%rdi),%rdi subq $232,%rsp .cfi_adjust_cfa_offset 232 movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) L$oop_absorb: cmpq %rcx,%rdx jc L$done_absorb shrq $3,%rcx leaq -100(%rdi),%r8 L$block_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz L$block_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp L$oop_absorb .p2align 5 L$done_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 .cfi_def_cfa %r11,8 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .cfi_restore %rbp .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .globl 
_SHA3_squeeze .p2align 5 _SHA3_squeeze: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-24 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-32 subq $32,%rsp .cfi_adjust_cfa_offset 32 shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 movq %rcx,%r14 jmp L$oop_squeeze .p2align 5 L$oop_squeeze: cmpq $8,%r13 jb L$tail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz L$done_squeeze subq $1,%rcx jnz L$oop_squeeze movq %rdi,%rcx call _KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp L$oop_squeeze L$tail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 L$done_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .cfi_adjust_cfa_offset -56 .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .byte 0xf3,0xc3 .cfi_endproc .p2align 8 .quad 0,0,0,0,0,0,0,0 iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
samaniheo/santika
8,619
src/asm/keccakf1600_x86-64-elf.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .type __KeccakF1600,@function .align 32 __KeccakF1600: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp .Loop .align 32 .Loop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 
xorq %rax,%r11 rolq $25,%r10 xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz .Loop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .cfi_endproc .size __KeccakF1600,.-__KeccakF1600 .globl KeccakF1600 .type KeccakF1600,@function .align 32 KeccakF1600: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 leaq 100(%rdi),%rdi subq 
$200,%rsp .cfi_adjust_cfa_offset 200 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 .cfi_def_cfa %r11,8 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .cfi_restore %rbp .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .size KeccakF1600,.-KeccakF1600 .globl SHA3_absorb .type SHA3_absorb,@function .align 32 SHA3_absorb: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 leaq 100(%rdi),%rdi subq $232,%rsp .cfi_adjust_cfa_offset 232 movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) .Loop_absorb: cmpq %rcx,%rdx jc .Ldone_absorb shrq $3,%rcx leaq -100(%rdi),%r8 .Lblock_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz .Lblock_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp .Loop_absorb .align 32 .Ldone_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 .cfi_def_cfa %r11,8 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp 
.cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .cfi_restore %rbp .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .size SHA3_absorb,.-SHA3_absorb .globl SHA3_squeeze .type SHA3_squeeze,@function .align 32 SHA3_squeeze: .cfi_startproc .byte 0xf3,0x0f,0x1e,0xfa pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-24 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-32 subq $32,%rsp .cfi_adjust_cfa_offset 32 shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 movq %rcx,%r14 jmp .Loop_squeeze .align 32 .Loop_squeeze: cmpq $8,%r13 jb .Ltail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz .Ldone_squeeze subq $1,%rcx jnz .Loop_squeeze movq %rdi,%rcx call KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp .Loop_squeeze .Ltail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 .Ldone_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .cfi_adjust_cfa_offset -56 .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .byte 0xf3,0xc3 .cfi_endproc .size SHA3_squeeze,.-SHA3_squeeze .align 256 .quad 0,0,0,0,0,0,0,0 .type iotas,@object iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .size iotas,.-iotas .byte 
75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .section .note.gnu.property,"a",@note .long 4,2f-1f,5 .byte 0x47,0x4E,0x55,0 1: .long 0xc0000002,4,3 .align 8 2:
samaniheo/santika
10,572
src/asm/keccakf1600_x86-64-mingw64.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .def __KeccakF1600; .scl 3; .type 32; .endef .p2align 5 __KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp .Loop .p2align 5 .Loop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 
xorq %rax,%r11 rolq $25,%r10 xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz .Loop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .globl KeccakF1600 .def KeccakF1600; .scl 2; .type 32; .endef .p2align 5 KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_KeccakF1600: movq %rcx,%rdi pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $200,%rsp .LSEH_body_KeccakF1600: notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq 
-4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_KeccakF1600: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_KeccakF1600: .globl SHA3_absorb .def SHA3_absorb; .scl 2; .type 32; .endef .p2align 5 SHA3_absorb: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_absorb: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $232,%rsp .LSEH_body_SHA3_absorb: movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) .Loop_absorb: cmpq %rcx,%rdx jc .Ldone_absorb shrq $3,%rcx leaq -100(%rdi),%r8 .Lblock_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz .Lblock_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp .Loop_absorb .p2align 5 .Ldone_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_SHA3_absorb: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_absorb: .globl SHA3_squeeze .def SHA3_squeeze; .scl 2; .type 32; .endef .p2align 5 SHA3_squeeze: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_squeeze: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %r12 pushq %r13 pushq %r14 subq $32,%rsp .LSEH_body_SHA3_squeeze: shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 
movq %rcx,%r14 jmp .Loop_squeeze .p2align 5 .Loop_squeeze: cmpq $8,%r13 jb .Ltail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz .Ldone_squeeze subq $1,%rcx jnz .Loop_squeeze movq %rdi,%rcx call KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp .Loop_squeeze .Ltail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 .Ldone_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .LSEH_epilogue_SHA3_squeeze: mov 8(%rsp),%rdi mov 16(%rsp),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_squeeze: .p2align 8 .quad 0,0,0,0,0,0,0,0 iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .section .pdata .p2align 2 .rva .LSEH_begin_KeccakF1600 .rva .LSEH_body_KeccakF1600 .rva .LSEH_info_KeccakF1600_prologue .rva .LSEH_body_KeccakF1600 .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_info_KeccakF1600_body .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_end_KeccakF1600 .rva .LSEH_info_KeccakF1600_epilogue .rva .LSEH_begin_SHA3_absorb .rva .LSEH_body_SHA3_absorb .rva .LSEH_info_SHA3_absorb_prologue .rva .LSEH_body_SHA3_absorb .rva .LSEH_epilogue_SHA3_absorb .rva .LSEH_info_SHA3_absorb_body .rva 
.LSEH_epilogue_SHA3_absorb .rva .LSEH_end_SHA3_absorb .rva .LSEH_info_SHA3_absorb_epilogue .rva .LSEH_begin_SHA3_squeeze .rva .LSEH_body_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_prologue .rva .LSEH_body_SHA3_squeeze .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_body .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_end_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_epilogue .section .xdata .p2align 3 .LSEH_info_KeccakF1600_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_KeccakF1600_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x19,0x00 .byte 0x00,0xe4,0x1a,0x00 .byte 0x00,0xd4,0x1b,0x00 .byte 0x00,0xc4,0x1c,0x00 .byte 0x00,0x54,0x1d,0x00 .byte 0x00,0x34,0x1e,0x00 .byte 0x00,0x74,0x20,0x00 .byte 0x00,0x64,0x21,0x00 .byte 0x00,0x01,0x1f,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_KeccakF1600_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_absorb_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x1d,0x00 .byte 0x00,0xe4,0x1e,0x00 .byte 0x00,0xd4,0x1f,0x00 .byte 0x00,0xc4,0x20,0x00 .byte 0x00,0x54,0x21,0x00 .byte 0x00,0x34,0x22,0x00 .byte 0x00,0x74,0x24,0x00 .byte 0x00,0x64,0x25,0x00 .byte 0x00,0x01,0x23,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_squeeze_body: .byte 1,0,11,0 .byte 0x00,0xe4,0x04,0x00 .byte 0x00,0xd4,0x05,0x00 .byte 0x00,0xc4,0x06,0x00 .byte 0x00,0x74,0x08,0x00 .byte 0x00,0x64,0x09,0x00 .byte 0x00,0x62 .byte 
0x00,0x00,0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_epilogue: .byte 1,0,4,0 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0x00,0x00,0x00
samchen61661/sp1-cpu
11,855
crates/zkvm/entrypoint/src/memcpy.s
// This is musl-libc commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30: // // src/string/memcpy.c // // This was compiled into assembly with: // // clang-14 -target riscv32 -march=rv32im -O3 -S memcpy.c -nostdlib -fno-builtin -funroll-loops // // and labels manually updated to not conflict. // // musl as a whole is licensed under the following standard MIT license: // // ---------------------------------------------------------------------- // Copyright © 2005-2020 Rich Felker, et al. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // ---------------------------------------------------------------------- // // Authors/contributors include: // // A. Wilcox // Ada Worcester // Alex Dowad // Alex Suykov // Alexander Monakov // Andre McCurdy // Andrew Kelley // Anthony G. 
Basile // Aric Belsito // Arvid Picciani // Bartosz Brachaczek // Benjamin Peterson // Bobby Bingham // Boris Brezillon // Brent Cook // Chris Spiegel // Clément Vasseur // Daniel Micay // Daniel Sabogal // Daurnimator // David Carlier // David Edelsohn // Denys Vlasenko // Dmitry Ivanov // Dmitry V. Levin // Drew DeVault // Emil Renner Berthing // Fangrui Song // Felix Fietkau // Felix Janda // Gianluca Anzolin // Hauke Mehrtens // He X // Hiltjo Posthuma // Isaac Dunham // Jaydeep Patil // Jens Gustedt // Jeremy Huntwork // Jo-Philipp Wich // Joakim Sindholt // John Spencer // Julien Ramseier // Justin Cormack // Kaarle Ritvanen // Khem Raj // Kylie McClain // Leah Neukirchen // Luca Barbato // Luka Perkov // M Farkas-Dyck (Strake) // Mahesh Bodapati // Markus Wichmann // Masanori Ogino // Michael Clark // Michael Forney // Mikhail Kremnyov // Natanael Copa // Nicholas J. Kain // orc // Pascal Cuoq // Patrick Oppenlander // Petr Hosek // Petr Skocik // Pierre Carrier // Reini Urban // Rich Felker // Richard Pennington // Ryan Fairfax // Samuel Holland // Segev Finer // Shiz // sin // Solar Designer // Stefan Kristiansson // Stefan O'Rear // Szabolcs Nagy // Timo Teräs // Trutz Behn // Valentin Ochs // Will Dietz // William Haddon // William Pitcock // // Portions of this software are derived from third-party works licensed // under terms compatible with the above MIT license: // // The TRE regular expression implementation (src/regex/reg* and // src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed // under a 2-clause BSD license (license text in the source files). The // included version has been heavily modified by Rich Felker in 2012, in // the interests of size, simplicity, and namespace cleanliness. // // Much of the math library code (src/math/* and src/complex/*) is // Copyright © 1993,2004 Sun Microsystems or // Copyright © 2003-2011 David Schultz or // Copyright © 2003-2009 Steven G. Kargl or // Copyright © 2003-2009 Bruce D. 
Evans or // Copyright © 2008 Stephen L. Moshier or // Copyright © 2017-2018 Arm Limited // and labelled as such in comments in the individual source files. All // have been licensed under extremely permissive terms. // // The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008 // The Android Open Source Project and is licensed under a two-clause BSD // license. It was taken from Bionic libc, used on Android. // // The AArch64 memcpy and memset code (src/string/aarch64/*) are // Copyright © 1999-2019, Arm Limited. // // The implementation of DES for crypt (src/crypt/crypt_des.c) is // Copyright © 1994 David Burren. It is licensed under a BSD license. // // The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was // originally written by Solar Designer and placed into the public // domain. The code also comes with a fallback permissive license for use // in jurisdictions that may not recognize the public domain. // // The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 // Valentin Ochs and is licensed under an MIT-style license. // // The x86_64 port was written by Nicholas J. Kain and is licensed under // the standard MIT terms. // // The mips and microblaze ports were originally written by Richard // Pennington for use in the ellcc project. The original code was adapted // by Rich Felker for build system and code conventions during upstream // integration. It is licensed under the standard MIT terms. // // The mips64 port was contributed by Imagination Technologies and is // licensed under the standard MIT terms. // // The powerpc port was also originally written by Richard Pennington, // and later supplemented and integrated by John Spencer. It is licensed // under the standard MIT terms. 
// // All other files which have no copyright comments are original works // produced specifically for use as part of this library, written either // by Rich Felker, the main author of the library, or by one or more // contributors listed above. Details on authorship of individual files // can be found in the git version control history of the project. The // omission of copyright and license comments in each file is in the // interest of source tree size. // // In addition, permission is hereby granted for all public header files // (include/* and arch/* /bits/* ) and crt files intended to be linked into // applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit // the copyright notice and permission notice otherwise required by the // license, and to use these files without any requirement of // attribution. These files include substantial contributions from: // // Bobby Bingham // John Spencer // Nicholas J. Kain // Rich Felker // Richard Pennington // Stefan Kristiansson // Szabolcs Nagy // // all of whom have explicitly granted such permission. // // This file previously contained text expressing a belief that most of // the files covered by the above exception were sufficiently trivial not // to be subject to copyright, resulting in confusion over whether it // negated the permissions granted in the license. In the spirit of // permissive licensing, and of not having licensing issues being an // obstacle to adoption, that text has been removed. 
.text .attribute 4, 16 .attribute 5, "rv32im" .file "musl_memcpy.c" .globl memcpy .p2align 2 .type memcpy,@function memcpy: andi a3, a1, 3 seqz a3, a3 seqz a4, a2 or a3, a3, a4 bnez a3, .LBBmemcpy0_11 addi a5, a1, 1 mv a6, a0 .LBBmemcpy0_2: lb a7, 0(a1) addi a4, a1, 1 addi a3, a6, 1 sb a7, 0(a6) addi a2, a2, -1 andi a1, a5, 3 snez a1, a1 snez a6, a2 and a7, a1, a6 addi a5, a5, 1 mv a1, a4 mv a6, a3 bnez a7, .LBBmemcpy0_2 andi a1, a3, 3 beqz a1, .LBBmemcpy0_12 .LBBmemcpy0_4: li a5, 32 bltu a2, a5, .LBBmemcpy0_26 li a5, 3 beq a1, a5, .LBBmemcpy0_19 li a5, 2 beq a1, a5, .LBBmemcpy0_22 li a5, 1 bne a1, a5, .LBBmemcpy0_26 lw a5, 0(a4) sb a5, 0(a3) srli a1, a5, 8 sb a1, 1(a3) srli a6, a5, 16 addi a1, a3, 3 sb a6, 2(a3) addi a2, a2, -3 addi a3, a4, 16 li a4, 16 .LBBmemcpy0_9: lw a6, -12(a3) srli a5, a5, 24 slli a7, a6, 8 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 24 slli a6, t0, 8 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 24 slli t0, a7, 8 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 24 slli a7, a5, 8 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_9 addi a4, a3, -13 j .LBBmemcpy0_25 .LBBmemcpy0_11: mv a3, a0 mv a4, a1 andi a1, a3, 3 bnez a1, .LBBmemcpy0_4 .LBBmemcpy0_12: li a1, 16 bltu a2, a1, .LBBmemcpy0_15 li a1, 15 .LBBmemcpy0_14: lw a5, 0(a4) lw a6, 4(a4) lw a7, 8(a4) lw t0, 12(a4) sw a5, 0(a3) sw a6, 4(a3) sw a7, 8(a3) sw t0, 12(a3) addi a4, a4, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a1, a2, .LBBmemcpy0_14 .LBBmemcpy0_15: andi a1, a2, 8 beqz a1, .LBBmemcpy0_17 lw a1, 0(a4) lw a5, 4(a4) sw a1, 0(a3) sw a5, 4(a3) addi a3, a3, 8 addi a4, a4, 8 .LBBmemcpy0_17: andi a1, a2, 4 beqz a1, .LBBmemcpy0_30 lw a1, 0(a4) sw a1, 0(a3) addi a3, a3, 4 addi a4, a4, 4 j .LBBmemcpy0_30 .LBBmemcpy0_19: lw a5, 0(a4) addi a1, a3, 1 sb a5, 0(a3) addi a2, a2, -1 addi a3, a4, 16 li a4, 18 .LBBmemcpy0_20: lw a6, -12(a3) srli a5, a5, 8 slli a7, a6, 24 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 8 
slli a6, t0, 24 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 8 slli t0, a7, 24 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 8 slli a7, a5, 24 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_20 addi a4, a3, -15 j .LBBmemcpy0_25 .LBBmemcpy0_22: lw a5, 0(a4) sb a5, 0(a3) srli a6, a5, 8 addi a1, a3, 2 sb a6, 1(a3) addi a2, a2, -2 addi a3, a4, 16 li a4, 17 .LBBmemcpy0_23: lw a6, -12(a3) srli a5, a5, 16 slli a7, a6, 16 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 16 slli a6, t0, 16 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 16 slli t0, a7, 16 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 16 slli a7, a5, 16 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_23 addi a4, a3, -14 .LBBmemcpy0_25: mv a3, a1 .LBBmemcpy0_26: andi a1, a2, 16 bnez a1, .LBBmemcpy0_35 andi a1, a2, 8 bnez a1, .LBBmemcpy0_36 .LBBmemcpy0_28: andi a1, a2, 4 beqz a1, .LBBmemcpy0_30 .LBBmemcpy0_29: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) addi a4, a4, 4 addi a5, a3, 4 sb a1, 3(a3) mv a3, a5 .LBBmemcpy0_30: andi a1, a2, 2 bnez a1, .LBBmemcpy0_33 andi a1, a2, 1 bnez a1, .LBBmemcpy0_34 .LBBmemcpy0_32: ret .LBBmemcpy0_33: lb a1, 0(a4) lb a5, 1(a4) sb a1, 0(a3) addi a4, a4, 2 addi a1, a3, 2 sb a5, 1(a3) mv a3, a1 andi a1, a2, 1 beqz a1, .LBBmemcpy0_32 .LBBmemcpy0_34: lb a1, 0(a4) sb a1, 0(a3) ret .LBBmemcpy0_35: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) lb a5, 4(a4) lb a6, 5(a4) sb a1, 3(a3) lb a1, 6(a4) sb a5, 4(a3) sb a6, 5(a3) lb a5, 7(a4) sb a1, 6(a3) lb a1, 8(a4) lb a6, 9(a4) sb a5, 7(a3) lb a5, 10(a4) sb a1, 8(a3) sb a6, 9(a3) lb a1, 11(a4) sb a5, 10(a3) lb a5, 12(a4) lb a6, 13(a4) sb a1, 11(a3) lb a1, 14(a4) sb a5, 12(a3) sb a6, 13(a3) lb a5, 15(a4) sb a1, 14(a3) addi a4, a4, 16 addi a1, a3, 16 sb a5, 15(a3) mv a3, a1 andi a1, a2, 8 beqz a1, .LBBmemcpy0_28 
.LBBmemcpy0_36: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) lb a5, 4(a4) lb a6, 5(a4) sb a1, 3(a3) lb a1, 6(a4) sb a5, 4(a3) sb a6, 5(a3) lb a5, 7(a4) sb a1, 6(a3) addi a4, a4, 8 addi a1, a3, 8 sb a5, 7(a3) mv a3, a1 andi a1, a2, 4 bnez a1, .LBBmemcpy0_29 j .LBBmemcpy0_30 .Lfuncmemcpy_end0: .size memcpy, .Lfuncmemcpy_end0-memcpy .ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157" .section ".note.GNU-stack","",@progbits .addrsig
samchen61661/sp1-cpu
8,450
crates/zkvm/entrypoint/src/memset.s
// This is musl-libc memset commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30: // // src/string/memset.c // // This was compiled into assembly with: // // clang-14 -target riscv32 -march=rv32im -O3 -S memset.c -nostdlib -fno-builtin -funroll-loops // // and labels manually updated to not conflict. // // musl as a whole is licensed under the following standard MIT license: // // ---------------------------------------------------------------------- // Copyright © 2005-2020 Rich Felker, et al. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // ---------------------------------------------------------------------- // // Authors/contributors include: // // A. Wilcox // Ada Worcester // Alex Dowad // Alex Suykov // Alexander Monakov // Andre McCurdy // Andrew Kelley // Anthony G. 
Basile // Aric Belsito // Arvid Picciani // Bartosz Brachaczek // Benjamin Peterson // Bobby Bingham // Boris Brezillon // Brent Cook // Chris Spiegel // Clément Vasseur // Daniel Micay // Daniel Sabogal // Daurnimator // David Carlier // David Edelsohn // Denys Vlasenko // Dmitry Ivanov // Dmitry V. Levin // Drew DeVault // Emil Renner Berthing // Fangrui Song // Felix Fietkau // Felix Janda // Gianluca Anzolin // Hauke Mehrtens // He X // Hiltjo Posthuma // Isaac Dunham // Jaydeep Patil // Jens Gustedt // Jeremy Huntwork // Jo-Philipp Wich // Joakim Sindholt // John Spencer // Julien Ramseier // Justin Cormack // Kaarle Ritvanen // Khem Raj // Kylie McClain // Leah Neukirchen // Luca Barbato // Luka Perkov // M Farkas-Dyck (Strake) // Mahesh Bodapati // Markus Wichmann // Masanori Ogino // Michael Clark // Michael Forney // Mikhail Kremnyov // Natanael Copa // Nicholas J. Kain // orc // Pascal Cuoq // Patrick Oppenlander // Petr Hosek // Petr Skocik // Pierre Carrier // Reini Urban // Rich Felker // Richard Pennington // Ryan Fairfax // Samuel Holland // Segev Finer // Shiz // sin // Solar Designer // Stefan Kristiansson // Stefan O'Rear // Szabolcs Nagy // Timo Teräs // Trutz Behn // Valentin Ochs // Will Dietz // William Haddon // William Pitcock // // Portions of this software are derived from third-party works licensed // under terms compatible with the above MIT license: // // The TRE regular expression implementation (src/regex/reg* and // src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed // under a 2-clause BSD license (license text in the source files). The // included version has been heavily modified by Rich Felker in 2012, in // the interests of size, simplicity, and namespace cleanliness. // // Much of the math library code (src/math/* and src/complex/*) is // Copyright © 1993,2004 Sun Microsystems or // Copyright © 2003-2011 David Schultz or // Copyright © 2003-2009 Steven G. Kargl or // Copyright © 2003-2009 Bruce D. 
Evans or // Copyright © 2008 Stephen L. Moshier or // Copyright © 2017-2018 Arm Limited // and labelled as such in comments in the individual source files. All // have been licensed under extremely permissive terms. // // The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008 // The Android Open Source Project and is licensed under a two-clause BSD // license. It was taken from Bionic libc, used on Android. // // The AArch64 memcpy and memset code (src/string/aarch64/*) are // Copyright © 1999-2019, Arm Limited. // // The implementation of DES for crypt (src/crypt/crypt_des.c) is // Copyright © 1994 David Burren. It is licensed under a BSD license. // // The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was // originally written by Solar Designer and placed into the public // domain. The code also comes with a fallback permissive license for use // in jurisdictions that may not recognize the public domain. // // The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 // Valentin Ochs and is licensed under an MIT-style license. // // The x86_64 port was written by Nicholas J. Kain and is licensed under // the standard MIT terms. // // The mips and microblaze ports were originally written by Richard // Pennington for use in the ellcc project. The original code was adapted // by Rich Felker for build system and code conventions during upstream // integration. It is licensed under the standard MIT terms. // // The mips64 port was contributed by Imagination Technologies and is // licensed under the standard MIT terms. // // The powerpc port was also originally written by Richard Pennington, // and later supplemented and integrated by John Spencer. It is licensed // under the standard MIT terms. 
// // All other files which have no copyright comments are original works // produced specifically for use as part of this library, written either // by Rich Felker, the main author of the library, or by one or more // contributors listed above. Details on authorship of individual files // can be found in the git version control history of the project. The // omission of copyright and license comments in each file is in the // interest of source tree size. // // In addition, permission is hereby granted for all public header files // (include/* and arch/* /bits/* ) and crt files intended to be linked into // applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit // the copyright notice and permission notice otherwise required by the // license, and to use these files without any requirement of // attribution. These files include substantial contributions from: // // Bobby Bingham // John Spencer // Nicholas J. Kain // Rich Felker // Richard Pennington // Stefan Kristiansson // Szabolcs Nagy // // all of whom have explicitly granted such permission. // // This file previously contained text expressing a belief that most of // the files covered by the above exception were sufficiently trivial not // to be subject to copyright, resulting in confusion over whether it // negated the permissions granted in the license. In the spirit of // permissive licensing, and of not having licensing issues being an // obstacle to adoption, that text has been removed. 
.text .attribute 4, 16 .attribute 5, "rv32im" .file "musl_memset.c" .globl memset .p2align 2 .type memset,@function memset: beqz a2, .LBB0_9memset sb a1, 0(a0) add a3, a2, a0 li a4, 3 sb a1, -1(a3) bltu a2, a4, .LBB0_9memset sb a1, 1(a0) sb a1, 2(a0) sb a1, -2(a3) li a4, 7 sb a1, -3(a3) bltu a2, a4, .LBB0_9memset sb a1, 3(a0) li a5, 9 sb a1, -4(a3) bltu a2, a5, .LBB0_9memset neg a3, a0 andi a4, a3, 3 add a3, a0, a4 sub a2, a2, a4 andi a2, a2, -4 andi a1, a1, 255 lui a4, 4112 addi a4, a4, 257 mul a1, a1, a4 sw a1, 0(a3) add a4, a3, a2 sw a1, -4(a4) bltu a2, a5, .LBB0_9memset sw a1, 4(a3) sw a1, 8(a3) sw a1, -12(a4) li a5, 25 sw a1, -8(a4) bltu a2, a5, .LBB0_9memset sw a1, 12(a3) sw a1, 16(a3) sw a1, 20(a3) sw a1, 24(a3) sw a1, -28(a4) sw a1, -24(a4) sw a1, -20(a4) andi a5, a3, 4 ori a5, a5, 24 sub a2, a2, a5 li a6, 32 sw a1, -16(a4) bltu a2, a6, .LBB0_9memset add a3, a3, a5 li a4, 31 .LBB0_8memset: sw a1, 0(a3) sw a1, 4(a3) sw a1, 8(a3) sw a1, 12(a3) sw a1, 16(a3) sw a1, 20(a3) sw a1, 24(a3) sw a1, 28(a3) addi a2, a2, -32 addi a3, a3, 32 bltu a4, a2, .LBB0_8memset .LBB0_9memset: ret .Lfunc_end0memset: .size memset, .Lfunc_end0memset-memset .ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157" .section ".note.GNU-stack","",@progbits .addrsig
SamrutGadde/CANBusBootloader
22,854
Bootloader/Core/Startup/startup_stm32f407vetx.s
/** ****************************************************************************** * @file startup_stm32f407xx.s * @author MCD Application Team * @brief STM32F407xx Devices vector table for GCC based toolchains. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M4 processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ .syntax unified .cpu cortex-m4 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss /* stack used for SystemInit_ExtMemCtl; always internal RAM used */ /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. 
* @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: ldr sp, =_estack /* set stack pointer */ /* Copy the data segment initializers from flash to SRAM */ ldr r0, =_sdata ldr r1, =_edata ldr r2, =_sidata movs r3, #0 b LoopCopyDataInit CopyDataInit: ldr r4, [r2, r3] str r4, [r0, r3] adds r3, r3, #4 LoopCopyDataInit: adds r4, r0, r3 cmp r4, r1 bcc CopyDataInit /* Zero fill the bss segment. */ ldr r2, =_sbss ldr r4, =_ebss movs r3, #0 b LoopFillZerobss FillZerobss: str r3, [r2] adds r2, r2, #4 LoopFillZerobss: cmp r2, r4 bcc FillZerobss /* Call the clock system initialization function.*/ bl SystemInit /* Call static constructors */ bl __libc_init_array /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * @param None * @retval None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M3. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* *******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler /* External Interrupts */ .word WWDG_IRQHandler /* Window WatchDog */ .word PVD_IRQHandler /* PVD through EXTI Line detection */ .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ .word FLASH_IRQHandler /* FLASH */ .word RCC_IRQHandler /* RCC */ .word EXTI0_IRQHandler /* EXTI Line0 */ .word EXTI1_IRQHandler /* EXTI Line1 */ .word EXTI2_IRQHandler /* EXTI Line2 */ .word EXTI3_IRQHandler /* EXTI Line3 */ .word EXTI4_IRQHandler /* EXTI Line4 */ .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ .word CAN1_TX_IRQHandler /* CAN1 TX */ .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ .word CAN1_SCE_IRQHandler /* CAN1 SCE */ .word EXTI9_5_IRQHandler /* External Line[9:5]s */ .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ .word TIM2_IRQHandler /* TIM2 */ .word TIM3_IRQHandler /* TIM3 */ .word TIM4_IRQHandler /* TIM4 */ .word I2C1_EV_IRQHandler /* I2C1 
Event */ .word I2C1_ER_IRQHandler /* I2C1 Error */ .word I2C2_EV_IRQHandler /* I2C2 Event */ .word I2C2_ER_IRQHandler /* I2C2 Error */ .word SPI1_IRQHandler /* SPI1 */ .word SPI2_IRQHandler /* SPI2 */ .word USART1_IRQHandler /* USART1 */ .word USART2_IRQHandler /* USART2 */ .word USART3_IRQHandler /* USART3 */ .word EXTI15_10_IRQHandler /* External Line[15:10]s */ .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ .word FSMC_IRQHandler /* FSMC */ .word SDIO_IRQHandler /* SDIO */ .word TIM5_IRQHandler /* TIM5 */ .word SPI3_IRQHandler /* SPI3 */ .word UART4_IRQHandler /* UART4 */ .word UART5_IRQHandler /* UART5 */ .word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ .word TIM7_IRQHandler /* TIM7 */ .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ .word ETH_IRQHandler /* Ethernet */ .word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */ .word CAN2_TX_IRQHandler /* CAN2 TX */ .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ .word CAN2_RX1_IRQHandler /* CAN2 RX1 */ .word CAN2_SCE_IRQHandler /* CAN2 SCE */ .word OTG_FS_IRQHandler /* USB OTG FS */ .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ .word USART6_IRQHandler /* USART6 */ .word I2C3_EV_IRQHandler /* I2C3 event */ .word I2C3_ER_IRQHandler /* I2C3 error */ .word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */ 
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ .word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ .word OTG_HS_IRQHandler /* USB OTG HS */ .word DCMI_IRQHandler /* DCMI */ .word 0 /* CRYP crypto */ .word HASH_RNG_IRQHandler /* Hash and Rng */ .word FPU_IRQHandler /* FPU */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. * *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak WWDG_IRQHandler .thumb_set WWDG_IRQHandler,Default_Handler .weak PVD_IRQHandler .thumb_set PVD_IRQHandler,Default_Handler .weak TAMP_STAMP_IRQHandler .thumb_set TAMP_STAMP_IRQHandler,Default_Handler .weak RTC_WKUP_IRQHandler .thumb_set RTC_WKUP_IRQHandler,Default_Handler .weak FLASH_IRQHandler .thumb_set FLASH_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Stream0_IRQHandler 
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler .weak DMA1_Stream1_IRQHandler .thumb_set DMA1_Stream1_IRQHandler,Default_Handler .weak DMA1_Stream2_IRQHandler .thumb_set DMA1_Stream2_IRQHandler,Default_Handler .weak DMA1_Stream3_IRQHandler .thumb_set DMA1_Stream3_IRQHandler,Default_Handler .weak DMA1_Stream4_IRQHandler .thumb_set DMA1_Stream4_IRQHandler,Default_Handler .weak DMA1_Stream5_IRQHandler .thumb_set DMA1_Stream5_IRQHandler,Default_Handler .weak DMA1_Stream6_IRQHandler .thumb_set DMA1_Stream6_IRQHandler,Default_Handler .weak ADC_IRQHandler .thumb_set ADC_IRQHandler,Default_Handler .weak CAN1_TX_IRQHandler .thumb_set CAN1_TX_IRQHandler,Default_Handler .weak CAN1_RX0_IRQHandler .thumb_set CAN1_RX0_IRQHandler,Default_Handler .weak CAN1_RX1_IRQHandler .thumb_set CAN1_RX1_IRQHandler,Default_Handler .weak CAN1_SCE_IRQHandler .thumb_set CAN1_SCE_IRQHandler,Default_Handler .weak EXTI9_5_IRQHandler .thumb_set EXTI9_5_IRQHandler,Default_Handler .weak TIM1_BRK_TIM9_IRQHandler .thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler .weak TIM1_UP_TIM10_IRQHandler .thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler .weak TIM1_TRG_COM_TIM11_IRQHandler .thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler 
.thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak EXTI15_10_IRQHandler .thumb_set EXTI15_10_IRQHandler,Default_Handler .weak RTC_Alarm_IRQHandler .thumb_set RTC_Alarm_IRQHandler,Default_Handler .weak OTG_FS_WKUP_IRQHandler .thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler .weak TIM8_BRK_TIM12_IRQHandler .thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler .weak TIM8_UP_TIM13_IRQHandler .thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler .weak TIM8_TRG_COM_TIM14_IRQHandler .thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak DMA1_Stream7_IRQHandler .thumb_set DMA1_Stream7_IRQHandler,Default_Handler .weak FSMC_IRQHandler .thumb_set FSMC_IRQHandler,Default_Handler .weak SDIO_IRQHandler .thumb_set SDIO_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak UART4_IRQHandler .thumb_set UART4_IRQHandler,Default_Handler .weak UART5_IRQHandler .thumb_set UART5_IRQHandler,Default_Handler .weak TIM6_DAC_IRQHandler .thumb_set TIM6_DAC_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Stream0_IRQHandler .thumb_set DMA2_Stream0_IRQHandler,Default_Handler .weak DMA2_Stream1_IRQHandler .thumb_set DMA2_Stream1_IRQHandler,Default_Handler .weak DMA2_Stream2_IRQHandler .thumb_set DMA2_Stream2_IRQHandler,Default_Handler .weak DMA2_Stream3_IRQHandler .thumb_set DMA2_Stream3_IRQHandler,Default_Handler .weak DMA2_Stream4_IRQHandler .thumb_set DMA2_Stream4_IRQHandler,Default_Handler .weak ETH_IRQHandler .thumb_set ETH_IRQHandler,Default_Handler .weak ETH_WKUP_IRQHandler .thumb_set ETH_WKUP_IRQHandler,Default_Handler .weak CAN2_TX_IRQHandler .thumb_set CAN2_TX_IRQHandler,Default_Handler .weak CAN2_RX0_IRQHandler .thumb_set CAN2_RX0_IRQHandler,Default_Handler .weak CAN2_RX1_IRQHandler 
.thumb_set CAN2_RX1_IRQHandler,Default_Handler .weak CAN2_SCE_IRQHandler .thumb_set CAN2_SCE_IRQHandler,Default_Handler .weak OTG_FS_IRQHandler .thumb_set OTG_FS_IRQHandler,Default_Handler .weak DMA2_Stream5_IRQHandler .thumb_set DMA2_Stream5_IRQHandler,Default_Handler .weak DMA2_Stream6_IRQHandler .thumb_set DMA2_Stream6_IRQHandler,Default_Handler .weak DMA2_Stream7_IRQHandler .thumb_set DMA2_Stream7_IRQHandler,Default_Handler .weak USART6_IRQHandler .thumb_set USART6_IRQHandler,Default_Handler .weak I2C3_EV_IRQHandler .thumb_set I2C3_EV_IRQHandler,Default_Handler .weak I2C3_ER_IRQHandler .thumb_set I2C3_ER_IRQHandler,Default_Handler .weak OTG_HS_EP1_OUT_IRQHandler .thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler .weak OTG_HS_EP1_IN_IRQHandler .thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler .weak OTG_HS_WKUP_IRQHandler .thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler .weak OTG_HS_IRQHandler .thumb_set OTG_HS_IRQHandler,Default_Handler .weak DCMI_IRQHandler .thumb_set DCMI_IRQHandler,Default_Handler .weak HASH_RNG_IRQHandler .thumb_set HASH_RNG_IRQHandler,Default_Handler .weak FPU_IRQHandler .thumb_set FPU_IRQHandler,Default_Handler
SamrutGadde/CANBusBootloader
22,854
BootloaderApp/Core/Startup/startup_stm32f407vetx.s
/** ****************************************************************************** * @file startup_stm32f407xx.s * @author MCD Application Team * @brief STM32F407xx Devices vector table for GCC based toolchains. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M4 processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ .syntax unified .cpu cortex-m4 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss /* stack used for SystemInit_ExtMemCtl; always internal RAM used */ /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. 
* @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: ldr sp, =_estack /* set stack pointer */ /* Copy the data segment initializers from flash to SRAM */ ldr r0, =_sdata ldr r1, =_edata ldr r2, =_sidata movs r3, #0 b LoopCopyDataInit CopyDataInit: ldr r4, [r2, r3] str r4, [r0, r3] adds r3, r3, #4 LoopCopyDataInit: adds r4, r0, r3 cmp r4, r1 bcc CopyDataInit /* Zero fill the bss segment. */ ldr r2, =_sbss ldr r4, =_ebss movs r3, #0 b LoopFillZerobss FillZerobss: str r3, [r2] adds r2, r2, #4 LoopFillZerobss: cmp r2, r4 bcc FillZerobss /* Call the clock system initialization function.*/ bl SystemInit /* Call static constructors */ bl __libc_init_array /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * @param None * @retval None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M3. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* *******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler /* External Interrupts */ .word WWDG_IRQHandler /* Window WatchDog */ .word PVD_IRQHandler /* PVD through EXTI Line detection */ .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ .word FLASH_IRQHandler /* FLASH */ .word RCC_IRQHandler /* RCC */ .word EXTI0_IRQHandler /* EXTI Line0 */ .word EXTI1_IRQHandler /* EXTI Line1 */ .word EXTI2_IRQHandler /* EXTI Line2 */ .word EXTI3_IRQHandler /* EXTI Line3 */ .word EXTI4_IRQHandler /* EXTI Line4 */ .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ .word CAN1_TX_IRQHandler /* CAN1 TX */ .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ .word CAN1_SCE_IRQHandler /* CAN1 SCE */ .word EXTI9_5_IRQHandler /* External Line[9:5]s */ .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ .word TIM2_IRQHandler /* TIM2 */ .word TIM3_IRQHandler /* TIM3 */ .word TIM4_IRQHandler /* TIM4 */ .word I2C1_EV_IRQHandler /* I2C1 
Event */ .word I2C1_ER_IRQHandler /* I2C1 Error */ .word I2C2_EV_IRQHandler /* I2C2 Event */ .word I2C2_ER_IRQHandler /* I2C2 Error */ .word SPI1_IRQHandler /* SPI1 */ .word SPI2_IRQHandler /* SPI2 */ .word USART1_IRQHandler /* USART1 */ .word USART2_IRQHandler /* USART2 */ .word USART3_IRQHandler /* USART3 */ .word EXTI15_10_IRQHandler /* External Line[15:10]s */ .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ .word FSMC_IRQHandler /* FSMC */ .word SDIO_IRQHandler /* SDIO */ .word TIM5_IRQHandler /* TIM5 */ .word SPI3_IRQHandler /* SPI3 */ .word UART4_IRQHandler /* UART4 */ .word UART5_IRQHandler /* UART5 */ .word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ .word TIM7_IRQHandler /* TIM7 */ .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ .word ETH_IRQHandler /* Ethernet */ .word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */ .word CAN2_TX_IRQHandler /* CAN2 TX */ .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ .word CAN2_RX1_IRQHandler /* CAN2 RX1 */ .word CAN2_SCE_IRQHandler /* CAN2 SCE */ .word OTG_FS_IRQHandler /* USB OTG FS */ .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ .word USART6_IRQHandler /* USART6 */ .word I2C3_EV_IRQHandler /* I2C3 event */ .word I2C3_ER_IRQHandler /* I2C3 error */ .word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */ 
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ .word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ .word OTG_HS_IRQHandler /* USB OTG HS */ .word DCMI_IRQHandler /* DCMI */ .word 0 /* CRYP crypto */ .word HASH_RNG_IRQHandler /* Hash and Rng */ .word FPU_IRQHandler /* FPU */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. * *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak WWDG_IRQHandler .thumb_set WWDG_IRQHandler,Default_Handler .weak PVD_IRQHandler .thumb_set PVD_IRQHandler,Default_Handler .weak TAMP_STAMP_IRQHandler .thumb_set TAMP_STAMP_IRQHandler,Default_Handler .weak RTC_WKUP_IRQHandler .thumb_set RTC_WKUP_IRQHandler,Default_Handler .weak FLASH_IRQHandler .thumb_set FLASH_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Stream0_IRQHandler 
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler .weak DMA1_Stream1_IRQHandler .thumb_set DMA1_Stream1_IRQHandler,Default_Handler .weak DMA1_Stream2_IRQHandler .thumb_set DMA1_Stream2_IRQHandler,Default_Handler .weak DMA1_Stream3_IRQHandler .thumb_set DMA1_Stream3_IRQHandler,Default_Handler .weak DMA1_Stream4_IRQHandler .thumb_set DMA1_Stream4_IRQHandler,Default_Handler .weak DMA1_Stream5_IRQHandler .thumb_set DMA1_Stream5_IRQHandler,Default_Handler .weak DMA1_Stream6_IRQHandler .thumb_set DMA1_Stream6_IRQHandler,Default_Handler .weak ADC_IRQHandler .thumb_set ADC_IRQHandler,Default_Handler .weak CAN1_TX_IRQHandler .thumb_set CAN1_TX_IRQHandler,Default_Handler .weak CAN1_RX0_IRQHandler .thumb_set CAN1_RX0_IRQHandler,Default_Handler .weak CAN1_RX1_IRQHandler .thumb_set CAN1_RX1_IRQHandler,Default_Handler .weak CAN1_SCE_IRQHandler .thumb_set CAN1_SCE_IRQHandler,Default_Handler .weak EXTI9_5_IRQHandler .thumb_set EXTI9_5_IRQHandler,Default_Handler .weak TIM1_BRK_TIM9_IRQHandler .thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler .weak TIM1_UP_TIM10_IRQHandler .thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler .weak TIM1_TRG_COM_TIM11_IRQHandler .thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler 
.thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak EXTI15_10_IRQHandler .thumb_set EXTI15_10_IRQHandler,Default_Handler .weak RTC_Alarm_IRQHandler .thumb_set RTC_Alarm_IRQHandler,Default_Handler .weak OTG_FS_WKUP_IRQHandler .thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler .weak TIM8_BRK_TIM12_IRQHandler .thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler .weak TIM8_UP_TIM13_IRQHandler .thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler .weak TIM8_TRG_COM_TIM14_IRQHandler .thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak DMA1_Stream7_IRQHandler .thumb_set DMA1_Stream7_IRQHandler,Default_Handler .weak FSMC_IRQHandler .thumb_set FSMC_IRQHandler,Default_Handler .weak SDIO_IRQHandler .thumb_set SDIO_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak UART4_IRQHandler .thumb_set UART4_IRQHandler,Default_Handler .weak UART5_IRQHandler .thumb_set UART5_IRQHandler,Default_Handler .weak TIM6_DAC_IRQHandler .thumb_set TIM6_DAC_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Stream0_IRQHandler .thumb_set DMA2_Stream0_IRQHandler,Default_Handler .weak DMA2_Stream1_IRQHandler .thumb_set DMA2_Stream1_IRQHandler,Default_Handler .weak DMA2_Stream2_IRQHandler .thumb_set DMA2_Stream2_IRQHandler,Default_Handler .weak DMA2_Stream3_IRQHandler .thumb_set DMA2_Stream3_IRQHandler,Default_Handler .weak DMA2_Stream4_IRQHandler .thumb_set DMA2_Stream4_IRQHandler,Default_Handler .weak ETH_IRQHandler .thumb_set ETH_IRQHandler,Default_Handler .weak ETH_WKUP_IRQHandler .thumb_set ETH_WKUP_IRQHandler,Default_Handler .weak CAN2_TX_IRQHandler .thumb_set CAN2_TX_IRQHandler,Default_Handler .weak CAN2_RX0_IRQHandler .thumb_set CAN2_RX0_IRQHandler,Default_Handler .weak CAN2_RX1_IRQHandler 
.thumb_set CAN2_RX1_IRQHandler,Default_Handler .weak CAN2_SCE_IRQHandler .thumb_set CAN2_SCE_IRQHandler,Default_Handler .weak OTG_FS_IRQHandler .thumb_set OTG_FS_IRQHandler,Default_Handler .weak DMA2_Stream5_IRQHandler .thumb_set DMA2_Stream5_IRQHandler,Default_Handler .weak DMA2_Stream6_IRQHandler .thumb_set DMA2_Stream6_IRQHandler,Default_Handler .weak DMA2_Stream7_IRQHandler .thumb_set DMA2_Stream7_IRQHandler,Default_Handler .weak USART6_IRQHandler .thumb_set USART6_IRQHandler,Default_Handler .weak I2C3_EV_IRQHandler .thumb_set I2C3_EV_IRQHandler,Default_Handler .weak I2C3_ER_IRQHandler .thumb_set I2C3_ER_IRQHandler,Default_Handler .weak OTG_HS_EP1_OUT_IRQHandler .thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler .weak OTG_HS_EP1_IN_IRQHandler .thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler .weak OTG_HS_WKUP_IRQHandler .thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler .weak OTG_HS_IRQHandler .thumb_set OTG_HS_IRQHandler,Default_Handler .weak DCMI_IRQHandler .thumb_set DCMI_IRQHandler,Default_Handler .weak HASH_RNG_IRQHandler .thumb_set HASH_RNG_IRQHandler,Default_Handler .weak FPU_IRQHandler .thumb_set FPU_IRQHandler,Default_Handler
Sappy12dream/eventify
15,018
.cargo/registry/src/index.crates.io-6f17d22bba15001f/zstd-sys-2.0.10+zstd.1.5.6/zstd/lib/decompress/huf_decompress_amd64.S
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include "../common/portability_macros.h" #if defined(__ELF__) && defined(__GNUC__) /* Stack marking * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart */ .section .note.GNU-stack,"",%progbits #if defined(__aarch64__) /* Mark that this assembly supports BTI & PAC, because it is empty for aarch64. * See: https://github.com/facebook/zstd/issues/3841 * See: https://gcc.godbolt.org/z/sqr5T4ffK * See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/ * See: https://reviews.llvm.org/D62609 */ .pushsection .note.gnu.property, "a" .p2align 3 .long 4 /* size of the name - "GNU\0" */ .long 0x10 /* size of descriptor */ .long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */ .asciz "GNU" .long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */ .long 4 /* pr_datasz - 4 bytes */ .long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */ .p2align 3 /* pr_padding - bring everything to 8 byte alignment */ .popsection #endif #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 /* Calling convention: * * %rdi contains the first argument: HUF_DecompressAsmArgs*. * %rbp isn't maintained (no frame pointer). * %rsp contains the stack pointer that grows down. * No red-zone is assumed, only addresses >= %rsp are used. * All register contents are preserved. * * TODO: Support Windows calling convention. 
*/ ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop) ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop) ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop) ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop) .global HUF_decompress4X1_usingDTable_internal_fast_asm_loop .global HUF_decompress4X2_usingDTable_internal_fast_asm_loop .global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop .global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop .text /* Sets up register mappings for clarity. * op[], bits[], dtable & ip[0] each get their own register. * ip[1,2,3] & olimit alias var[]. * %rax is a scratch register. */ #define op0 rsi #define op1 rbx #define op2 rcx #define op3 rdi #define ip0 r8 #define ip1 r9 #define ip2 r10 #define ip3 r11 #define bits0 rbp #define bits1 rdx #define bits2 r12 #define bits3 r13 #define dtable r14 #define olimit r15 /* var[] aliases ip[1,2,3] & olimit * ip[1,2,3] are saved every iteration. * olimit is only used in compute_olimit. */ #define var0 r15 #define var1 r9 #define var2 r10 #define var3 r11 /* 32-bit var registers */ #define vard0 r15d #define vard1 r9d #define vard2 r10d #define vard3 r11d /* Calls X(N) for each stream 0, 1, 2, 3. */ #define FOR_EACH_STREAM(X) \ X(0); \ X(1); \ X(2); \ X(3) /* Calls X(N, idx) for each stream 0, 1, 2, 3. */ #define FOR_EACH_STREAM_WITH_INDEX(X, idx) \ X(0, idx); \ X(1, idx); \ X(2, idx); \ X(3, idx) /* Define both _HUF_* & HUF_* symbols because MacOS * C symbols are prefixed with '_' & Linux symbols aren't. */ _HUF_decompress4X1_usingDTable_internal_fast_asm_loop: HUF_decompress4X1_usingDTable_internal_fast_asm_loop: ZSTD_CET_ENDBRANCH /* Save all registers - even if they are callee saved for simplicity. 
*/ push %rax push %rbx push %rcx push %rdx push %rbp push %rsi push %rdi push %r8 push %r9 push %r10 push %r11 push %r12 push %r13 push %r14 push %r15 /* Read HUF_DecompressAsmArgs* args from %rax */ movq %rdi, %rax movq 0(%rax), %ip0 movq 8(%rax), %ip1 movq 16(%rax), %ip2 movq 24(%rax), %ip3 movq 32(%rax), %op0 movq 40(%rax), %op1 movq 48(%rax), %op2 movq 56(%rax), %op3 movq 64(%rax), %bits0 movq 72(%rax), %bits1 movq 80(%rax), %bits2 movq 88(%rax), %bits3 movq 96(%rax), %dtable push %rax /* argument */ push 104(%rax) /* ilowest */ push 112(%rax) /* oend */ push %olimit /* olimit space */ subq $24, %rsp .L_4X1_compute_olimit: /* Computes how many iterations we can do safely * %r15, %rax may be clobbered * rbx, rdx must be saved * op3 & ip0 mustn't be clobbered */ movq %rbx, 0(%rsp) movq %rdx, 8(%rsp) movq 32(%rsp), %rax /* rax = oend */ subq %op3, %rax /* rax = oend - op3 */ /* r15 = (oend - op3) / 5 */ movabsq $-3689348814741910323, %rdx mulq %rdx movq %rdx, %r15 shrq $2, %r15 movq %ip0, %rax /* rax = ip0 */ movq 40(%rsp), %rdx /* rdx = ilowest */ subq %rdx, %rax /* rax = ip0 - ilowest */ movq %rax, %rbx /* rbx = ip0 - ilowest */ /* rdx = (ip0 - ilowest) / 7 */ movabsq $2635249153387078803, %rdx mulq %rdx subq %rdx, %rbx shrq %rbx addq %rbx, %rdx shrq $2, %rdx /* r15 = min(%rdx, %r15) */ cmpq %rdx, %r15 cmova %rdx, %r15 /* r15 = r15 * 5 */ leaq (%r15, %r15, 4), %r15 /* olimit = op3 + r15 */ addq %op3, %olimit movq 8(%rsp), %rdx movq 0(%rsp), %rbx /* If (op3 + 20 > olimit) */ movq %op3, %rax /* rax = op3 */ cmpq %rax, %olimit /* op3 == olimit */ je .L_4X1_exit /* If (ip1 < ip0) go to exit */ cmpq %ip0, %ip1 jb .L_4X1_exit /* If (ip2 < ip1) go to exit */ cmpq %ip1, %ip2 jb .L_4X1_exit /* If (ip3 < ip2) go to exit */ cmpq %ip2, %ip3 jb .L_4X1_exit /* Reads top 11 bits from bits[n] * Loads dt[bits[n]] into var[n] */ #define GET_NEXT_DELT(n) \ movq $53, %var##n; \ shrxq %var##n, %bits##n, %var##n; \ movzwl (%dtable,%var##n,2),%vard##n /* var[n] must contain the DTable 
entry computed with GET_NEXT_DELT * Moves var[n] to %rax * bits[n] <<= var[n] & 63 * op[n][idx] = %rax >> 8 * %ah is a way to access bits [8, 16) of %rax */ #define DECODE_FROM_DELT(n, idx) \ movq %var##n, %rax; \ shlxq %var##n, %bits##n, %bits##n; \ movb %ah, idx(%op##n) /* Assumes GET_NEXT_DELT has been called. * Calls DECODE_FROM_DELT then GET_NEXT_DELT */ #define DECODE_AND_GET_NEXT(n, idx) \ DECODE_FROM_DELT(n, idx); \ GET_NEXT_DELT(n) \ /* // ctz & nbBytes is stored in bits[n] * // nbBits is stored in %rax * ctz = CTZ[bits[n]] * nbBits = ctz & 7 * nbBytes = ctz >> 3 * op[n] += 5 * ip[n] -= nbBytes * // Note: x86-64 is little-endian ==> no bswap * bits[n] = MEM_readST(ip[n]) | 1 * bits[n] <<= nbBits */ #define RELOAD_BITS(n) \ bsfq %bits##n, %bits##n; \ movq %bits##n, %rax; \ andq $7, %rax; \ shrq $3, %bits##n; \ leaq 5(%op##n), %op##n; \ subq %bits##n, %ip##n; \ movq (%ip##n), %bits##n; \ orq $1, %bits##n; \ shlx %rax, %bits##n, %bits##n /* Store clobbered variables on the stack */ movq %olimit, 24(%rsp) movq %ip1, 0(%rsp) movq %ip2, 8(%rsp) movq %ip3, 16(%rsp) /* Call GET_NEXT_DELT for each stream */ FOR_EACH_STREAM(GET_NEXT_DELT) .p2align 6 .L_4X1_loop_body: /* Decode 5 symbols in each of the 4 streams (20 total) * Must have called GET_NEXT_DELT for each stream */ FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0) FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1) FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2) FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3) FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4) /* Load ip[1,2,3] from stack (var[] aliases them) * ip[] is needed for RELOAD_BITS * Each will be stored back to the stack after RELOAD */ movq 0(%rsp), %ip1 movq 8(%rsp), %ip2 movq 16(%rsp), %ip3 /* Reload each stream & fetch the next table entry * to prepare for the next iteration */ RELOAD_BITS(0) GET_NEXT_DELT(0) RELOAD_BITS(1) movq %ip1, 0(%rsp) GET_NEXT_DELT(1) RELOAD_BITS(2) movq %ip2, 8(%rsp) GET_NEXT_DELT(2) RELOAD_BITS(3) movq %ip3, 
16(%rsp) GET_NEXT_DELT(3) /* If op3 < olimit: continue the loop */ cmp %op3, 24(%rsp) ja .L_4X1_loop_body /* Reload ip[1,2,3] from stack */ movq 0(%rsp), %ip1 movq 8(%rsp), %ip2 movq 16(%rsp), %ip3 /* Re-compute olimit */ jmp .L_4X1_compute_olimit #undef GET_NEXT_DELT #undef DECODE_FROM_DELT #undef DECODE #undef RELOAD_BITS .L_4X1_exit: addq $24, %rsp /* Restore stack (oend & olimit) */ pop %rax /* olimit */ pop %rax /* oend */ pop %rax /* ilowest */ pop %rax /* arg */ /* Save ip / op / bits */ movq %ip0, 0(%rax) movq %ip1, 8(%rax) movq %ip2, 16(%rax) movq %ip3, 24(%rax) movq %op0, 32(%rax) movq %op1, 40(%rax) movq %op2, 48(%rax) movq %op3, 56(%rax) movq %bits0, 64(%rax) movq %bits1, 72(%rax) movq %bits2, 80(%rax) movq %bits3, 88(%rax) /* Restore registers */ pop %r15 pop %r14 pop %r13 pop %r12 pop %r11 pop %r10 pop %r9 pop %r8 pop %rdi pop %rsi pop %rbp pop %rdx pop %rcx pop %rbx pop %rax ret _HUF_decompress4X2_usingDTable_internal_fast_asm_loop: HUF_decompress4X2_usingDTable_internal_fast_asm_loop: ZSTD_CET_ENDBRANCH /* Save all registers - even if they are callee saved for simplicity. 
*/ push %rax push %rbx push %rcx push %rdx push %rbp push %rsi push %rdi push %r8 push %r9 push %r10 push %r11 push %r12 push %r13 push %r14 push %r15 movq %rdi, %rax movq 0(%rax), %ip0 movq 8(%rax), %ip1 movq 16(%rax), %ip2 movq 24(%rax), %ip3 movq 32(%rax), %op0 movq 40(%rax), %op1 movq 48(%rax), %op2 movq 56(%rax), %op3 movq 64(%rax), %bits0 movq 72(%rax), %bits1 movq 80(%rax), %bits2 movq 88(%rax), %bits3 movq 96(%rax), %dtable push %rax /* argument */ push %rax /* olimit */ push 104(%rax) /* ilowest */ movq 112(%rax), %rax push %rax /* oend3 */ movq %op3, %rax push %rax /* oend2 */ movq %op2, %rax push %rax /* oend1 */ movq %op1, %rax push %rax /* oend0 */ /* Scratch space */ subq $8, %rsp .L_4X2_compute_olimit: /* Computes how many iterations we can do safely * %r15, %rax may be clobbered * rdx must be saved * op[1,2,3,4] & ip0 mustn't be clobbered */ movq %rdx, 0(%rsp) /* We can consume up to 7 input bytes each iteration. */ movq %ip0, %rax /* rax = ip0 */ movq 40(%rsp), %rdx /* rdx = ilowest */ subq %rdx, %rax /* rax = ip0 - ilowest */ movq %rax, %r15 /* r15 = ip0 - ilowest */ /* rdx = rax / 7 */ movabsq $2635249153387078803, %rdx mulq %rdx subq %rdx, %r15 shrq %r15 addq %r15, %rdx shrq $2, %rdx /* r15 = (ip0 - ilowest) / 7 */ movq %rdx, %r15 /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */ movq 8(%rsp), %rax /* rax = oend0 */ subq %op0, %rax /* rax = oend0 - op0 */ movq 16(%rsp), %rdx /* rdx = oend1 */ subq %op1, %rdx /* rdx = oend1 - op1 */ cmpq %rax, %rdx cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ movq 24(%rsp), %rax /* rax = oend2 */ subq %op2, %rax /* rax = oend2 - op2 */ cmpq %rax, %rdx cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ movq 32(%rsp), %rax /* rax = oend3 */ subq %op3, %rax /* rax = oend3 - op3 */ cmpq %rax, %rdx cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ movabsq $-3689348814741910323, %rax mulq %rdx shrq $3, %rdx /* rdx = rdx / 10 */ /* r15 = min(%rdx, %r15) */ cmpq %rdx, %r15 cmova %rdx, %r15 /* olimit 
= op3 + 5 * r15 */ movq %r15, %rax leaq (%op3, %rax, 4), %olimit addq %rax, %olimit movq 0(%rsp), %rdx /* If (op3 + 10 > olimit) */ movq %op3, %rax /* rax = op3 */ cmpq %rax, %olimit /* op3 == olimit */ je .L_4X2_exit /* If (ip1 < ip0) go to exit */ cmpq %ip0, %ip1 jb .L_4X2_exit /* If (ip2 < ip1) go to exit */ cmpq %ip1, %ip2 jb .L_4X2_exit /* If (ip3 < ip2) go to exit */ cmpq %ip2, %ip3 jb .L_4X2_exit #define DECODE(n, idx) \ movq %bits##n, %rax; \ shrq $53, %rax; \ movzwl 0(%dtable,%rax,4),%r8d; \ movzbl 2(%dtable,%rax,4),%r15d; \ movzbl 3(%dtable,%rax,4),%eax; \ movw %r8w, (%op##n); \ shlxq %r15, %bits##n, %bits##n; \ addq %rax, %op##n #define RELOAD_BITS(n) \ bsfq %bits##n, %bits##n; \ movq %bits##n, %rax; \ shrq $3, %bits##n; \ andq $7, %rax; \ subq %bits##n, %ip##n; \ movq (%ip##n), %bits##n; \ orq $1, %bits##n; \ shlxq %rax, %bits##n, %bits##n movq %olimit, 48(%rsp) .p2align 6 .L_4X2_loop_body: /* We clobber r8, so store it on the stack */ movq %r8, 0(%rsp) /* Decode 5 symbols from each of the 4 streams (20 symbols total). 
*/ FOR_EACH_STREAM_WITH_INDEX(DECODE, 0) FOR_EACH_STREAM_WITH_INDEX(DECODE, 1) FOR_EACH_STREAM_WITH_INDEX(DECODE, 2) FOR_EACH_STREAM_WITH_INDEX(DECODE, 3) FOR_EACH_STREAM_WITH_INDEX(DECODE, 4) /* Reload r8 */ movq 0(%rsp), %r8 FOR_EACH_STREAM(RELOAD_BITS) cmp %op3, 48(%rsp) ja .L_4X2_loop_body jmp .L_4X2_compute_olimit #undef DECODE #undef RELOAD_BITS .L_4X2_exit: addq $8, %rsp /* Restore stack (oend & olimit) */ pop %rax /* oend0 */ pop %rax /* oend1 */ pop %rax /* oend2 */ pop %rax /* oend3 */ pop %rax /* ilowest */ pop %rax /* olimit */ pop %rax /* arg */ /* Save ip / op / bits */ movq %ip0, 0(%rax) movq %ip1, 8(%rax) movq %ip2, 16(%rax) movq %ip3, 24(%rax) movq %op0, 32(%rax) movq %op1, 40(%rax) movq %op2, 48(%rax) movq %op3, 56(%rax) movq %bits0, 64(%rax) movq %bits1, 72(%rax) movq %bits2, 80(%rax) movq %bits3, 88(%rax) /* Restore registers */ pop %r15 pop %r14 pop %r13 pop %r12 pop %r11 pop %r10 pop %r9 pop %r8 pop %rdi pop %rsi pop %rbp pop %rdx pop %rcx pop %rbx pop %rax ret #endif
Sappy12dream/eventify
15,018
.cargo/registry/src/index.crates.io-6f17d22bba15001f/zstd-sys-2.0.10+zstd.1.5.6/zstd/lib/decompress/huf_decompress_amd64.S
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include "../common/portability_macros.h" #if defined(__ELF__) && defined(__GNUC__) /* Stack marking * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart */ .section .note.GNU-stack,"",%progbits #if defined(__aarch64__) /* Mark that this assembly supports BTI & PAC, because it is empty for aarch64. * See: https://github.com/facebook/zstd/issues/3841 * See: https://gcc.godbolt.org/z/sqr5T4ffK * See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/ * See: https://reviews.llvm.org/D62609 */ .pushsection .note.gnu.property, "a" .p2align 3 .long 4 /* size of the name - "GNU\0" */ .long 0x10 /* size of descriptor */ .long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */ .asciz "GNU" .long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */ .long 4 /* pr_datasz - 4 bytes */ .long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */ .p2align 3 /* pr_padding - bring everything to 8 byte alignment */ .popsection #endif #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 /* Calling convention: * * %rdi contains the first argument: HUF_DecompressAsmArgs*. * %rbp isn't maintained (no frame pointer). * %rsp contains the stack pointer that grows down. * No red-zone is assumed, only addresses >= %rsp are used. * All register contents are preserved. * * TODO: Support Windows calling convention. 
*/ ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop) ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop) ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop) ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop) .global HUF_decompress4X1_usingDTable_internal_fast_asm_loop .global HUF_decompress4X2_usingDTable_internal_fast_asm_loop .global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop .global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop .text /* Sets up register mappings for clarity. * op[], bits[], dtable & ip[0] each get their own register. * ip[1,2,3] & olimit alias var[]. * %rax is a scratch register. */ #define op0 rsi #define op1 rbx #define op2 rcx #define op3 rdi #define ip0 r8 #define ip1 r9 #define ip2 r10 #define ip3 r11 #define bits0 rbp #define bits1 rdx #define bits2 r12 #define bits3 r13 #define dtable r14 #define olimit r15 /* var[] aliases ip[1,2,3] & olimit * ip[1,2,3] are saved every iteration. * olimit is only used in compute_olimit. */ #define var0 r15 #define var1 r9 #define var2 r10 #define var3 r11 /* 32-bit var registers */ #define vard0 r15d #define vard1 r9d #define vard2 r10d #define vard3 r11d /* Calls X(N) for each stream 0, 1, 2, 3. */ #define FOR_EACH_STREAM(X) \ X(0); \ X(1); \ X(2); \ X(3) /* Calls X(N, idx) for each stream 0, 1, 2, 3. */ #define FOR_EACH_STREAM_WITH_INDEX(X, idx) \ X(0, idx); \ X(1, idx); \ X(2, idx); \ X(3, idx) /* Define both _HUF_* & HUF_* symbols because MacOS * C symbols are prefixed with '_' & Linux symbols aren't. */ _HUF_decompress4X1_usingDTable_internal_fast_asm_loop: HUF_decompress4X1_usingDTable_internal_fast_asm_loop: ZSTD_CET_ENDBRANCH /* Save all registers - even if they are callee saved for simplicity. 
*/ push %rax push %rbx push %rcx push %rdx push %rbp push %rsi push %rdi push %r8 push %r9 push %r10 push %r11 push %r12 push %r13 push %r14 push %r15 /* Read HUF_DecompressAsmArgs* args from %rax */ movq %rdi, %rax movq 0(%rax), %ip0 movq 8(%rax), %ip1 movq 16(%rax), %ip2 movq 24(%rax), %ip3 movq 32(%rax), %op0 movq 40(%rax), %op1 movq 48(%rax), %op2 movq 56(%rax), %op3 movq 64(%rax), %bits0 movq 72(%rax), %bits1 movq 80(%rax), %bits2 movq 88(%rax), %bits3 movq 96(%rax), %dtable push %rax /* argument */ push 104(%rax) /* ilowest */ push 112(%rax) /* oend */ push %olimit /* olimit space */ subq $24, %rsp .L_4X1_compute_olimit: /* Computes how many iterations we can do safely * %r15, %rax may be clobbered * rbx, rdx must be saved * op3 & ip0 mustn't be clobbered */ movq %rbx, 0(%rsp) movq %rdx, 8(%rsp) movq 32(%rsp), %rax /* rax = oend */ subq %op3, %rax /* rax = oend - op3 */ /* r15 = (oend - op3) / 5 */ movabsq $-3689348814741910323, %rdx mulq %rdx movq %rdx, %r15 shrq $2, %r15 movq %ip0, %rax /* rax = ip0 */ movq 40(%rsp), %rdx /* rdx = ilowest */ subq %rdx, %rax /* rax = ip0 - ilowest */ movq %rax, %rbx /* rbx = ip0 - ilowest */ /* rdx = (ip0 - ilowest) / 7 */ movabsq $2635249153387078803, %rdx mulq %rdx subq %rdx, %rbx shrq %rbx addq %rbx, %rdx shrq $2, %rdx /* r15 = min(%rdx, %r15) */ cmpq %rdx, %r15 cmova %rdx, %r15 /* r15 = r15 * 5 */ leaq (%r15, %r15, 4), %r15 /* olimit = op3 + r15 */ addq %op3, %olimit movq 8(%rsp), %rdx movq 0(%rsp), %rbx /* If (op3 + 20 > olimit) */ movq %op3, %rax /* rax = op3 */ cmpq %rax, %olimit /* op3 == olimit */ je .L_4X1_exit /* If (ip1 < ip0) go to exit */ cmpq %ip0, %ip1 jb .L_4X1_exit /* If (ip2 < ip1) go to exit */ cmpq %ip1, %ip2 jb .L_4X1_exit /* If (ip3 < ip2) go to exit */ cmpq %ip2, %ip3 jb .L_4X1_exit /* Reads top 11 bits from bits[n] * Loads dt[bits[n]] into var[n] */ #define GET_NEXT_DELT(n) \ movq $53, %var##n; \ shrxq %var##n, %bits##n, %var##n; \ movzwl (%dtable,%var##n,2),%vard##n /* var[n] must contain the DTable 
entry computed with GET_NEXT_DELT * Moves var[n] to %rax * bits[n] <<= var[n] & 63 * op[n][idx] = %rax >> 8 * %ah is a way to access bits [8, 16) of %rax */ #define DECODE_FROM_DELT(n, idx) \ movq %var##n, %rax; \ shlxq %var##n, %bits##n, %bits##n; \ movb %ah, idx(%op##n) /* Assumes GET_NEXT_DELT has been called. * Calls DECODE_FROM_DELT then GET_NEXT_DELT */ #define DECODE_AND_GET_NEXT(n, idx) \ DECODE_FROM_DELT(n, idx); \ GET_NEXT_DELT(n) \ /* // ctz & nbBytes is stored in bits[n] * // nbBits is stored in %rax * ctz = CTZ[bits[n]] * nbBits = ctz & 7 * nbBytes = ctz >> 3 * op[n] += 5 * ip[n] -= nbBytes * // Note: x86-64 is little-endian ==> no bswap * bits[n] = MEM_readST(ip[n]) | 1 * bits[n] <<= nbBits */ #define RELOAD_BITS(n) \ bsfq %bits##n, %bits##n; \ movq %bits##n, %rax; \ andq $7, %rax; \ shrq $3, %bits##n; \ leaq 5(%op##n), %op##n; \ subq %bits##n, %ip##n; \ movq (%ip##n), %bits##n; \ orq $1, %bits##n; \ shlx %rax, %bits##n, %bits##n /* Store clobbered variables on the stack */ movq %olimit, 24(%rsp) movq %ip1, 0(%rsp) movq %ip2, 8(%rsp) movq %ip3, 16(%rsp) /* Call GET_NEXT_DELT for each stream */ FOR_EACH_STREAM(GET_NEXT_DELT) .p2align 6 .L_4X1_loop_body: /* Decode 5 symbols in each of the 4 streams (20 total) * Must have called GET_NEXT_DELT for each stream */ FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0) FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1) FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2) FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3) FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4) /* Load ip[1,2,3] from stack (var[] aliases them) * ip[] is needed for RELOAD_BITS * Each will be stored back to the stack after RELOAD */ movq 0(%rsp), %ip1 movq 8(%rsp), %ip2 movq 16(%rsp), %ip3 /* Reload each stream & fetch the next table entry * to prepare for the next iteration */ RELOAD_BITS(0) GET_NEXT_DELT(0) RELOAD_BITS(1) movq %ip1, 0(%rsp) GET_NEXT_DELT(1) RELOAD_BITS(2) movq %ip2, 8(%rsp) GET_NEXT_DELT(2) RELOAD_BITS(3) movq %ip3, 
16(%rsp) GET_NEXT_DELT(3) /* If op3 < olimit: continue the loop */ cmp %op3, 24(%rsp) ja .L_4X1_loop_body /* Reload ip[1,2,3] from stack */ movq 0(%rsp), %ip1 movq 8(%rsp), %ip2 movq 16(%rsp), %ip3 /* Re-compute olimit */ jmp .L_4X1_compute_olimit #undef GET_NEXT_DELT #undef DECODE_FROM_DELT #undef DECODE #undef RELOAD_BITS .L_4X1_exit: addq $24, %rsp /* Restore stack (oend & olimit) */ pop %rax /* olimit */ pop %rax /* oend */ pop %rax /* ilowest */ pop %rax /* arg */ /* Save ip / op / bits */ movq %ip0, 0(%rax) movq %ip1, 8(%rax) movq %ip2, 16(%rax) movq %ip3, 24(%rax) movq %op0, 32(%rax) movq %op1, 40(%rax) movq %op2, 48(%rax) movq %op3, 56(%rax) movq %bits0, 64(%rax) movq %bits1, 72(%rax) movq %bits2, 80(%rax) movq %bits3, 88(%rax) /* Restore registers */ pop %r15 pop %r14 pop %r13 pop %r12 pop %r11 pop %r10 pop %r9 pop %r8 pop %rdi pop %rsi pop %rbp pop %rdx pop %rcx pop %rbx pop %rax ret _HUF_decompress4X2_usingDTable_internal_fast_asm_loop: HUF_decompress4X2_usingDTable_internal_fast_asm_loop: ZSTD_CET_ENDBRANCH /* Save all registers - even if they are callee saved for simplicity. 
*/ push %rax push %rbx push %rcx push %rdx push %rbp push %rsi push %rdi push %r8 push %r9 push %r10 push %r11 push %r12 push %r13 push %r14 push %r15 movq %rdi, %rax movq 0(%rax), %ip0 movq 8(%rax), %ip1 movq 16(%rax), %ip2 movq 24(%rax), %ip3 movq 32(%rax), %op0 movq 40(%rax), %op1 movq 48(%rax), %op2 movq 56(%rax), %op3 movq 64(%rax), %bits0 movq 72(%rax), %bits1 movq 80(%rax), %bits2 movq 88(%rax), %bits3 movq 96(%rax), %dtable push %rax /* argument */ push %rax /* olimit */ push 104(%rax) /* ilowest */ movq 112(%rax), %rax push %rax /* oend3 */ movq %op3, %rax push %rax /* oend2 */ movq %op2, %rax push %rax /* oend1 */ movq %op1, %rax push %rax /* oend0 */ /* Scratch space */ subq $8, %rsp .L_4X2_compute_olimit: /* Computes how many iterations we can do safely * %r15, %rax may be clobbered * rdx must be saved * op[1,2,3,4] & ip0 mustn't be clobbered */ movq %rdx, 0(%rsp) /* We can consume up to 7 input bytes each iteration. */ movq %ip0, %rax /* rax = ip0 */ movq 40(%rsp), %rdx /* rdx = ilowest */ subq %rdx, %rax /* rax = ip0 - ilowest */ movq %rax, %r15 /* r15 = ip0 - ilowest */ /* rdx = rax / 7 */ movabsq $2635249153387078803, %rdx mulq %rdx subq %rdx, %r15 shrq %r15 addq %r15, %rdx shrq $2, %rdx /* r15 = (ip0 - ilowest) / 7 */ movq %rdx, %r15 /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */ movq 8(%rsp), %rax /* rax = oend0 */ subq %op0, %rax /* rax = oend0 - op0 */ movq 16(%rsp), %rdx /* rdx = oend1 */ subq %op1, %rdx /* rdx = oend1 - op1 */ cmpq %rax, %rdx cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ movq 24(%rsp), %rax /* rax = oend2 */ subq %op2, %rax /* rax = oend2 - op2 */ cmpq %rax, %rdx cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ movq 32(%rsp), %rax /* rax = oend3 */ subq %op3, %rax /* rax = oend3 - op3 */ cmpq %rax, %rdx cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ movabsq $-3689348814741910323, %rax mulq %rdx shrq $3, %rdx /* rdx = rdx / 10 */ /* r15 = min(%rdx, %r15) */ cmpq %rdx, %r15 cmova %rdx, %r15 /* olimit 
= op3 + 5 * r15 */ movq %r15, %rax leaq (%op3, %rax, 4), %olimit addq %rax, %olimit movq 0(%rsp), %rdx /* If (op3 + 10 > olimit) */ movq %op3, %rax /* rax = op3 */ cmpq %rax, %olimit /* op3 == olimit */ je .L_4X2_exit /* If (ip1 < ip0) go to exit */ cmpq %ip0, %ip1 jb .L_4X2_exit /* If (ip2 < ip1) go to exit */ cmpq %ip1, %ip2 jb .L_4X2_exit /* If (ip3 < ip2) go to exit */ cmpq %ip2, %ip3 jb .L_4X2_exit #define DECODE(n, idx) \ movq %bits##n, %rax; \ shrq $53, %rax; \ movzwl 0(%dtable,%rax,4),%r8d; \ movzbl 2(%dtable,%rax,4),%r15d; \ movzbl 3(%dtable,%rax,4),%eax; \ movw %r8w, (%op##n); \ shlxq %r15, %bits##n, %bits##n; \ addq %rax, %op##n #define RELOAD_BITS(n) \ bsfq %bits##n, %bits##n; \ movq %bits##n, %rax; \ shrq $3, %bits##n; \ andq $7, %rax; \ subq %bits##n, %ip##n; \ movq (%ip##n), %bits##n; \ orq $1, %bits##n; \ shlxq %rax, %bits##n, %bits##n movq %olimit, 48(%rsp) .p2align 6 .L_4X2_loop_body: /* We clobber r8, so store it on the stack */ movq %r8, 0(%rsp) /* Decode 5 symbols from each of the 4 streams (20 symbols total). 
*/ FOR_EACH_STREAM_WITH_INDEX(DECODE, 0) FOR_EACH_STREAM_WITH_INDEX(DECODE, 1) FOR_EACH_STREAM_WITH_INDEX(DECODE, 2) FOR_EACH_STREAM_WITH_INDEX(DECODE, 3) FOR_EACH_STREAM_WITH_INDEX(DECODE, 4) /* Reload r8 */ movq 0(%rsp), %r8 FOR_EACH_STREAM(RELOAD_BITS) cmp %op3, 48(%rsp) ja .L_4X2_loop_body jmp .L_4X2_compute_olimit #undef DECODE #undef RELOAD_BITS .L_4X2_exit: addq $8, %rsp /* Restore stack (oend & olimit) */ pop %rax /* oend0 */ pop %rax /* oend1 */ pop %rax /* oend2 */ pop %rax /* oend3 */ pop %rax /* ilowest */ pop %rax /* olimit */ pop %rax /* arg */ /* Save ip / op / bits */ movq %ip0, 0(%rax) movq %ip1, 8(%rax) movq %ip2, 16(%rax) movq %ip3, 24(%rax) movq %op0, 32(%rax) movq %op1, 40(%rax) movq %op2, 48(%rax) movq %op3, 56(%rax) movq %bits0, 64(%rax) movq %bits1, 72(%rax) movq %bits2, 80(%rax) movq %bits3, 88(%rax) /* Restore registers */ pop %r15 pop %r14 pop %r13 pop %r12 pop %r11 pop %r10 pop %r9 pop %r8 pop %rdi pop %rsi pop %rbp pop %rdx pop %rcx pop %rbx pop %rax ret #endif
SavingHeaume/rust_oskernel
3,385
kernel/src/trap/trap.S
.altmacro .macro SAVE_GP n # 宏定义:将寄存器 x\n 存储到栈偏移 \n*8 的位置 sd x\n, \n*8(sp) .endm .macro LOAD_GP n # 宏定义:从栈偏移 \n*8 的位置加载数据到 x\n ld x\n, \n*8(sp) .endm .section .text.trampoline # 定义代码段 .text.trampoline .globl __alltraps # 导出用户态 Trap 入口符号 .globl __restore # 导出用户态 Trap 恢复符号 .globl __alltraps_k # 导出内核态 Trap 入口符号 .globl __restore_k # 导出内核态 Trap 恢复符号 .align 2 # 按 4 字节对齐代码 # ==================== 用户态 Trap 处理 ==================== __alltraps: csrrw sp, sscratch, sp # 交换 sp 和 sscratch,此时 sp 指向用户 Trap 上下文 sd x1, 1*8(sp) # 保存返回地址 (x1/ra) sd x3, 3*8(sp) # 保存全局指针 (x3/gp),跳过 x2(sp)/x4(tp) .set n, 5 # 从 x5 开始循环保存通用寄存器 .rept 27 # 保存 x5~x31(共27个寄存器) SAVE_GP %n .set n, n+1 .endr # 保存关键控制寄存器 csrr t0, sstatus # 读取当前状态寄存器 sstatus csrr t1, sepc # 读取异常程序计数器 sepc sd t0, 32*8(sp) # 存储 sstatus 到 Trap 上下文第32项 sd t1, 33*8(sp) # 存储 sepc 到第33项 csrr t2, sscratch # 从 sscratch 获取用户栈指针 sd t2, 2*8(sp) # 保存用户栈指针到 Trap 上下文的 x2 位置 # 切换到内核环境 ld t0, 34*8(sp) # 加载内核页表寄存器 satp 的值 ld t1, 36*8(sp) # 加载用户态 Trap 处理函数地址 ld sp, 35*8(sp) # 切换 sp 到内核栈 csrw satp, t0 # 设置 satp 切换到内核地址空间 sfence.vma # 刷新 TLB 确保地址空间切换生效 jr t1 # 跳转到用户态 Trap 处理函数 __restore: csrw satp, a1 # 恢复用户页表(a1 为用户空间 satp 值) sfence.vma # 刷新 TLB csrw sscratch, a0 # 将用户 Trap 上下文地址存入 sscratch mv sp, a0 # sp 指向用户 Trap 上下文 # 恢复控制寄存器 ld t0, 32*8(sp) # 加载保存的 sstatus ld t1, 33*8(sp) # 加载保存的 sepc csrw sstatus, t0 # 恢复 sstatus csrw sepc, t1 # 恢复 sepc # 恢复通用寄存器 ld x1, 1*8(sp) # 恢复返回地址 (x1/ra) ld x3, 3*8(sp) # 恢复全局指针 (x3/gp) .set n, 5 .rept 27 # 恢复 x5~x31 LOAD_GP %n .set n, n+1 .endr ld sp, 2*8(sp) # 恢复用户栈指针(从 Trap 上下文的 x2 位置) sret # 返回用户态执行 # ==================== 内核态 Trap 处理 ==================== .align 2 __alltraps_k: addi sp, sp, -34*8 # 在内核栈分配 34 项空间(x1~x31 + sstatus + sepc) sd x1, 1*8(sp) # 保存返回地址 (x1/ra) sd x3, 3*8(sp) # 保存全局指针 (x3/gp) .set n, 5 .rept 27 # 保存 x5~x31 SAVE_GP %n .set n, n+1 .endr # 保存内核态控制寄存器 csrr t0, sstatus # 读取当前 sstatus csrr t1, sepc # 读取当前 sepc sd t0, 32*8(sp) # 存储 sstatus 到栈第32项 sd t1, 33*8(sp) # 存储 sepc 到栈第33项 mv a0, sp # 
将栈指针作为参数传递给处理函数(a0) csrr t2, sscratch # 从 sscratch 读取内核处理函数地址 jalr t2 # 跳转到处理函数,同时保存返回地址到 ra __restore_k: # 恢复内核态控制寄存器 ld t0, 32*8(sp) # 加载保存的 sstatus ld t1, 33*8(sp) # 加载保存的 sepc csrw sstatus, t0 # 恢复 sstatus csrw sepc, t1 # 恢复 sepc # 恢复通用寄存器 ld x1, 1*8(sp) # 恢复返回地址 (x1/ra) ld x3, 3*8(sp) # 恢复全局指针 (x3/gp) .set n, 5 .rept 27 # 恢复 x5~x31 LOAD_GP %n .set n, n+1 .endr addi sp, sp, 34*8 # 释放内核栈空间(34 项 * 8 字节) sret # 返回内核态继续执行
SavingHeaume/rust_oskernel
757
kernel/src/task/switch.S
.altmacro .macro SAVE_SN n sd s\n, (\n+2)*8(a0) .endm .macro LOAD_SN n ld s\n, (\n+2)*8(a1) .endm .section .text .globl __switch __switch: # 保存当前任务上下文 sd sp, 8(a0) # 保存当前内核栈指针 sp 到 TaskContext.sp 字段(偏移 8) sd ra, 0(a0) # 保存返回地址 ra 到 TaskContext.ra 字段(偏移 0) .set n, 0 # 初始化计数器 n=0(对应 s0 寄存器) .rept 12 # 重复 12 次(保存 s0~s11)调用宏 SAVE_SN,保存 s[n] 到偏移 (n+2)*8 处 SAVE_SN %n .set n, n + 1 .endr # 加载下一个任务上下文 ld ra, 0(a1) # 从下一个任务的 TaskContext.ra 加载返回地址 .set n, 0 # 初始化计数器 n=0 .rept 12 # 重复 12 次(加载 s0~s11) LOAD_SN %n # 调用宏 LOAD_SN,从偏移 (n+2)*8 处加载到 s[n] .set n, n + 1 .endr ld sp, 8(a1) # 加载下一个任务的内核栈指针 sp # 切换控制流 ret # 跳转到 ra 寄存器指向的地址(即下一个任务的执行点)
Sc1pex/rustos
2,548
kernel/src/exception/exception.S
.macro CALL_WITH_CONTEXT handler __vector_\handler: // Make room for registers sub sp, sp, #16 * 17 stp x0, x1, [sp, #16 * 0] stp x2, x3, [sp, #16 * 1] stp x4, x5, [sp, #16 * 2] stp x6, x7, [sp, #16 * 3] stp x8, x9, [sp, #16 * 4] stp x10, x11, [sp, #16 * 5] stp x12, x13, [sp, #16 * 6] stp x14, x15, [sp, #16 * 7] stp x16, x17, [sp, #16 * 8] stp x18, x19, [sp, #16 * 9] stp x20, x21, [sp, #16 * 10] stp x22, x23, [sp, #16 * 11] stp x24, x25, [sp, #16 * 12] stp x26, x27, [sp, #16 * 13] stp x28, x29, [sp, #16 * 14] mrs x1, ELR_EL1 mrs x2, SPSR_EL1 mrs x3, ESR_EL1 stp lr, x1, [sp, #16 * 15] stp x2, x3, [sp, #16 * 16] // x0 is the first argument for a function mov x0, sp bl \handler b __exception_restore_context .endm .section .text .align 11 .global __exception_vector_start __exception_vector_start: // Exceptions from current EL while using SP_EL0 .org 0x000 CALL_WITH_CONTEXT current_el0_sync .org 0x080 CALL_WITH_CONTEXT current_el0_irq .org 0x100 CALL_WITH_CONTEXT current_el0_fiq .org 0x180 CALL_WITH_CONTEXT current_el0_serror // Exceptions from cuurrent EL while using SP_ELx, x != 0 .org 0x200 CALL_WITH_CONTEXT current_elx_sync .org 0x280 CALL_WITH_CONTEXT current_elx_irq .org 0x300 CALL_WITH_CONTEXT current_elx_fiq .org 0x380 CALL_WITH_CONTEXT current_elx_serror // Exceptions from a lowe EL, AArch64 .org 0x400 CALL_WITH_CONTEXT lower_el_aarch64_sync .org 0x480 CALL_WITH_CONTEXT lower_el_aarch64_irq .org 0x500 CALL_WITH_CONTEXT lower_el_aarch64_fiq .org 0x580 CALL_WITH_CONTEXT lower_el_aarch64_serror // Exceptions from a lowe EL, AArch32 // These are probably impossible .org 0x600 CALL_WITH_CONTEXT lower_el_aarch32_sync .org 0x680 CALL_WITH_CONTEXT lower_el_aarch32_irq .org 0x700 CALL_WITH_CONTEXT lower_el_aarch32_fiq .org 0x780 CALL_WITH_CONTEXT lower_el_aarch32_serror __exception_restore_context: ldr w19, [sp, #16 * 16] ldp lr, x20, [sp, #16 * 15] msr SPSR_EL1, x19 msr ELR_EL1, x20 ldp x0, x1, [sp, #16 * 0] ldp x2, x3, [sp, #16 * 1] ldp x4, x5, [sp, #16 * 2] ldp x6, 
x7, [sp, #16 * 3] ldp x8, x9, [sp, #16 * 4] ldp x10, x11, [sp, #16 * 5] ldp x12, x13, [sp, #16 * 6] ldp x14, x15, [sp, #16 * 7] ldp x16, x17, [sp, #16 * 8] ldp x18, x19, [sp, #16 * 9] ldp x20, x21, [sp, #16 * 10] ldp x22, x23, [sp, #16 * 11] ldp x24, x25, [sp, #16 * 12] ldp x26, x27, [sp, #16 * 13] ldp x28, x29, [sp, #16 * 14] add sp, sp, #16 * 17 eret
Sc1pex/rustos
1,025
chainloader/src/boot/boot.S
.section ".text.boot" .global _start // Load the address of a symbol into a register, absolute. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_ABS register, symbol movz \register, #:abs_g2:\symbol movk \register, #:abs_g1_nc:\symbol movk \register, #:abs_g0_nc:\symbol .endm _start: mrs x1, mpidr_el1 and x1, x1, #3 cbnz x1, 3f ldr x1, =__bss_start ldr w2, =__bss_size 1: cbz w2, 2f str xzr, [x1], #8 sub w2, w2, #1 cbnz w2, 1b 2: // Kernel is loaded at __load_addr, but is linked to be loaded at __bin_start // So move it there, making space for a new kernel to be sent over serial ldr x1, =__load_addr ldr x2, =__bin_start ldr x3, =__bin_end 4: ldr x4, [x1], #8 str x4, [x2], #8 cmp x2, x3 bne 4b ldr x1, =_start mov sp, x1 // Now the kernel is at __bin_start, where the linker assumed it would be ADR_ABS x1, __start_rust br x1 3: wfe b 3b
schechenkin/rust-raspberrypi-4-os
4,259
kernel/src/_arch/aarch64/exception.s
// SPDX-License-Identifier: MIT OR Apache-2.0 // // Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com> //-------------------------------------------------------------------------------------------------- // Definitions //-------------------------------------------------------------------------------------------------- /// Call the function provided by parameter `\handler` after saving the exception context. Provide /// the context as the first parameter to '\handler'. .macro CALL_WITH_CONTEXT handler __vector_\handler: // Make room on the stack for the exception context. sub sp, sp, #16 * 17 // Store all general purpose registers on the stack. stp x0, x1, [sp, #16 * 0] stp x2, x3, [sp, #16 * 1] stp x4, x5, [sp, #16 * 2] stp x6, x7, [sp, #16 * 3] stp x8, x9, [sp, #16 * 4] stp x10, x11, [sp, #16 * 5] stp x12, x13, [sp, #16 * 6] stp x14, x15, [sp, #16 * 7] stp x16, x17, [sp, #16 * 8] stp x18, x19, [sp, #16 * 9] stp x20, x21, [sp, #16 * 10] stp x22, x23, [sp, #16 * 11] stp x24, x25, [sp, #16 * 12] stp x26, x27, [sp, #16 * 13] stp x28, x29, [sp, #16 * 14] // Add the exception link register (ELR_EL1), saved program status (SPSR_EL1) and exception // syndrome register (ESR_EL1). mrs x1, ELR_EL1 mrs x2, SPSR_EL1 mrs x3, ESR_EL1 stp lr, x1, [sp, #16 * 15] stp x2, x3, [sp, #16 * 16] // x0 is the first argument for the function called through `\handler`. mov x0, sp // Call `\handler`. bl \handler // After returning from exception handling code, replay the saved context and return via // `eret`. b __exception_restore_context .size __vector_\handler, . 
- __vector_\handler .type __vector_\handler, function .endm .macro FIQ_SUSPEND 1: wfe b 1b .endm //-------------------------------------------------------------------------------------------------- // Private Code //-------------------------------------------------------------------------------------------------- .section .text //------------------------------------------------------------------------------ // The exception vector table. //------------------------------------------------------------------------------ // Align by 2^11 bytes, as demanded by ARMv8-A. Same as ALIGN(2048) in an ld script. .align 11 // Export a symbol for the Rust code to use. __exception_vector_start: // Current exception level with SP_EL0. // // .org sets the offset relative to section start. // // # Safety // // - It must be ensured that `CALL_WITH_CONTEXT` <= 0x80 bytes. .org 0x000 CALL_WITH_CONTEXT current_el0_synchronous .org 0x080 CALL_WITH_CONTEXT current_el0_irq .org 0x100 FIQ_SUSPEND .org 0x180 CALL_WITH_CONTEXT current_el0_serror // Current exception level with SP_ELx, x > 0. 
.org 0x200 CALL_WITH_CONTEXT current_elx_synchronous .org 0x280 CALL_WITH_CONTEXT current_elx_irq .org 0x300 FIQ_SUSPEND .org 0x380 CALL_WITH_CONTEXT current_elx_serror // Lower exception level, AArch64 .org 0x400 CALL_WITH_CONTEXT lower_aarch64_synchronous .org 0x480 CALL_WITH_CONTEXT lower_aarch64_irq .org 0x500 FIQ_SUSPEND .org 0x580 CALL_WITH_CONTEXT lower_aarch64_serror // Lower exception level, AArch32 .org 0x600 CALL_WITH_CONTEXT lower_aarch32_synchronous .org 0x680 CALL_WITH_CONTEXT lower_aarch32_irq .org 0x700 FIQ_SUSPEND .org 0x780 CALL_WITH_CONTEXT lower_aarch32_serror .org 0x800 //------------------------------------------------------------------------------ // fn __exception_restore_context() //------------------------------------------------------------------------------ __exception_restore_context: ldr w19, [sp, #16 * 16] ldp lr, x20, [sp, #16 * 15] msr SPSR_EL1, x19 msr ELR_EL1, x20 ldp x0, x1, [sp, #16 * 0] ldp x2, x3, [sp, #16 * 1] ldp x4, x5, [sp, #16 * 2] ldp x6, x7, [sp, #16 * 3] ldp x8, x9, [sp, #16 * 4] ldp x10, x11, [sp, #16 * 5] ldp x12, x13, [sp, #16 * 6] ldp x14, x15, [sp, #16 * 7] ldp x16, x17, [sp, #16 * 8] ldp x18, x19, [sp, #16 * 9] ldp x20, x21, [sp, #16 * 10] ldp x22, x23, [sp, #16 * 11] ldp x24, x25, [sp, #16 * 12] ldp x26, x27, [sp, #16 * 13] ldp x28, x29, [sp, #16 * 14] add sp, sp, #16 * 17 eret .size __exception_restore_context, . - __exception_restore_context .type __exception_restore_context, function
schechenkin/rust-raspberrypi-4-os
3,575
kernel/src/_arch/aarch64/cpu/boot.s
// SPDX-License-Identifier: MIT OR Apache-2.0 // // Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com> //-------------------------------------------------------------------------------------------------- // Definitions //-------------------------------------------------------------------------------------------------- // Load the address of a symbol into a register, PC-relative. // // The symbol must lie within +/- 4 GiB of the Program Counter. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_REL register, symbol adrp \register, \symbol add \register, \register, #:lo12:\symbol .endm // Load the address of a symbol into a register, absolute. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_ABS register, symbol movz \register, #:abs_g3:\symbol movk \register, #:abs_g2_nc:\symbol movk \register, #:abs_g1_nc:\symbol movk \register, #:abs_g0_nc:\symbol .endm //-------------------------------------------------------------------------------------------------- // Public Code //-------------------------------------------------------------------------------------------------- .section .text._start //------------------------------------------------------------------------------ // fn _start() //------------------------------------------------------------------------------ _start: // Only proceed if the core executes in EL2. Park it otherwise. mrs x0, CurrentEL cmp x0, {CONST_CURRENTEL_EL2} b.ne .L_parking_loop // Only proceed on the boot core. Park it otherwise. mrs x1, MPIDR_EL1 and x1, x1, {CONST_CORE_ID_MASK} ldr x2, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs cmp x1, x2 b.ne .L_parking_loop // If execution reaches here, it is the boot core. // Initialize DRAM. 
ADR_REL x0, __bss_start ADR_REL x1, __bss_end_exclusive .L_bss_init_loop: cmp x0, x1 b.eq .L_prepare_rust stp xzr, xzr, [x0], #16 b .L_bss_init_loop // Prepare the jump to Rust code. .L_prepare_rust: // Load the base address of the kernel's translation tables. ldr x0, PHYS_KERNEL_TABLES_BASE_ADDR // provided by bsp/__board_name__/memory/mmu.rs // Load the _absolute_ addresses of the following symbols. Since the kernel is linked at // the top of the 64 bit address space, these are effectively virtual addresses. ADR_ABS x1, __boot_core_stack_end_exclusive ADR_ABS x2, kernel_init // Load the PC-relative address of the stack and set the stack pointer. // // Since _start() is the first function that runs after the firmware has loaded the kernel // into memory, retrieving this symbol PC-relative returns the "physical" address. // // Setting the stack pointer to this value ensures that anything that still runs in EL2, // until the kernel returns to EL1 with the MMU enabled, works as well. After the return to // EL1, the virtual address of the stack retrieved above will be used. ADR_REL x3, __boot_core_stack_end_exclusive mov sp, x3 // Read the CPU's timer counter frequency and store it in ARCH_TIMER_COUNTER_FREQUENCY. // Abort if the frequency read back as 0. ADR_REL x4, ARCH_TIMER_COUNTER_FREQUENCY // provided by aarch64/time.rs mrs x5, CNTFRQ_EL0 cmp x5, xzr b.eq .L_parking_loop str w5, [x4] // Jump to Rust code. x0, x1 and x2 hold the function arguments provided to _start_rust(). b _start_rust // Infinitely wait for events (aka "park the core"). .L_parking_loop: wfe b .L_parking_loop .size _start, . - _start .type _start, function .global _start
Scvpn/vvip
2,249
websocket-python/websocket.s
#!/bin/bash clear echo Installing Websocket-SSH Python sleep 1 echo Sila Tunggu Sebentar... sleep 0.5 cd # // SYSTEM WEBSOCKET HTTPS cat <<EOF> /etc/systemd/system/ws-https.service [Unit] Description=Python Proxy Documentation=https://t.me/Hendra2012 After=network.target nss-lookup.target [Service] Type=simple User=root CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE NoNewPrivileges=true Restart=on-failure ExecStart=/usr/bin/python -O /usr/local/bin/ws-https [Install] WantedBy=multi-user.target EOF # // SYSTEM WEBSOCKET HTTP cat <<EOF> /etc/systemd/system/ws-http.service [Unit] Description=Python Proxy Documentation=https://t.me/Hendra2012 After=network.target nss-lookup.target [Service] Type=simple User=root CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE NoNewPrivileges=true ExecStart=/usr/bin/python -O /usr/local/bin/ws-http Restart=on-failure [Install] WantedBy=multi-user.target EOF # // SYSTEM WEBSOCKET OVPN cat <<EOF> /etc/systemd/system/ws-ovpn.service [Unit] Description=Python Proxy Documentation=https://t.me/Hendra2012 After=network.target nss-lookup.target [Service] Type=simple User=root CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE NoNewPrivileges=true ExecStart=/usr/bin/python -O /usr/local/bin/ws-ovpn 2086 Restart=on-failure [Install] WantedBy=multi-user.target EOF # // PYTHON WEBSOCKET TLS && NONE wget -q -O /usr/local/bin/ws-https https://raw.githubusercontent.com/Scvpn/vvip/main/websocket-python/ws-https; chmod +x /usr/local/bin/ws-https # // PYTHON WEBSOCKET DROPBEAR wget -q -O /usr/local/bin/ws-http https://raw.githubusercontent.com/Scvpn/vvip/main/websocket-python/ws-http; chmod +x /usr/local/bin/ws-http # // PYTHON WEBSOCKET OVPN wget -q -O /usr/local/bin/ws-ovpn https://raw.githubusercontent.com/Scvpn/vvip/main/websocket-python/ws-ovpn; chmod +x 
/usr/local/bin/ws-ovpn # // RESTART && ENABLE SSHVPN WEBSOCKET TLS systemctl daemon-reload systemctl enable ws-https systemctl restart ws-https systemctl enable ws-http systemctl restart ws-http systemctl enable ws-ovpn systemctl restart ws-ovpn
semyeong-yu/RadFoam
66,735
external/submodules/mesa/src/util/blake3/blake3_avx2_x86-64_windows_gnu.S
.intel_syntax noprefix .global _blake3_hash_many_avx2 .global blake3_hash_many_avx2 .section .text .p2align 6 _blake3_hash_many_avx2: blake3_hash_many_avx2: push r15 push r14 push r13 push r12 push rsi push rdi push rbx push rbp mov rbp, rsp sub rsp, 880 and rsp, 0xFFFFFFFFFFFFFFC0 vmovdqa xmmword ptr [rsp+0x2D0], xmm6 vmovdqa xmmword ptr [rsp+0x2E0], xmm7 vmovdqa xmmword ptr [rsp+0x2F0], xmm8 vmovdqa xmmword ptr [rsp+0x300], xmm9 vmovdqa xmmword ptr [rsp+0x310], xmm10 vmovdqa xmmword ptr [rsp+0x320], xmm11 vmovdqa xmmword ptr [rsp+0x330], xmm12 vmovdqa xmmword ptr [rsp+0x340], xmm13 vmovdqa xmmword ptr [rsp+0x350], xmm14 vmovdqa xmmword ptr [rsp+0x360], xmm15 mov rdi, rcx mov rsi, rdx mov rdx, r8 mov rcx, r9 mov r8, qword ptr [rbp+0x68] movzx r9, byte ptr [rbp+0x70] neg r9d vmovd xmm0, r9d vpbroadcastd ymm0, xmm0 vmovdqa ymmword ptr [rsp+0x260], ymm0 vpand ymm1, ymm0, ymmword ptr [ADD0+rip] vpand ymm2, ymm0, ymmword ptr [ADD1+rip] vmovdqa ymmword ptr [rsp+0x2A0], ymm2 vmovd xmm2, r8d vpbroadcastd ymm2, xmm2 vpaddd ymm2, ymm2, ymm1 vmovdqa ymmword ptr [rsp+0x220], ymm2 vpxor ymm1, ymm1, ymmword ptr [CMP_MSB_MASK+rip] vpxor ymm2, ymm2, ymmword ptr [CMP_MSB_MASK+rip] vpcmpgtd ymm2, ymm1, ymm2 shr r8, 32 vmovd xmm3, r8d vpbroadcastd ymm3, xmm3 vpsubd ymm3, ymm3, ymm2 vmovdqa ymmword ptr [rsp+0x240], ymm3 shl rdx, 6 mov qword ptr [rsp+0x2C0], rdx cmp rsi, 8 jc 3f 2: vpbroadcastd ymm0, dword ptr [rcx] vpbroadcastd ymm1, dword ptr [rcx+0x4] vpbroadcastd ymm2, dword ptr [rcx+0x8] vpbroadcastd ymm3, dword ptr [rcx+0xC] vpbroadcastd ymm4, dword ptr [rcx+0x10] vpbroadcastd ymm5, dword ptr [rcx+0x14] vpbroadcastd ymm6, dword ptr [rcx+0x18] vpbroadcastd ymm7, dword ptr [rcx+0x1C] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov r12, qword ptr [rdi+0x20] mov r13, qword ptr [rdi+0x28] mov r14, qword ptr [rdi+0x30] mov r15, qword ptr [rdi+0x38] movzx eax, byte ptr [rbp+0x78] movzx ebx, byte ptr [rbp+0x80] or eax, 
ebx xor edx, edx .p2align 5 9: movzx ebx, byte ptr [rbp+0x88] or ebx, eax add rdx, 64 cmp rdx, qword ptr [rsp+0x2C0] cmove eax, ebx mov dword ptr [rsp+0x200], eax vmovups xmm8, xmmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x40] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x40] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x40] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0x20], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0x40], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x60], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x30] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x30] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x30] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x80], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0xA0], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0xC0], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0xE0], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x20] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups 
xmm10, xmmword ptr [r10+rdx-0x20] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x20] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x100], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0x120], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0x140], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x160], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x10] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x10] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x10] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x180], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0x1A0], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0x1C0], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x1E0], ymm11 vpbroadcastd ymm15, dword ptr [rsp+0x200] prefetcht0 [r8+rdx+0x80] prefetcht0 [r12+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r13+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r14+rdx+0x80] prefetcht0 [r11+rdx+0x80] prefetcht0 [r15+rdx+0x80] vpaddd ymm0, ymm0, ymmword ptr [rsp] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm0, ymmword ptr [rsp+0x220] vpxor ymm13, ymm1, ymmword ptr [rsp+0x240] vpxor ymm14, ymm2, ymmword ptr [BLAKE3_BLOCK_LEN+rip] vpxor ymm15, ymm3, 
ymm15 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [BLAKE3_IV_0+rip] vpaddd ymm9, ymm13, ymmword ptr [BLAKE3_IV_1+rip] vpaddd ymm10, ymm14, ymmword ptr [BLAKE3_IV_2+rip] vpaddd ymm11, ymm15, ymmword ptr [BLAKE3_IV_3+rip] vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x100] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, 
ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xE0] vpaddd 
ymm3, ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20] vpaddd ymm1, ymm1, ymmword 
ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, 
ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, 
ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x160] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xA0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor 
ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 
25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa 
ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x180] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x140] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 
vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword 
ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd 
ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xC0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, 
ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1E0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, 
xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 
vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vpxor ymm0, ymm0, ymm8 vpxor ymm1, ymm1, ymm9 vpxor ymm2, ymm2, ymm10 vpxor ymm3, ymm3, ymm11 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpxor ymm4, ymm4, ymm12 vpxor ymm5, ymm5, ymm13 vpxor ymm6, ymm6, ymm14 vpxor ymm7, ymm7, ymm15 movzx eax, byte ptr [rbp+0x78] jne 9b mov rbx, qword ptr [rbp+0x90] vunpcklps ymm8, ymm0, ymm1 vunpcklps ymm9, ymm2, ymm3 vunpckhps ymm10, ymm0, ymm1 vunpcklps ymm11, ymm4, ymm5 vunpcklps ymm0, ymm6, ymm7 vshufps ymm12, ymm8, ymm9, 78 vblendps ymm1, ymm8, ymm12, 0xCC vshufps ymm8, ymm11, ymm0, 78 vunpckhps ymm13, ymm2, ymm3 vblendps ymm2, ymm11, ymm8, 0xCC vblendps ymm3, ymm12, ymm9, 0xCC vperm2f128 ymm12, ymm1, ymm2, 0x20 vmovups ymmword ptr [rbx], ymm12 vunpckhps ymm14, ymm4, ymm5 vblendps ymm4, ymm8, ymm0, 0xCC vunpckhps ymm15, ymm6, ymm7 vperm2f128 ymm7, ymm3, ymm4, 0x20 vmovups ymmword ptr [rbx+0x20], ymm7 vshufps ymm5, ymm10, ymm13, 78 vblendps ymm6, ymm5, ymm13, 0xCC vshufps ymm13, ymm14, ymm15, 78 vblendps ymm10, ymm10, ymm5, 0xCC vblendps ymm14, ymm14, ymm13, 0xCC vperm2f128 ymm8, ymm10, ymm14, 0x20 vmovups ymmword ptr [rbx+0x40], ymm8 vblendps ymm15, ymm13, ymm15, 0xCC vperm2f128 ymm13, ymm6, ymm15, 0x20 vmovups ymmword ptr [rbx+0x60], ymm13 vperm2f128 ymm9, ymm1, ymm2, 0x31 vperm2f128 ymm11, ymm3, ymm4, 0x31 vmovups ymmword ptr [rbx+0x80], ymm9 vperm2f128 ymm14, ymm10, ymm14, 0x31 vperm2f128 ymm15, 
ymm6, ymm15, 0x31 vmovups ymmword ptr [rbx+0xA0], ymm11 vmovups ymmword ptr [rbx+0xC0], ymm14 vmovups ymmword ptr [rbx+0xE0], ymm15 vmovdqa ymm0, ymmword ptr [rsp+0x2A0] vpaddd ymm1, ymm0, ymmword ptr [rsp+0x220] vmovdqa ymmword ptr [rsp+0x220], ymm1 vpxor ymm0, ymm0, ymmword ptr [CMP_MSB_MASK+rip] vpxor ymm2, ymm1, ymmword ptr [CMP_MSB_MASK+rip] vpcmpgtd ymm2, ymm0, ymm2 vmovdqa ymm0, ymmword ptr [rsp+0x240] vpsubd ymm2, ymm0, ymm2 vmovdqa ymmword ptr [rsp+0x240], ymm2 add rdi, 64 add rbx, 256 mov qword ptr [rbp+0x90], rbx sub rsi, 8 cmp rsi, 8 jnc 2b test rsi, rsi jnz 3f 4: vzeroupper vmovdqa xmm6, xmmword ptr [rsp+0x2D0] vmovdqa xmm7, xmmword ptr [rsp+0x2E0] vmovdqa xmm8, xmmword ptr [rsp+0x2F0] vmovdqa xmm9, xmmword ptr [rsp+0x300] vmovdqa xmm10, xmmword ptr [rsp+0x310] vmovdqa xmm11, xmmword ptr [rsp+0x320] vmovdqa xmm12, xmmword ptr [rsp+0x330] vmovdqa xmm13, xmmword ptr [rsp+0x340] vmovdqa xmm14, xmmword ptr [rsp+0x350] vmovdqa xmm15, xmmword ptr [rsp+0x360] mov rsp, rbp pop rbp pop rbx pop rdi pop rsi pop r12 pop r13 pop r14 pop r15 ret .p2align 5 3: mov rbx, qword ptr [rbp+0x90] mov r15, qword ptr [rsp+0x2C0] movzx r13d, byte ptr [rbp+0x78] movzx r12d, byte ptr [rbp+0x88] test rsi, 0x4 je 3f vbroadcasti128 ymm0, xmmword ptr [rcx] vbroadcasti128 ymm1, xmmword ptr [rcx+0x10] vmovdqa ymm8, ymm0 vmovdqa ymm9, ymm1 vbroadcasti128 ymm12, xmmword ptr [rsp+0x220] vbroadcasti128 ymm13, xmmword ptr [rsp+0x240] vpunpckldq ymm14, ymm12, ymm13 vpunpckhdq ymm15, ymm12, ymm13 vpermq ymm14, ymm14, 0x50 vpermq ymm15, ymm15, 0x50 vbroadcasti128 ymm12, xmmword ptr [BLAKE3_BLOCK_LEN+rip] vpblendd ymm14, ymm14, ymm12, 0x44 vpblendd ymm15, ymm15, ymm12, 0x44 vmovdqa ymmword ptr [rsp], ymm14 vmovdqa ymmword ptr [rsp+0x20], ymm15 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d 
mov dword ptr [rsp+0x200], eax vmovups ymm2, ymmword ptr [r8+rdx-0x40] vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x40], 0x01 vmovups ymm3, ymmword ptr [r8+rdx-0x30] vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x30], 0x01 vshufps ymm4, ymm2, ymm3, 136 vshufps ymm5, ymm2, ymm3, 221 vmovups ymm2, ymmword ptr [r8+rdx-0x20] vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x20], 0x01 vmovups ymm3, ymmword ptr [r8+rdx-0x10] vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x10], 0x01 vshufps ymm6, ymm2, ymm3, 136 vshufps ymm7, ymm2, ymm3, 221 vpshufd ymm6, ymm6, 0x93 vpshufd ymm7, ymm7, 0x93 vmovups ymm10, ymmword ptr [r10+rdx-0x40] vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x40], 0x01 vmovups ymm11, ymmword ptr [r10+rdx-0x30] vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x30], 0x01 vshufps ymm12, ymm10, ymm11, 136 vshufps ymm13, ymm10, ymm11, 221 vmovups ymm10, ymmword ptr [r10+rdx-0x20] vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x20], 0x01 vmovups ymm11, ymmword ptr [r10+rdx-0x10] vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x10], 0x01 vshufps ymm14, ymm10, ymm11, 136 vshufps ymm15, ymm10, ymm11, 221 vpshufd ymm14, ymm14, 0x93 vpshufd ymm15, ymm15, 0x93 vpbroadcastd ymm2, dword ptr [rsp+0x200] vmovdqa ymm3, ymmword ptr [rsp] vmovdqa ymm11, ymmword ptr [rsp+0x20] vpblendd ymm3, ymm3, ymm2, 0x88 vpblendd ymm11, ymm11, ymm2, 0x88 vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip] vmovdqa ymm10, ymm2 mov al, 7 9: vpaddd ymm0, ymm0, ymm4 vpaddd ymm8, ymm8, ymm12 vmovdqa ymmword ptr [rsp+0x40], ymm4 nop vmovdqa ymmword ptr [rsp+0x60], ymm12 nop vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT16+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 12 vpslld ymm9, ymm9, 20 vpor ymm9, ymm9, ymm4 vpaddd 
ymm0, ymm0, ymm5 vpaddd ymm8, ymm8, ymm13 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vmovdqa ymmword ptr [rsp+0x80], ymm5 vmovdqa ymmword ptr [rsp+0xA0], ymm13 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT8+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 7 vpslld ymm9, ymm9, 25 vpor ymm9, ymm9, ymm4 vpshufd ymm0, ymm0, 0x93 vpshufd ymm8, ymm8, 0x93 vpshufd ymm3, ymm3, 0x4E vpshufd ymm11, ymm11, 0x4E vpshufd ymm2, ymm2, 0x39 vpshufd ymm10, ymm10, 0x39 vpaddd ymm0, ymm0, ymm6 vpaddd ymm8, ymm8, ymm14 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT16+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 12 vpslld ymm9, ymm9, 20 vpor ymm9, ymm9, ymm4 vpaddd ymm0, ymm0, ymm7 vpaddd ymm8, ymm8, ymm15 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT8+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 7 vpslld ymm9, ymm9, 25 vpor ymm9, ymm9, ymm4 vpshufd ymm0, ymm0, 0x39 vpshufd ymm8, ymm8, 0x39 vpshufd ymm3, ymm3, 0x4E vpshufd ymm11, ymm11, 0x4E vpshufd ymm2, ymm2, 0x93 vpshufd ymm10, ymm10, 0x93 dec al je 9f vmovdqa ymm4, ymmword ptr [rsp+0x40] vmovdqa ymm5, ymmword ptr [rsp+0x80] vshufps ymm12, ymm4, ymm5, 214 vpshufd ymm13, ymm4, 0x0F vpshufd ymm4, ymm12, 0x39 vshufps ymm12, ymm6, ymm7, 250 vpblendd 
ymm13, ymm13, ymm12, 0xAA vpunpcklqdq ymm12, ymm7, ymm5 vpblendd ymm12, ymm12, ymm6, 0x88 vpshufd ymm12, ymm12, 0x78 vpunpckhdq ymm5, ymm5, ymm7 vpunpckldq ymm6, ymm6, ymm5 vpshufd ymm7, ymm6, 0x1E vmovdqa ymmword ptr [rsp+0x40], ymm13 vmovdqa ymmword ptr [rsp+0x80], ymm12 vmovdqa ymm12, ymmword ptr [rsp+0x60] vmovdqa ymm13, ymmword ptr [rsp+0xA0] vshufps ymm5, ymm12, ymm13, 214 vpshufd ymm6, ymm12, 0x0F vpshufd ymm12, ymm5, 0x39 vshufps ymm5, ymm14, ymm15, 250 vpblendd ymm6, ymm6, ymm5, 0xAA vpunpcklqdq ymm5, ymm15, ymm13 vpblendd ymm5, ymm5, ymm14, 0x88 vpshufd ymm5, ymm5, 0x78 vpunpckhdq ymm13, ymm13, ymm15 vpunpckldq ymm14, ymm14, ymm13 vpshufd ymm15, ymm14, 0x1E vmovdqa ymm13, ymm6 vmovdqa ymm14, ymm5 vmovdqa ymm5, ymmword ptr [rsp+0x40] vmovdqa ymm6, ymmword ptr [rsp+0x80] jmp 9b 9: vpxor ymm0, ymm0, ymm2 vpxor ymm1, ymm1, ymm3 vpxor ymm8, ymm8, ymm10 vpxor ymm9, ymm9, ymm11 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vmovdqu xmmword ptr [rbx+0x40], xmm8 vmovdqu xmmword ptr [rbx+0x50], xmm9 vextracti128 xmmword ptr [rbx+0x60], ymm8, 0x01 vextracti128 xmmword ptr [rbx+0x70], ymm9, 0x01 vmovaps xmm8, xmmword ptr [rsp+0x260] vmovaps xmm0, xmmword ptr [rsp+0x220] vmovaps xmm1, xmmword ptr [rsp+0x230] vmovaps xmm2, xmmword ptr [rsp+0x240] vmovaps xmm3, xmmword ptr [rsp+0x250] vblendvps xmm0, xmm0, xmm1, xmm8 vblendvps xmm2, xmm2, xmm3, xmm8 vmovaps xmmword ptr [rsp+0x220], xmm0 vmovaps xmmword ptr [rsp+0x240], xmm2 add rbx, 128 add rdi, 32 sub rsi, 4 3: test rsi, 0x2 je 3f vbroadcasti128 ymm0, xmmword ptr [rcx] vbroadcasti128 ymm1, xmmword ptr [rcx+0x10] vmovd xmm13, dword ptr [rsp+0x220] vpinsrd xmm13, xmm13, dword ptr [rsp+0x240], 1 vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovd xmm14, dword ptr [rsp+0x224] vpinsrd xmm14, xmm14, dword ptr [rsp+0x244], 1 vpinsrd xmm14, xmm14, dword ptr 
[BLAKE3_BLOCK_LEN+rip], 2 vinserti128 ymm13, ymm13, xmm14, 0x01 vbroadcasti128 ymm14, xmmword ptr [ROT16+rip] vbroadcasti128 ymm15, xmmword ptr [ROT8+rip] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x200], eax vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip] vpbroadcastd ymm8, dword ptr [rsp+0x200] vpblendd ymm3, ymm13, ymm8, 0x88 vmovups ymm8, ymmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01 vshufps ymm4, ymm8, ymm9, 136 vshufps ymm5, ymm8, ymm9, 221 vmovups ymm8, ymmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01 vshufps ymm6, ymm8, ymm9, 136 vshufps ymm7, ymm8, ymm9, 221 vpshufd ymm6, ymm6, 0x93 vpshufd ymm7, ymm7, 0x93 mov al, 7 9: vpaddd ymm0, ymm0, ymm4 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm14 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm8 vpaddd ymm0, ymm0, ymm5 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm15 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm8 vpshufd ymm0, ymm0, 0x93 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x39 vpaddd ymm0, ymm0, ymm6 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm14 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm8 vpaddd ymm0, ymm0, ymm7 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm15 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm8 vpshufd ymm0, 
ymm0, 0x39 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x93 dec al jz 9f vshufps ymm8, ymm4, ymm5, 214 vpshufd ymm9, ymm4, 0x0F vpshufd ymm4, ymm8, 0x39 vshufps ymm8, ymm6, ymm7, 250 vpblendd ymm9, ymm9, ymm8, 0xAA vpunpcklqdq ymm8, ymm7, ymm5 vpblendd ymm8, ymm8, ymm6, 0x88 vpshufd ymm8, ymm8, 0x78 vpunpckhdq ymm5, ymm5, ymm7 vpunpckldq ymm6, ymm6, ymm5 vpshufd ymm7, ymm6, 0x1E vmovdqa ymm5, ymm9 vmovdqa ymm6, ymm8 jmp 9b 9: vpxor ymm0, ymm0, ymm2 vpxor ymm1, ymm1, ymm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vmovaps ymm8, ymmword ptr [rsp+0x260] vmovaps ymm0, ymmword ptr [rsp+0x220] vmovups ymm1, ymmword ptr [rsp+0x228] vmovaps ymm2, ymmword ptr [rsp+0x240] vmovups ymm3, ymmword ptr [rsp+0x248] vblendvps ymm0, ymm0, ymm1, ymm8 vblendvps ymm2, ymm2, ymm3, ymm8 vmovaps ymmword ptr [rsp+0x220], ymm0 vmovaps ymmword ptr [rsp+0x240], ymm2 add rbx, 64 add rdi, 16 sub rsi, 2 3: test rsi, 0x1 je 4b vmovdqu xmm0, xmmword ptr [rcx] vmovdqu xmm1, xmmword ptr [rcx+0x10] vmovd xmm3, dword ptr [rsp+0x220] vpinsrd xmm3, xmm3, dword ptr [rsp+0x240], 1 vpinsrd xmm13, xmm3, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovdqa xmm14, xmmword ptr [ROT16+rip] vmovdqa xmm15, xmmword ptr [ROT8+rip] mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d vmovdqa xmm2, xmmword ptr [BLAKE3_IV+rip] vmovdqa xmm3, xmm13 vpinsrd xmm3, xmm3, eax, 3 vmovups xmm8, xmmword ptr [r8+rdx-0x40] vmovups xmm9, xmmword ptr [r8+rdx-0x30] vshufps xmm4, xmm8, xmm9, 136 vshufps xmm5, xmm8, xmm9, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vmovups xmm9, xmmword ptr [r8+rdx-0x10] vshufps xmm6, xmm8, xmm9, 136 vshufps xmm7, xmm8, xmm9, 221 vpshufd xmm6, xmm6, 0x93 vpshufd xmm7, xmm7, 0x93 mov al, 7 9: vpaddd xmm0, xmm0, xmm4 vpaddd xmm0, xmm0, xmm1 vpxor 
xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm14 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 12 vpslld xmm1, xmm1, 20 vpor xmm1, xmm1, xmm8 vpaddd xmm0, xmm0, xmm5 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm15 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 7 vpslld xmm1, xmm1, 25 vpor xmm1, xmm1, xmm8 vpshufd xmm0, xmm0, 0x93 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x39 vpaddd xmm0, xmm0, xmm6 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm14 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 12 vpslld xmm1, xmm1, 20 vpor xmm1, xmm1, xmm8 vpaddd xmm0, xmm0, xmm7 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm15 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 7 vpslld xmm1, xmm1, 25 vpor xmm1, xmm1, xmm8 vpshufd xmm0, xmm0, 0x39 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x93 dec al jz 9f vshufps xmm8, xmm4, xmm5, 214 vpshufd xmm9, xmm4, 0x0F vpshufd xmm4, xmm8, 0x39 vshufps xmm8, xmm6, xmm7, 250 vpblendd xmm9, xmm9, xmm8, 0xAA vpunpcklqdq xmm8, xmm7, xmm5 vpblendd xmm8, xmm8, xmm6, 0x88 vpshufd xmm8, xmm8, 0x78 vpunpckhdq xmm5, xmm5, xmm7 vpunpckldq xmm6, xmm6, xmm5 vpshufd xmm7, xmm6, 0x1E vmovdqa xmm5, xmm9 vmovdqa xmm6, xmm8 jmp 9b 9: vpxor xmm0, xmm0, xmm2 vpxor xmm1, xmm1, xmm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 jmp 4b .section .rdata .p2align 6 ADD0: .long 0, 1, 2, 3, 4, 5, 6, 7 ADD1: .long 8, 8, 8, 8, 8, 8, 8, 8 BLAKE3_IV_0: .long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667 .long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667 BLAKE3_IV_1: .long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85 .long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85 BLAKE3_IV_2: .long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372 .long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372 BLAKE3_IV_3: .long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A .long 0xA54FF53A, 
0xA54FF53A, 0xA54FF53A, 0xA54FF53A BLAKE3_BLOCK_LEN: .long 0x00000040, 0x00000040, 0x00000040, 0x00000040 .long 0x00000040, 0x00000040, 0x00000040, 0x00000040 ROT16: .byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13 ROT8: .byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12 CMP_MSB_MASK: .long 0x80000000, 0x80000000, 0x80000000, 0x80000000 .long 0x80000000, 0x80000000, 0x80000000, 0x80000000 BLAKE3_IV: .long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
semyeong-yu/RadFoam
89,612
external/submodules/mesa/src/util/blake3/blake3_avx512_x86-64_unix.S
#include "mesa_blake3_visibility.h" #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits #endif #if defined(__ELF__) && defined(__CET__) && defined(__has_include) #if __has_include(<cet.h>) #include <cet.h> #endif #endif #if !defined(_CET_ENDBR) #define _CET_ENDBR #endif .intel_syntax noprefix HIDDEN _blake3_hash_many_avx512 HIDDEN blake3_hash_many_avx512 HIDDEN blake3_compress_in_place_avx512 HIDDEN _blake3_compress_in_place_avx512 HIDDEN blake3_compress_xof_avx512 HIDDEN _blake3_compress_xof_avx512 .global _blake3_hash_many_avx512 .global blake3_hash_many_avx512 .global blake3_compress_in_place_avx512 .global _blake3_compress_in_place_avx512 .global blake3_compress_xof_avx512 .global _blake3_compress_xof_avx512 #ifdef __APPLE__ .text #else .section .text #endif .p2align 6 _blake3_hash_many_avx512: blake3_hash_many_avx512: _CET_ENDBR push r15 push r14 push r13 push r12 push rbx push rbp mov rbp, rsp sub rsp, 144 and rsp, 0xFFFFFFFFFFFFFFC0 neg r9 kmovw k1, r9d vmovd xmm0, r8d vpbroadcastd ymm0, xmm0 shr r8, 32 vmovd xmm1, r8d vpbroadcastd ymm1, xmm1 vmovdqa ymm4, ymm1 vmovdqa ymm5, ymm1 vpaddd ymm2, ymm0, ymmword ptr [ADD0+rip] vpaddd ymm3, ymm0, ymmword ptr [ADD0+32+rip] vpcmpltud k2, ymm2, ymm0 vpcmpltud k3, ymm3, ymm0 vpaddd ymm4 {k2}, ymm4, dword ptr [ADD1+rip] {1to8} vpaddd ymm5 {k3}, ymm5, dword ptr [ADD1+rip] {1to8} knotw k2, k1 vmovdqa32 ymm2 {k2}, ymm0 vmovdqa32 ymm3 {k2}, ymm0 vmovdqa32 ymm4 {k2}, ymm1 vmovdqa32 ymm5 {k2}, ymm1 vmovdqa ymmword ptr [rsp], ymm2 vmovdqa ymmword ptr [rsp+0x1*0x20], ymm3 vmovdqa ymmword ptr [rsp+0x2*0x20], ymm4 vmovdqa ymmword ptr [rsp+0x3*0x20], ymm5 shl rdx, 6 mov qword ptr [rsp+0x80], rdx cmp rsi, 16 jc 3f 2: vpbroadcastd zmm0, dword ptr [rcx] vpbroadcastd zmm1, dword ptr [rcx+0x1*0x4] vpbroadcastd zmm2, dword ptr [rcx+0x2*0x4] vpbroadcastd zmm3, dword ptr [rcx+0x3*0x4] vpbroadcastd zmm4, dword ptr [rcx+0x4*0x4] vpbroadcastd zmm5, dword ptr [rcx+0x5*0x4] vpbroadcastd zmm6, dword ptr 
[rcx+0x6*0x4] vpbroadcastd zmm7, dword ptr [rcx+0x7*0x4] movzx eax, byte ptr [rbp+0x38] movzx ebx, byte ptr [rbp+0x40] or eax, ebx xor edx, edx .p2align 5 9: movzx ebx, byte ptr [rbp+0x48] or ebx, eax add rdx, 64 cmp rdx, qword ptr [rsp+0x80] cmove eax, ebx mov dword ptr [rsp+0x88], eax mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov r12, qword ptr [rdi+0x40] mov r13, qword ptr [rdi+0x48] mov r14, qword ptr [rdi+0x50] mov r15, qword ptr [rdi+0x58] vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20] vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01 vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20] vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01 vpunpcklqdq zmm8, zmm16, zmm17 vpunpckhqdq zmm9, zmm16, zmm17 vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20] vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01 vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20] vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01 vpunpcklqdq zmm10, zmm18, zmm19 vpunpckhqdq zmm11, zmm18, zmm19 mov r8, qword ptr [rdi+0x20] mov r9, qword ptr [rdi+0x28] mov r10, qword ptr [rdi+0x30] mov r11, qword ptr [rdi+0x38] mov r12, qword ptr [rdi+0x60] mov r13, qword ptr [rdi+0x68] mov r14, qword ptr [rdi+0x70] mov r15, qword ptr [rdi+0x78] vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20] vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01 vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20] vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01 vpunpcklqdq zmm12, zmm16, zmm17 vpunpckhqdq zmm13, zmm16, zmm17 vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20] vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01 vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20] vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01 vpunpcklqdq zmm14, zmm18, zmm19 vpunpckhqdq zmm15, zmm18, zmm19 vmovdqa32 zmm27, zmmword ptr [INDEX0+rip] vmovdqa32 zmm31, zmmword ptr [INDEX1+rip] vshufps 
zmm16, zmm8, zmm10, 136 vshufps zmm17, zmm12, zmm14, 136 vmovdqa32 zmm20, zmm16 vpermt2d zmm16, zmm27, zmm17 vpermt2d zmm20, zmm31, zmm17 vshufps zmm17, zmm8, zmm10, 221 vshufps zmm30, zmm12, zmm14, 221 vmovdqa32 zmm21, zmm17 vpermt2d zmm17, zmm27, zmm30 vpermt2d zmm21, zmm31, zmm30 vshufps zmm18, zmm9, zmm11, 136 vshufps zmm8, zmm13, zmm15, 136 vmovdqa32 zmm22, zmm18 vpermt2d zmm18, zmm27, zmm8 vpermt2d zmm22, zmm31, zmm8 vshufps zmm19, zmm9, zmm11, 221 vshufps zmm8, zmm13, zmm15, 221 vmovdqa32 zmm23, zmm19 vpermt2d zmm19, zmm27, zmm8 vpermt2d zmm23, zmm31, zmm8 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov r12, qword ptr [rdi+0x40] mov r13, qword ptr [rdi+0x48] mov r14, qword ptr [rdi+0x50] mov r15, qword ptr [rdi+0x58] vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20] vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01 vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20] vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01 vpunpcklqdq zmm8, zmm24, zmm25 vpunpckhqdq zmm9, zmm24, zmm25 vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20] vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01 vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20] vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01 vpunpcklqdq zmm10, zmm24, zmm25 vpunpckhqdq zmm11, zmm24, zmm25 prefetcht0 [r8+rdx+0x80] prefetcht0 [r12+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r13+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r14+rdx+0x80] prefetcht0 [r11+rdx+0x80] prefetcht0 [r15+rdx+0x80] mov r8, qword ptr [rdi+0x20] mov r9, qword ptr [rdi+0x28] mov r10, qword ptr [rdi+0x30] mov r11, qword ptr [rdi+0x38] mov r12, qword ptr [rdi+0x60] mov r13, qword ptr [rdi+0x68] mov r14, qword ptr [rdi+0x70] mov r15, qword ptr [rdi+0x78] vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20] vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01 vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20] vinserti64x4 zmm25, 
zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01 vpunpcklqdq zmm12, zmm24, zmm25 vpunpckhqdq zmm13, zmm24, zmm25 vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20] vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01 vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20] vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01 vpunpcklqdq zmm14, zmm24, zmm25 vpunpckhqdq zmm15, zmm24, zmm25 prefetcht0 [r8+rdx+0x80] prefetcht0 [r12+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r13+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r14+rdx+0x80] prefetcht0 [r11+rdx+0x80] prefetcht0 [r15+rdx+0x80] vshufps zmm24, zmm8, zmm10, 136 vshufps zmm30, zmm12, zmm14, 136 vmovdqa32 zmm28, zmm24 vpermt2d zmm24, zmm27, zmm30 vpermt2d zmm28, zmm31, zmm30 vshufps zmm25, zmm8, zmm10, 221 vshufps zmm30, zmm12, zmm14, 221 vmovdqa32 zmm29, zmm25 vpermt2d zmm25, zmm27, zmm30 vpermt2d zmm29, zmm31, zmm30 vshufps zmm26, zmm9, zmm11, 136 vshufps zmm8, zmm13, zmm15, 136 vmovdqa32 zmm30, zmm26 vpermt2d zmm26, zmm27, zmm8 vpermt2d zmm30, zmm31, zmm8 vshufps zmm8, zmm9, zmm11, 221 vshufps zmm10, zmm13, zmm15, 221 vpermi2d zmm27, zmm8, zmm10 vpermi2d zmm31, zmm8, zmm10 vpbroadcastd zmm8, dword ptr [BLAKE3_IV_0+rip] vpbroadcastd zmm9, dword ptr [BLAKE3_IV_1+rip] vpbroadcastd zmm10, dword ptr [BLAKE3_IV_2+rip] vpbroadcastd zmm11, dword ptr [BLAKE3_IV_3+rip] vmovdqa32 zmm12, zmmword ptr [rsp] vmovdqa32 zmm13, zmmword ptr [rsp+0x1*0x40] vpbroadcastd zmm14, dword ptr [BLAKE3_BLOCK_LEN+rip] vpbroadcastd zmm15, dword ptr [rsp+0x22*0x4] vpaddd zmm0, zmm0, zmm16 vpaddd zmm1, zmm1, zmm18 vpaddd zmm2, zmm2, zmm20 vpaddd zmm3, zmm3, zmm22 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 
vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm17 vpaddd zmm1, zmm1, zmm19 vpaddd zmm2, zmm2, zmm21 vpaddd zmm3, zmm3, zmm23 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm24 vpaddd zmm1, zmm1, zmm26 vpaddd zmm2, zmm2, zmm28 vpaddd zmm3, zmm3, zmm30 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm25 vpaddd zmm1, zmm1, zmm27 vpaddd zmm2, zmm2, zmm29 vpaddd zmm3, zmm3, zmm31 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 
vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm18 vpaddd zmm1, zmm1, zmm19 vpaddd zmm2, zmm2, zmm23 vpaddd zmm3, zmm3, zmm20 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm22 vpaddd zmm1, zmm1, zmm26 vpaddd zmm2, zmm2, zmm16 vpaddd zmm3, zmm3, zmm29 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm17 vpaddd zmm1, zmm1, zmm28 vpaddd zmm2, zmm2, zmm25 vpaddd zmm3, zmm3, zmm31 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 
vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm27 vpaddd zmm1, zmm1, zmm21 vpaddd zmm2, zmm2, zmm30 vpaddd zmm3, zmm3, zmm24 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm19 vpaddd zmm1, zmm1, zmm26 vpaddd zmm2, zmm2, zmm29 vpaddd zmm3, zmm3, zmm23 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm20 vpaddd zmm1, zmm1, zmm28 vpaddd zmm2, zmm2, zmm18 vpaddd zmm3, zmm3, zmm30 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 
8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm22 vpaddd zmm1, zmm1, zmm25 vpaddd zmm2, zmm2, zmm27 vpaddd zmm3, zmm3, zmm24 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm21 vpaddd zmm1, zmm1, zmm16 vpaddd zmm2, zmm2, zmm31 vpaddd zmm3, zmm3, zmm17 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm26 vpaddd zmm1, zmm1, zmm28 vpaddd zmm2, zmm2, zmm30 vpaddd zmm3, zmm3, zmm29 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 
vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm23 vpaddd zmm1, zmm1, zmm25 vpaddd zmm2, zmm2, zmm19 vpaddd zmm3, zmm3, zmm31 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm20 vpaddd zmm1, zmm1, zmm27 vpaddd zmm2, zmm2, zmm21 vpaddd zmm3, zmm3, zmm17 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm16 vpaddd zmm1, zmm1, zmm18 vpaddd zmm2, zmm2, zmm24 vpaddd zmm3, zmm3, zmm22 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 
vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm28 vpaddd zmm1, zmm1, zmm25 vpaddd zmm2, zmm2, zmm31 vpaddd zmm3, zmm3, zmm30 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm29 vpaddd zmm1, zmm1, zmm27 vpaddd zmm2, zmm2, zmm26 vpaddd zmm3, zmm3, zmm24 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm23 vpaddd zmm1, zmm1, zmm21 vpaddd zmm2, zmm2, zmm16 vpaddd zmm3, zmm3, zmm22 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd 
zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm18 vpaddd zmm1, zmm1, zmm19 vpaddd zmm2, zmm2, zmm17 vpaddd zmm3, zmm3, zmm20 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm25 vpaddd zmm1, zmm1, zmm27 vpaddd zmm2, zmm2, zmm24 vpaddd zmm3, zmm3, zmm31 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm30 vpaddd zmm1, zmm1, zmm21 vpaddd zmm2, zmm2, zmm28 vpaddd zmm3, zmm3, zmm17 vpaddd zmm0, zmm0, zmm4 
vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm29 vpaddd zmm1, zmm1, zmm16 vpaddd zmm2, zmm2, zmm18 vpaddd zmm3, zmm3, zmm20 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm19 vpaddd zmm1, zmm1, zmm26 vpaddd zmm2, zmm2, zmm22 vpaddd zmm3, zmm3, zmm23 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm27 vpaddd zmm1, zmm1, zmm21 vpaddd zmm2, zmm2, zmm17 vpaddd 
zmm3, zmm3, zmm24 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm31 vpaddd zmm1, zmm1, zmm16 vpaddd zmm2, zmm2, zmm25 vpaddd zmm3, zmm3, zmm22 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm30 vpaddd zmm1, zmm1, zmm18 vpaddd zmm2, zmm2, zmm19 vpaddd zmm3, zmm3, zmm23 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm26 vpaddd 
zmm1, zmm1, zmm28 vpaddd zmm2, zmm2, zmm20 vpaddd zmm3, zmm3, zmm29 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpxord zmm0, zmm0, zmm8 vpxord zmm1, zmm1, zmm9 vpxord zmm2, zmm2, zmm10 vpxord zmm3, zmm3, zmm11 vpxord zmm4, zmm4, zmm12 vpxord zmm5, zmm5, zmm13 vpxord zmm6, zmm6, zmm14 vpxord zmm7, zmm7, zmm15 movzx eax, byte ptr [rbp+0x38] jne 9b mov rbx, qword ptr [rbp+0x50] vpunpckldq zmm16, zmm0, zmm1 vpunpckhdq zmm17, zmm0, zmm1 vpunpckldq zmm18, zmm2, zmm3 vpunpckhdq zmm19, zmm2, zmm3 vpunpckldq zmm20, zmm4, zmm5 vpunpckhdq zmm21, zmm4, zmm5 vpunpckldq zmm22, zmm6, zmm7 vpunpckhdq zmm23, zmm6, zmm7 vpunpcklqdq zmm0, zmm16, zmm18 vpunpckhqdq zmm1, zmm16, zmm18 vpunpcklqdq zmm2, zmm17, zmm19 vpunpckhqdq zmm3, zmm17, zmm19 vpunpcklqdq zmm4, zmm20, zmm22 vpunpckhqdq zmm5, zmm20, zmm22 vpunpcklqdq zmm6, zmm21, zmm23 vpunpckhqdq zmm7, zmm21, zmm23 vshufi32x4 zmm16, zmm0, zmm4, 0x88 vshufi32x4 zmm17, zmm1, zmm5, 0x88 vshufi32x4 zmm18, zmm2, zmm6, 0x88 vshufi32x4 zmm19, zmm3, zmm7, 0x88 vshufi32x4 zmm20, zmm0, zmm4, 0xDD vshufi32x4 zmm21, zmm1, zmm5, 0xDD vshufi32x4 zmm22, zmm2, zmm6, 0xDD vshufi32x4 zmm23, zmm3, zmm7, 0xDD vshufi32x4 zmm0, zmm16, zmm17, 0x88 vshufi32x4 zmm1, zmm18, zmm19, 0x88 vshufi32x4 zmm2, zmm20, zmm21, 0x88 vshufi32x4 zmm3, zmm22, zmm23, 0x88 vshufi32x4 zmm4, zmm16, zmm17, 0xDD vshufi32x4 zmm5, zmm18, zmm19, 0xDD vshufi32x4 zmm6, zmm20, zmm21, 0xDD vshufi32x4 zmm7, zmm22, zmm23, 0xDD vmovdqu32 zmmword ptr [rbx], zmm0 
vmovdqu32 zmmword ptr [rbx+0x1*0x40], zmm1 vmovdqu32 zmmword ptr [rbx+0x2*0x40], zmm2 vmovdqu32 zmmword ptr [rbx+0x3*0x40], zmm3 vmovdqu32 zmmword ptr [rbx+0x4*0x40], zmm4 vmovdqu32 zmmword ptr [rbx+0x5*0x40], zmm5 vmovdqu32 zmmword ptr [rbx+0x6*0x40], zmm6 vmovdqu32 zmmword ptr [rbx+0x7*0x40], zmm7 vmovdqa32 zmm0, zmmword ptr [rsp] vmovdqa32 zmm1, zmmword ptr [rsp+0x1*0x40] vmovdqa32 zmm2, zmm0 vpaddd zmm2{k1}, zmm0, dword ptr [ADD16+rip] {1to16} vpcmpltud k2, zmm2, zmm0 vpaddd zmm1 {k2}, zmm1, dword ptr [ADD1+rip] {1to16} vmovdqa32 zmmword ptr [rsp], zmm2 vmovdqa32 zmmword ptr [rsp+0x1*0x40], zmm1 add rdi, 128 add rbx, 512 mov qword ptr [rbp+0x50], rbx sub rsi, 16 cmp rsi, 16 jnc 2b test rsi, rsi jnz 3f 4: vzeroupper mov rsp, rbp pop rbp pop rbx pop r12 pop r13 pop r14 pop r15 ret .p2align 6 3: test esi, 0x8 je 3f vpbroadcastd ymm0, dword ptr [rcx] vpbroadcastd ymm1, dword ptr [rcx+0x4] vpbroadcastd ymm2, dword ptr [rcx+0x8] vpbroadcastd ymm3, dword ptr [rcx+0xC] vpbroadcastd ymm4, dword ptr [rcx+0x10] vpbroadcastd ymm5, dword ptr [rcx+0x14] vpbroadcastd ymm6, dword ptr [rcx+0x18] vpbroadcastd ymm7, dword ptr [rcx+0x1C] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov r12, qword ptr [rdi+0x20] mov r13, qword ptr [rdi+0x28] mov r14, qword ptr [rdi+0x30] mov r15, qword ptr [rdi+0x38] movzx eax, byte ptr [rbp+0x38] movzx ebx, byte ptr [rbp+0x40] or eax, ebx xor edx, edx 2: movzx ebx, byte ptr [rbp+0x48] or ebx, eax add rdx, 64 cmp rdx, qword ptr [rsp+0x80] cmove eax, ebx mov dword ptr [rsp+0x88], eax vmovups xmm8, xmmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x40] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x40] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x40] vinsertf128 
ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm16, ymm12, ymm14, 136 vshufps ymm17, ymm12, ymm14, 221 vshufps ymm18, ymm13, ymm15, 136 vshufps ymm19, ymm13, ymm15, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x30] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x30] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x30] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm20, ymm12, ymm14, 136 vshufps ymm21, ymm12, ymm14, 221 vshufps ymm22, ymm13, ymm15, 136 vshufps ymm23, ymm13, ymm15, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x20] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x20] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x20] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm24, ymm12, ymm14, 136 vshufps ymm25, ymm12, ymm14, 221 vshufps ymm26, ymm13, ymm15, 136 vshufps ymm27, ymm13, ymm15, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x10] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x10] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x10] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01 vunpcklpd ymm14, 
ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm28, ymm12, ymm14, 136 vshufps ymm29, ymm12, ymm14, 221 vshufps ymm30, ymm13, ymm15, 136 vshufps ymm31, ymm13, ymm15, 221 vpbroadcastd ymm8, dword ptr [BLAKE3_IV_0+rip] vpbroadcastd ymm9, dword ptr [BLAKE3_IV_1+rip] vpbroadcastd ymm10, dword ptr [BLAKE3_IV_2+rip] vpbroadcastd ymm11, dword ptr [BLAKE3_IV_3+rip] vmovdqa ymm12, ymmword ptr [rsp] vmovdqa ymm13, ymmword ptr [rsp+0x40] vpbroadcastd ymm14, dword ptr [BLAKE3_BLOCK_LEN+rip] vpbroadcastd ymm15, dword ptr [rsp+0x88] vpaddd ymm0, ymm0, ymm16 vpaddd ymm1, ymm1, ymm18 vpaddd ymm2, ymm2, ymm20 vpaddd ymm3, ymm3, ymm22 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm17 vpaddd ymm1, ymm1, ymm19 vpaddd ymm2, ymm2, ymm21 vpaddd ymm3, ymm3, ymm23 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm24 vpaddd ymm1, ymm1, ymm26 vpaddd ymm2, ymm2, ymm28 vpaddd ymm3, ymm3, ymm30 
vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm25 vpaddd ymm1, ymm1, ymm27 vpaddd ymm2, ymm2, ymm29 vpaddd ymm3, ymm3, ymm31 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm18 vpaddd ymm1, ymm1, ymm19 vpaddd ymm2, ymm2, ymm23 vpaddd ymm3, ymm3, ymm20 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm22 vpaddd ymm1, ymm1, ymm26 
vpaddd ymm2, ymm2, ymm16 vpaddd ymm3, ymm3, ymm29 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm17 vpaddd ymm1, ymm1, ymm28 vpaddd ymm2, ymm2, ymm25 vpaddd ymm3, ymm3, ymm31 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm27 vpaddd ymm1, ymm1, ymm21 vpaddd ymm2, ymm2, ymm30 vpaddd ymm3, ymm3, ymm24 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd 
ymm0, ymm0, ymm19 vpaddd ymm1, ymm1, ymm26 vpaddd ymm2, ymm2, ymm29 vpaddd ymm3, ymm3, ymm23 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm20 vpaddd ymm1, ymm1, ymm28 vpaddd ymm2, ymm2, ymm18 vpaddd ymm3, ymm3, ymm30 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm22 vpaddd ymm1, ymm1, ymm25 vpaddd ymm2, ymm2, ymm27 vpaddd ymm3, ymm3, ymm24 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 
vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm21 vpaddd ymm1, ymm1, ymm16 vpaddd ymm2, ymm2, ymm31 vpaddd ymm3, ymm3, ymm17 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm26 vpaddd ymm1, ymm1, ymm28 vpaddd ymm2, ymm2, ymm30 vpaddd ymm3, ymm3, ymm29 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm23 vpaddd ymm1, ymm1, ymm25 vpaddd ymm2, ymm2, ymm19 vpaddd ymm3, ymm3, ymm31 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, 
ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm20 vpaddd ymm1, ymm1, ymm27 vpaddd ymm2, ymm2, ymm21 vpaddd ymm3, ymm3, ymm17 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm16 vpaddd ymm1, ymm1, ymm18 vpaddd ymm2, ymm2, ymm24 vpaddd ymm3, ymm3, ymm22 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm28 vpaddd ymm1, ymm1, ymm25 vpaddd ymm2, ymm2, ymm31 vpaddd ymm3, ymm3, ymm30 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, 
ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm29 vpaddd ymm1, ymm1, ymm27 vpaddd ymm2, ymm2, ymm26 vpaddd ymm3, ymm3, ymm24 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm23 vpaddd ymm1, ymm1, ymm21 vpaddd ymm2, ymm2, ymm16 vpaddd ymm3, ymm3, ymm22 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm18 vpaddd ymm1, ymm1, ymm19 vpaddd ymm2, ymm2, ymm17 vpaddd ymm3, ymm3, ymm20 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, 
ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm25 vpaddd ymm1, ymm1, ymm27 vpaddd ymm2, ymm2, ymm24 vpaddd ymm3, ymm3, ymm31 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm30 vpaddd ymm1, ymm1, ymm21 vpaddd ymm2, ymm2, ymm28 vpaddd ymm3, ymm3, ymm17 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm29 vpaddd ymm1, ymm1, ymm16 vpaddd ymm2, ymm2, ymm18 vpaddd ymm3, ymm3, ymm20 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, 
ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm19 vpaddd ymm1, ymm1, ymm26 vpaddd ymm2, ymm2, ymm22 vpaddd ymm3, ymm3, ymm23 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm27 vpaddd ymm1, ymm1, ymm21 vpaddd ymm2, ymm2, ymm17 vpaddd ymm3, ymm3, ymm24 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm31 vpaddd ymm1, ymm1, ymm16 vpaddd ymm2, ymm2, ymm25 vpaddd ymm3, ymm3, ymm22 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 
vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm30 vpaddd ymm1, ymm1, ymm18 vpaddd ymm2, ymm2, ymm19 vpaddd ymm3, ymm3, ymm23 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm26 vpaddd ymm1, ymm1, ymm28 vpaddd ymm2, ymm2, ymm20 vpaddd ymm3, ymm3, ymm29 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpxor ymm0, ymm0, ymm8 vpxor ymm1, ymm1, ymm9 vpxor ymm2, ymm2, ymm10 vpxor ymm3, ymm3, ymm11 vpxor ymm4, ymm4, ymm12 vpxor ymm5, ymm5, ymm13 vpxor ymm6, ymm6, ymm14 vpxor ymm7, ymm7, ymm15 movzx eax, byte ptr [rbp+0x38] jne 2b mov rbx, qword ptr [rbp+0x50] vunpcklps ymm8, ymm0, ymm1 vunpcklps ymm9, ymm2, ymm3 vunpckhps ymm10, ymm0, ymm1 vunpcklps 
ymm11, ymm4, ymm5 vunpcklps ymm0, ymm6, ymm7 vshufps ymm12, ymm8, ymm9, 78 vblendps ymm1, ymm8, ymm12, 0xCC vshufps ymm8, ymm11, ymm0, 78 vunpckhps ymm13, ymm2, ymm3 vblendps ymm2, ymm11, ymm8, 0xCC vblendps ymm3, ymm12, ymm9, 0xCC vperm2f128 ymm12, ymm1, ymm2, 0x20 vmovups ymmword ptr [rbx], ymm12 vunpckhps ymm14, ymm4, ymm5 vblendps ymm4, ymm8, ymm0, 0xCC vunpckhps ymm15, ymm6, ymm7 vperm2f128 ymm7, ymm3, ymm4, 0x20 vmovups ymmword ptr [rbx+0x20], ymm7 vshufps ymm5, ymm10, ymm13, 78 vblendps ymm6, ymm5, ymm13, 0xCC vshufps ymm13, ymm14, ymm15, 78 vblendps ymm10, ymm10, ymm5, 0xCC vblendps ymm14, ymm14, ymm13, 0xCC vperm2f128 ymm8, ymm10, ymm14, 0x20 vmovups ymmword ptr [rbx+0x40], ymm8 vblendps ymm15, ymm13, ymm15, 0xCC vperm2f128 ymm13, ymm6, ymm15, 0x20 vmovups ymmword ptr [rbx+0x60], ymm13 vperm2f128 ymm9, ymm1, ymm2, 0x31 vperm2f128 ymm11, ymm3, ymm4, 0x31 vmovups ymmword ptr [rbx+0x80], ymm9 vperm2f128 ymm14, ymm10, ymm14, 0x31 vperm2f128 ymm15, ymm6, ymm15, 0x31 vmovups ymmword ptr [rbx+0xA0], ymm11 vmovups ymmword ptr [rbx+0xC0], ymm14 vmovups ymmword ptr [rbx+0xE0], ymm15 vmovdqa ymm0, ymmword ptr [rsp] vmovdqa ymm2, ymmword ptr [rsp+0x2*0x20] vmovdqa32 ymm0 {k1}, ymmword ptr [rsp+0x1*0x20] vmovdqa32 ymm2 {k1}, ymmword ptr [rsp+0x3*0x20] vmovdqa ymmword ptr [rsp], ymm0 vmovdqa ymmword ptr [rsp+0x2*0x20], ymm2 add rbx, 256 mov qword ptr [rbp+0x50], rbx add rdi, 64 sub rsi, 8 3: mov rbx, qword ptr [rbp+0x50] mov r15, qword ptr [rsp+0x80] movzx r13, byte ptr [rbp+0x38] movzx r12, byte ptr [rbp+0x48] test esi, 0x4 je 3f vbroadcasti32x4 zmm0, xmmword ptr [rcx] vbroadcasti32x4 zmm1, xmmword ptr [rcx+0x1*0x10] vmovdqa xmm12, xmmword ptr [rsp] vmovdqa xmm13, xmmword ptr [rsp+0x4*0x10] vpunpckldq xmm14, xmm12, xmm13 vpunpckhdq xmm15, xmm12, xmm13 vpermq ymm14, ymm14, 0xDC vpermq ymm15, ymm15, 0xDC vpbroadcastd zmm12, dword ptr [BLAKE3_BLOCK_LEN+rip] vinserti64x4 zmm13, zmm14, ymm15, 0x01 mov eax, 17476 kmovw k2, eax vpblendmd zmm13 {k2}, zmm13, zmm12 
vbroadcasti32x4 zmm15, xmmword ptr [BLAKE3_IV+rip] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov eax, 43690 kmovw k3, eax mov eax, 34952 kmovw k4, eax movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x88], eax vmovdqa32 zmm2, zmm15 vpbroadcastd zmm8, dword ptr [rsp+0x22*0x4] vpblendmd zmm3 {k4}, zmm13, zmm8 vmovups zmm8, zmmword ptr [r8+rdx-0x1*0x40] vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x4*0x10], 0x01 vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x4*0x10], 0x02 vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x4*0x10], 0x03 vmovups zmm9, zmmword ptr [r8+rdx-0x30] vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x3*0x10], 0x01 vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x3*0x10], 0x02 vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x3*0x10], 0x03 vshufps zmm4, zmm8, zmm9, 136 vshufps zmm5, zmm8, zmm9, 221 vmovups zmm8, zmmword ptr [r8+rdx-0x20] vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x2*0x10], 0x01 vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x2*0x10], 0x02 vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x2*0x10], 0x03 vmovups zmm9, zmmword ptr [r8+rdx-0x10] vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x1*0x10], 0x01 vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x1*0x10], 0x02 vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x1*0x10], 0x03 vshufps zmm6, zmm8, zmm9, 136 vshufps zmm7, zmm8, zmm9, 221 vpshufd zmm6, zmm6, 0x93 vpshufd zmm7, zmm7, 0x93 mov al, 7 9: vpaddd zmm0, zmm0, zmm4 vpaddd zmm0, zmm0, zmm1 vpxord zmm3, zmm3, zmm0 vprord zmm3, zmm3, 16 vpaddd zmm2, zmm2, zmm3 vpxord zmm1, zmm1, zmm2 vprord zmm1, zmm1, 12 vpaddd zmm0, zmm0, zmm5 vpaddd zmm0, zmm0, zmm1 vpxord zmm3, zmm3, zmm0 vprord zmm3, zmm3, 8 vpaddd zmm2, zmm2, zmm3 vpxord zmm1, zmm1, zmm2 vprord zmm1, zmm1, 7 vpshufd zmm0, zmm0, 0x93 vpshufd zmm3, zmm3, 0x4E vpshufd zmm2, zmm2, 0x39 vpaddd zmm0, zmm0, 
zmm6 vpaddd zmm0, zmm0, zmm1 vpxord zmm3, zmm3, zmm0 vprord zmm3, zmm3, 16 vpaddd zmm2, zmm2, zmm3 vpxord zmm1, zmm1, zmm2 vprord zmm1, zmm1, 12 vpaddd zmm0, zmm0, zmm7 vpaddd zmm0, zmm0, zmm1 vpxord zmm3, zmm3, zmm0 vprord zmm3, zmm3, 8 vpaddd zmm2, zmm2, zmm3 vpxord zmm1, zmm1, zmm2 vprord zmm1, zmm1, 7 vpshufd zmm0, zmm0, 0x39 vpshufd zmm3, zmm3, 0x4E vpshufd zmm2, zmm2, 0x93 dec al jz 9f vshufps zmm8, zmm4, zmm5, 214 vpshufd zmm9, zmm4, 0x0F vpshufd zmm4, zmm8, 0x39 vshufps zmm8, zmm6, zmm7, 250 vpblendmd zmm9 {k3}, zmm9, zmm8 vpunpcklqdq zmm8, zmm7, zmm5 vpblendmd zmm8 {k4}, zmm8, zmm6 vpshufd zmm8, zmm8, 0x78 vpunpckhdq zmm5, zmm5, zmm7 vpunpckldq zmm6, zmm6, zmm5 vpshufd zmm7, zmm6, 0x1E vmovdqa32 zmm5, zmm9 vmovdqa32 zmm6, zmm8 jmp 9b 9: vpxord zmm0, zmm0, zmm2 vpxord zmm1, zmm1, zmm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vextracti32x4 xmmword ptr [rbx+0x4*0x10], zmm0, 0x02 vextracti32x4 xmmword ptr [rbx+0x5*0x10], zmm1, 0x02 vextracti32x4 xmmword ptr [rbx+0x6*0x10], zmm0, 0x03 vextracti32x4 xmmword ptr [rbx+0x7*0x10], zmm1, 0x03 vmovdqa xmm0, xmmword ptr [rsp] vmovdqa xmm2, xmmword ptr [rsp+0x40] vmovdqa32 xmm0 {k1}, xmmword ptr [rsp+0x1*0x10] vmovdqa32 xmm2 {k1}, xmmword ptr [rsp+0x5*0x10] vmovdqa xmmword ptr [rsp], xmm0 vmovdqa xmmword ptr [rsp+0x40], xmm2 add rbx, 128 add rdi, 32 sub rsi, 4 3: test esi, 0x2 je 3f vbroadcasti128 ymm0, xmmword ptr [rcx] vbroadcasti128 ymm1, xmmword ptr [rcx+0x10] vmovd xmm13, dword ptr [rsp] vpinsrd xmm13, xmm13, dword ptr [rsp+0x40], 1 vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovd xmm14, dword ptr [rsp+0x4] vpinsrd xmm14, xmm14, dword ptr [rsp+0x44], 1 vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vinserti128 ymm13, ymm13, xmm14, 0x01 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor 
edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x88], eax vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip] vpbroadcastd ymm8, dword ptr [rsp+0x88] vpblendd ymm3, ymm13, ymm8, 0x88 vmovups ymm8, ymmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01 vshufps ymm4, ymm8, ymm9, 136 vshufps ymm5, ymm8, ymm9, 221 vmovups ymm8, ymmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01 vshufps ymm6, ymm8, ymm9, 136 vshufps ymm7, ymm8, ymm9, 221 vpshufd ymm6, ymm6, 0x93 vpshufd ymm7, ymm7, 0x93 mov al, 7 9: vpaddd ymm0, ymm0, ymm4 vpaddd ymm0, ymm0, ymm1 vpxord ymm3, ymm3, ymm0 vprord ymm3, ymm3, 16 vpaddd ymm2, ymm2, ymm3 vpxord ymm1, ymm1, ymm2 vprord ymm1, ymm1, 12 vpaddd ymm0, ymm0, ymm5 vpaddd ymm0, ymm0, ymm1 vpxord ymm3, ymm3, ymm0 vprord ymm3, ymm3, 8 vpaddd ymm2, ymm2, ymm3 vpxord ymm1, ymm1, ymm2 vprord ymm1, ymm1, 7 vpshufd ymm0, ymm0, 0x93 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x39 vpaddd ymm0, ymm0, ymm6 vpaddd ymm0, ymm0, ymm1 vpxord ymm3, ymm3, ymm0 vprord ymm3, ymm3, 16 vpaddd ymm2, ymm2, ymm3 vpxord ymm1, ymm1, ymm2 vprord ymm1, ymm1, 12 vpaddd ymm0, ymm0, ymm7 vpaddd ymm0, ymm0, ymm1 vpxord ymm3, ymm3, ymm0 vprord ymm3, ymm3, 8 vpaddd ymm2, ymm2, ymm3 vpxord ymm1, ymm1, ymm2 vprord ymm1, ymm1, 7 vpshufd ymm0, ymm0, 0x39 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x93 dec al jz 9f vshufps ymm8, ymm4, ymm5, 214 vpshufd ymm9, ymm4, 0x0F vpshufd ymm4, ymm8, 0x39 vshufps ymm8, ymm6, ymm7, 250 vpblendd ymm9, ymm9, ymm8, 0xAA vpunpcklqdq ymm8, ymm7, ymm5 vpblendd ymm8, ymm8, ymm6, 0x88 vpshufd ymm8, ymm8, 0x78 vpunpckhdq ymm5, ymm5, ymm7 vpunpckldq ymm6, ymm6, ymm5 vpshufd ymm7, ymm6, 0x1E vmovdqa ymm5, ymm9 vmovdqa ymm6, ymm8 jmp 9b 9: vpxor ymm0, 
ymm0, ymm2 vpxor ymm1, ymm1, ymm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vmovdqa xmm0, xmmword ptr [rsp] vmovdqa xmm2, xmmword ptr [rsp+0x4*0x10] vmovdqu32 xmm0 {k1}, xmmword ptr [rsp+0x8] vmovdqu32 xmm2 {k1}, xmmword ptr [rsp+0x48] vmovdqa xmmword ptr [rsp], xmm0 vmovdqa xmmword ptr [rsp+0x4*0x10], xmm2 add rbx, 64 add rdi, 16 sub rsi, 2 3: test esi, 0x1 je 4b vmovdqu xmm0, xmmword ptr [rcx] vmovdqu xmm1, xmmword ptr [rcx+0x10] vmovd xmm14, dword ptr [rsp] vpinsrd xmm14, xmm14, dword ptr [rsp+0x40], 1 vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovdqa xmm15, xmmword ptr [BLAKE3_IV+rip] mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d vpinsrd xmm3, xmm14, eax, 3 vmovdqa xmm2, xmm15 vmovups xmm8, xmmword ptr [r8+rdx-0x40] vmovups xmm9, xmmword ptr [r8+rdx-0x30] vshufps xmm4, xmm8, xmm9, 136 vshufps xmm5, xmm8, xmm9, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vmovups xmm9, xmmword ptr [r8+rdx-0x10] vshufps xmm6, xmm8, xmm9, 136 vshufps xmm7, xmm8, xmm9, 221 vpshufd xmm6, xmm6, 0x93 vpshufd xmm7, xmm7, 0x93 mov al, 7 9: vpaddd xmm0, xmm0, xmm4 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 16 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 12 vpaddd xmm0, xmm0, xmm5 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 8 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 7 vpshufd xmm0, xmm0, 0x93 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x39 vpaddd xmm0, xmm0, xmm6 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 16 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 12 vpaddd xmm0, xmm0, xmm7 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 8 vpaddd xmm2, 
// NOTE(review): this region of the file is stored with many instructions per
// physical line; it is re-wrapped below one instruction per line with the
// instruction stream itself unchanged.
//
// Tail of the preceding AVX-512 hashing loop (its entry and register setup
// are above this chunk, so the fragment is commented only where its meaning
// is locally evident).
xmm2, xmm3                               // completes the "vpaddd xmm2, ..." begun on the previous source line
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39                 // rotate state rows back out of diagonal form
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al                                   // al = remaining rounds
jz 9f
// Permute the message words for the next round.
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2                   // feed-forward: fold rows 2/3 into rows 0/1
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b                                   // more 64-byte blocks remain for this input
vmovdqu xmmword ptr [rbx], xmm0          // store the 32-byte chaining value
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b

// ---------------------------------------------------------------------------
// blake3_compress_in_place_avx512
// C equivalent (SysV AMD64 — presumably matching the portable blake3 API;
// confirm against the C header):
//   void blake3_compress_in_place_avx512(uint32_t cv[8],
//                                        const uint8_t block[64],
//                                        uint8_t block_len,
//                                        uint64_t counter,
//                                        uint8_t flags);
// In:  rdi = cv (read, then overwritten with the new chaining value)
//      rsi = 64-byte message block
//      dl  = block_len, rcx = counter, r8b = flags
// The 4x4 state is held as four xmm rows:
//   xmm0/xmm1 = cv words 0-7, xmm2 = IV[0..3],
//   xmm3 = [counter_lo, counter_hi, block_len, flags]
// Clobbers: rax, rdx, xmm0-xmm9, flags.
// ---------------------------------------------------------------------------
.p2align 6
_blake3_compress_in_place_avx512:
blake3_compress_in_place_avx512:
        _CET_ENDBR
        vmovdqu xmm0, xmmword ptr [rdi]           // rows 0-1: chaining value
        vmovdqu xmm1, xmmword ptr [rdi+0x10]
        movzx eax, r8b                            // rax = flags
        movzx edx, dl                             // rdx = block_len
        shl rax, 32
        add rdx, rax                              // rdx = block_len | (flags << 32)
        vmovq xmm3, rcx                           // [counter_lo, counter_hi]
        vmovq xmm4, rdx                           // [block_len, flags]
        vpunpcklqdq xmm3, xmm3, xmm4              // row 3 of the state
        vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip] // row 2: first four IV words
        // De-interleave the 16 message words into xmm4..xmm7.
        vmovups xmm8, xmmword ptr [rsi]
        vmovups xmm9, xmmword ptr [rsi+0x10]
        vshufps xmm4, xmm8, xmm9, 136             // even words of block[0..7]
        vshufps xmm5, xmm8, xmm9, 221             // odd words of block[0..7]
        vmovups xmm8, xmmword ptr [rsi+0x20]
        vmovups xmm9, xmmword ptr [rsi+0x30]
        vshufps xmm6, xmm8, xmm9, 136
        vshufps xmm7, xmm8, xmm9, 221
        vpshufd xmm6, xmm6, 0x93
        vpshufd xmm7, xmm7, 0x93
        mov al, 7                                 // BLAKE3 runs 7 rounds
9:
        // Column step: G applied down the columns (adds from xmm4/xmm5).
        vpaddd xmm0, xmm0, xmm4
        vpaddd xmm0, xmm0, xmm1
        vpxord xmm3, xmm3, xmm0
        vprord xmm3, xmm3, 16
        vpaddd xmm2, xmm2, xmm3
        vpxord xmm1, xmm1, xmm2
        vprord xmm1, xmm1, 12
        vpaddd xmm0, xmm0, xmm5
        vpaddd xmm0, xmm0, xmm1
        vpxord xmm3, xmm3, xmm0
        vprord xmm3, xmm3, 8
        vpaddd xmm2, xmm2, xmm3
        vpxord xmm1, xmm1, xmm2
        vprord xmm1, xmm1, 7
        // Diagonalize, run the diagonal step, then undo.
        vpshufd xmm0, xmm0, 0x93
        vpshufd xmm3, xmm3, 0x4E
        vpshufd xmm2, xmm2, 0x39
        vpaddd xmm0, xmm0, xmm6
        vpaddd xmm0, xmm0, xmm1
        vpxord xmm3, xmm3, xmm0
        vprord xmm3, xmm3, 16
        vpaddd xmm2, xmm2, xmm3
        vpxord xmm1, xmm1, xmm2
        vprord xmm1, xmm1, 12
        vpaddd xmm0, xmm0, xmm7
        vpaddd xmm0, xmm0, xmm1
        vpxord xmm3, xmm3, xmm0
        vprord xmm3, xmm3, 8
        vpaddd xmm2, xmm2, xmm3
        vpxord xmm1, xmm1, xmm2
        vprord xmm1, xmm1, 7
        vpshufd xmm0, xmm0, 0x39
        vpshufd xmm3, xmm3, 0x4E
        vpshufd xmm2, xmm2, 0x93
        dec al
        jz 9f
        // Message permutation between rounds.
        vshufps xmm8, xmm4, xmm5, 214
        vpshufd xmm9, xmm4, 0x0F
        vpshufd xmm4, xmm8, 0x39
        vshufps xmm8, xmm6, xmm7, 250
        vpblendd xmm9, xmm9, xmm8, 0xAA
        vpunpcklqdq xmm8, xmm7, xmm5
        vpblendd xmm8, xmm8, xmm6, 0x88
        vpshufd xmm8, xmm8, 0x78
        vpunpckhdq xmm5, xmm5, xmm7
        vpunpckldq xmm6, xmm6, xmm5
        vpshufd xmm7, xmm6, 0x1E
        vmovdqa xmm5, xmm9
        vmovdqa xmm6, xmm8
        jmp 9b
9:
        // Feed-forward and write the new chaining value back over cv.
        vpxor xmm0, xmm0, xmm2
        vpxor xmm1, xmm1, xmm3
        vmovdqu xmmword ptr [rdi], xmm0
        vmovdqu xmmword ptr [rdi+0x10], xmm1
        ret

// ---------------------------------------------------------------------------
// blake3_compress_xof_avx512
// Same inputs as blake3_compress_in_place_avx512, plus r9 = 64-byte output
// buffer. Instead of overwriting cv, it produces the full 64-byte extended
// output: rows 0/1 feed-forward as above, and rows 2/3 are additionally
// XORed with the original cv before being stored.
// In:  rdi = cv (read only), rsi = block, dl = block_len, rcx = counter,
//      r8b = flags, r9 = out[64]
// Clobbers: rax, rdx, xmm0-xmm9, flags.
// ---------------------------------------------------------------------------
.p2align 6
_blake3_compress_xof_avx512:
blake3_compress_xof_avx512:
        _CET_ENDBR
        vmovdqu xmm0, xmmword ptr [rdi]           // rows 0-1: chaining value
        vmovdqu xmm1, xmmword ptr [rdi+0x10]
        movzx eax, r8b                            // rax = flags
        movzx edx, dl                             // rdx = block_len
        shl rax, 32
        add rdx, rax                              // rdx = block_len | (flags << 32)
        vmovq xmm3, rcx                           // [counter_lo, counter_hi]
        vmovq xmm4, rdx                           // [block_len, flags]
        vpunpcklqdq xmm3, xmm3, xmm4              // row 3 of the state
        vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip] // row 2: first four IV words
        // De-interleave the 16 message words into xmm4..xmm7.
        vmovups xmm8, xmmword ptr [rsi]
        vmovups xmm9, xmmword ptr [rsi+0x10]
        vshufps xmm4, xmm8, xmm9, 136
        vshufps xmm5, xmm8, xmm9, 221
        vmovups xmm8, xmmword ptr [rsi+0x20]
        vmovups xmm9, xmmword ptr [rsi+0x30]
        vshufps xmm6, xmm8, xmm9, 136
        vshufps xmm7, xmm8, xmm9, 221
        vpshufd xmm6, xmm6, 0x93
        vpshufd xmm7, xmm7, 0x93
        mov al, 7                                 // BLAKE3 runs 7 rounds
9:
        // Column step.
        vpaddd xmm0, xmm0, xmm4
        vpaddd xmm0, xmm0, xmm1
        vpxord xmm3, xmm3, xmm0
        vprord xmm3, xmm3, 16
        vpaddd xmm2, xmm2, xmm3
        vpxord xmm1, xmm1, xmm2
        vprord xmm1, xmm1, 12
        vpaddd xmm0, xmm0, xmm5
        vpaddd xmm0, xmm0, xmm1
        vpxord xmm3, xmm3, xmm0
        vprord xmm3, xmm3, 8
        vpaddd xmm2, xmm2, xmm3
        vpxord xmm1, xmm1, xmm2
        vprord xmm1, xmm1, 7
        // Diagonal step.
        vpshufd xmm0, xmm0, 0x93
        vpshufd xmm3, xmm3, 0x4E
        vpshufd xmm2, xmm2, 0x39
        vpaddd xmm0, xmm0, xmm6
        vpaddd xmm0, xmm0, xmm1
        vpxord xmm3, xmm3, xmm0
        vprord xmm3, xmm3, 16
        vpaddd xmm2, xmm2, xmm3
        vpxord xmm1, xmm1, xmm2
        vprord xmm1, xmm1, 12
        vpaddd xmm0, xmm0, xmm7
        vpaddd xmm0, xmm0, xmm1
        vpxord xmm3, xmm3, xmm0
        vprord xmm3, xmm3, 8
        vpaddd xmm2, xmm2, xmm3
        vpxord xmm1, xmm1, xmm2
        vprord xmm1, xmm1, 7
        vpshufd xmm0, xmm0, 0x39
        vpshufd xmm3, xmm3, 0x4E
        vpshufd xmm2, xmm2, 0x93
        dec al
        jz 9f
        // Message permutation between rounds.
        vshufps xmm8, xmm4, xmm5, 214
        vpshufd xmm9, xmm4, 0x0F
        vpshufd xmm4, xmm8, 0x39
        vshufps xmm8, xmm6, xmm7, 250
        vpblendd xmm9, xmm9, xmm8, 0xAA
        vpunpcklqdq xmm8, xmm7, xmm5
        vpblendd xmm8, xmm8, xmm6, 0x88
        vpshufd xmm8, xmm8, 0x78
        vpunpckhdq xmm5, xmm5, xmm7
        vpunpckldq xmm6, xmm6, xmm5
        vpshufd xmm7, xmm6, 0x1E
        vmovdqa xmm5, xmm9
        vmovdqa xmm6, xmm8
        jmp 9b
9:
        // Extended output: 64 bytes to [r9]; rows 2/3 are XORed with the
        // original cv so the full state can be reconstructed by the caller.
        vpxor xmm0, xmm0, xmm2
        vpxor xmm1, xmm1, xmm3
        vpxor xmm2, xmm2, [rdi]
        vpxor xmm3, xmm3, [rdi+0x10]
        vmovdqu xmmword ptr [r9], xmm0
        vmovdqu xmmword ptr [r9+0x10], xmm1
        vmovdqu xmmword ptr [r9+0x20], xmm2
        vmovdqu xmmword ptr [r9+0x30], xmm3
        ret

// Read-only constant tables used by the routines above (and by the
// hash-many code earlier in the file).
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
// Lane-index tables — presumably vpermi2/vpermt2 selectors for interleaving
// two zmm inputs; verify against the hash-many loop that loads them.
INDEX0:
        .long 0, 1, 2, 3, 16, 17, 18, 19
        .long 8, 9, 10, 11, 24, 25, 26, 27
INDEX1:
        .long 4, 5, 6, 7, 20, 21, 22, 23
        .long 12, 13, 14, 15, 28, 29, 30, 31
// Per-lane counter offsets (0..15) and broadcast increments.
ADD0:
        .long 0, 1, 2, 3, 4, 5, 6, 7
        .long 8, 9, 10, 11, 12, 13, 14, 15
ADD1:
        .long 1
ADD16:
        .long 16
BLAKE3_BLOCK_LEN:
        .long 64                                  // bytes per compression block
.p2align 6
// First four words of the BLAKE3 IV; individual labels allow both whole-row
// loads (BLAKE3_IV) and per-word broadcasts (BLAKE3_IV_0..3).
BLAKE3_IV:
BLAKE3_IV_0:
        .long 0x6A09E667
BLAKE3_IV_1:
        .long 0xBB67AE85
BLAKE3_IV_2:
        .long 0x3C6EF372
BLAKE3_IV_3:
        .long 0xA54FF53A
semyeong-yu/RadFoam
66,147
external/submodules/mesa/src/util/blake3/blake3_avx2_x86-64_unix.S
#include "mesa_blake3_visibility.h" #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits #endif #if defined(__ELF__) && defined(__CET__) && defined(__has_include) #if __has_include(<cet.h>) #include <cet.h> #endif #endif #if !defined(_CET_ENDBR) #define _CET_ENDBR #endif .intel_syntax noprefix HIDDEN _blake3_hash_many_avx2 HIDDEN blake3_hash_many_avx2 .global _blake3_hash_many_avx2 .global blake3_hash_many_avx2 #ifdef __APPLE__ .text #else .section .text #endif .p2align 6 _blake3_hash_many_avx2: blake3_hash_many_avx2: _CET_ENDBR push r15 push r14 push r13 push r12 push rbx push rbp mov rbp, rsp sub rsp, 680 and rsp, 0xFFFFFFFFFFFFFFC0 neg r9d vmovd xmm0, r9d vpbroadcastd ymm0, xmm0 vmovdqa ymmword ptr [rsp+0x280], ymm0 vpand ymm1, ymm0, ymmword ptr [ADD0+rip] vpand ymm2, ymm0, ymmword ptr [ADD1+rip] vmovdqa ymmword ptr [rsp+0x220], ymm2 vmovd xmm2, r8d vpbroadcastd ymm2, xmm2 vpaddd ymm2, ymm2, ymm1 vmovdqa ymmword ptr [rsp+0x240], ymm2 vpxor ymm1, ymm1, ymmword ptr [CMP_MSB_MASK+rip] vpxor ymm2, ymm2, ymmword ptr [CMP_MSB_MASK+rip] vpcmpgtd ymm2, ymm1, ymm2 shr r8, 32 vmovd xmm3, r8d vpbroadcastd ymm3, xmm3 vpsubd ymm3, ymm3, ymm2 vmovdqa ymmword ptr [rsp+0x260], ymm3 shl rdx, 6 mov qword ptr [rsp+0x2A0], rdx cmp rsi, 8 jc 3f 2: vpbroadcastd ymm0, dword ptr [rcx] vpbroadcastd ymm1, dword ptr [rcx+0x4] vpbroadcastd ymm2, dword ptr [rcx+0x8] vpbroadcastd ymm3, dword ptr [rcx+0xC] vpbroadcastd ymm4, dword ptr [rcx+0x10] vpbroadcastd ymm5, dword ptr [rcx+0x14] vpbroadcastd ymm6, dword ptr [rcx+0x18] vpbroadcastd ymm7, dword ptr [rcx+0x1C] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov r12, qword ptr [rdi+0x20] mov r13, qword ptr [rdi+0x28] mov r14, qword ptr [rdi+0x30] mov r15, qword ptr [rdi+0x38] movzx eax, byte ptr [rbp+0x38] movzx ebx, byte ptr [rbp+0x40] or eax, ebx xor edx, edx .p2align 5 9: movzx ebx, byte ptr [rbp+0x48] or ebx, eax add rdx, 64 cmp rdx, qword ptr 
[rsp+0x2A0] cmove eax, ebx mov dword ptr [rsp+0x200], eax vmovups xmm8, xmmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x40] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x40] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x40] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0x20], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0x40], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x60], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x30] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x30] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x30] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x80], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0xA0], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0xC0], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0xE0], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x20] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x20] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01 vmovups 
xmm11, xmmword ptr [r11+rdx-0x20] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x100], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0x120], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0x140], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x160], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x10] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x10] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x10] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x180], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0x1A0], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0x1C0], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x1E0], ymm11 vpbroadcastd ymm15, dword ptr [rsp+0x200] prefetcht0 [r8+rdx+0x80] prefetcht0 [r12+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r13+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r14+rdx+0x80] prefetcht0 [r11+rdx+0x80] prefetcht0 [r15+rdx+0x80] vpaddd ymm0, ymm0, ymmword ptr [rsp] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm0, ymmword ptr [rsp+0x240] vpxor ymm13, ymm1, ymmword ptr [rsp+0x260] vpxor ymm14, ymm2, ymmword ptr [BLAKE3_BLOCK_LEN+rip] vpxor ymm15, ymm3, ymm15 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, 
ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [BLAKE3_IV_0+rip] vpaddd ymm9, ymm13, ymmword ptr [BLAKE3_IV_1+rip] vpaddd ymm10, ymm14, ymmword ptr [BLAKE3_IV_2+rip] vpaddd ymm11, ymm15, ymmword ptr [BLAKE3_IV_3+rip] vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x100] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, 
ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xE0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, 
ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120] vpaddd ymm3, ymm3, ymmword ptr 
[rsp+0x1E0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] 
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, 
ymm0, ymmword ptr [rsp+0xC0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x160] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xA0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld 
ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, 
ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, 
ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x180] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x140] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr 
[rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 
vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 
vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xC0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 
vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1E0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, 
ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr 
[ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vpxor ymm0, ymm0, ymm8 vpxor ymm1, ymm1, ymm9 vpxor ymm2, ymm2, ymm10 vpxor ymm3, ymm3, ymm11 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpxor ymm4, ymm4, ymm12 vpxor ymm5, ymm5, ymm13 vpxor ymm6, ymm6, ymm14 vpxor ymm7, ymm7, ymm15 movzx eax, byte ptr [rbp+0x38] jne 9b mov rbx, qword ptr [rbp+0x50] vunpcklps ymm8, ymm0, ymm1 vunpcklps ymm9, ymm2, ymm3 vunpckhps ymm10, ymm0, ymm1 vunpcklps ymm11, ymm4, ymm5 vunpcklps ymm0, ymm6, ymm7 vshufps ymm12, ymm8, ymm9, 78 vblendps ymm1, ymm8, ymm12, 0xCC vshufps ymm8, ymm11, ymm0, 78 vunpckhps ymm13, ymm2, ymm3 vblendps ymm2, ymm11, ymm8, 0xCC vblendps ymm3, ymm12, ymm9, 0xCC vperm2f128 ymm12, ymm1, ymm2, 0x20 vmovups ymmword ptr [rbx], ymm12 vunpckhps ymm14, ymm4, ymm5 vblendps ymm4, ymm8, ymm0, 0xCC vunpckhps ymm15, ymm6, ymm7 vperm2f128 ymm7, ymm3, ymm4, 0x20 vmovups ymmword ptr [rbx+0x20], ymm7 vshufps ymm5, ymm10, ymm13, 78 vblendps ymm6, ymm5, ymm13, 0xCC vshufps ymm13, ymm14, ymm15, 78 vblendps ymm10, ymm10, ymm5, 0xCC vblendps ymm14, ymm14, ymm13, 0xCC vperm2f128 ymm8, ymm10, ymm14, 0x20 vmovups ymmword ptr [rbx+0x40], ymm8 vblendps ymm15, ymm13, ymm15, 0xCC vperm2f128 ymm13, ymm6, ymm15, 0x20 vmovups ymmword ptr [rbx+0x60], ymm13 vperm2f128 ymm9, ymm1, ymm2, 0x31 vperm2f128 ymm11, ymm3, ymm4, 0x31 vmovups ymmword ptr [rbx+0x80], ymm9 vperm2f128 ymm14, ymm10, ymm14, 0x31 vperm2f128 ymm15, ymm6, ymm15, 0x31 vmovups ymmword ptr [rbx+0xA0], ymm11 vmovups ymmword ptr 
[rbx+0xC0], ymm14 vmovups ymmword ptr [rbx+0xE0], ymm15 vmovdqa ymm0, ymmword ptr [rsp+0x220] vpaddd ymm1, ymm0, ymmword ptr [rsp+0x240] vmovdqa ymmword ptr [rsp+0x240], ymm1 vpxor ymm0, ymm0, ymmword ptr [CMP_MSB_MASK+rip] vpxor ymm2, ymm1, ymmword ptr [CMP_MSB_MASK+rip] vpcmpgtd ymm2, ymm0, ymm2 vmovdqa ymm0, ymmword ptr [rsp+0x260] vpsubd ymm2, ymm0, ymm2 vmovdqa ymmword ptr [rsp+0x260], ymm2 add rdi, 64 add rbx, 256 mov qword ptr [rbp+0x50], rbx sub rsi, 8 cmp rsi, 8 jnc 2b test rsi, rsi jnz 3f 4: vzeroupper mov rsp, rbp pop rbp pop rbx pop r12 pop r13 pop r14 pop r15 ret .p2align 5 3: mov rbx, qword ptr [rbp+0x50] mov r15, qword ptr [rsp+0x2A0] movzx r13d, byte ptr [rbp+0x38] movzx r12d, byte ptr [rbp+0x48] test rsi, 0x4 je 3f vbroadcasti128 ymm0, xmmword ptr [rcx] vbroadcasti128 ymm1, xmmword ptr [rcx+0x10] vmovdqa ymm8, ymm0 vmovdqa ymm9, ymm1 vbroadcasti128 ymm12, xmmword ptr [rsp+0x240] vbroadcasti128 ymm13, xmmword ptr [rsp+0x260] vpunpckldq ymm14, ymm12, ymm13 vpunpckhdq ymm15, ymm12, ymm13 vpermq ymm14, ymm14, 0x50 vpermq ymm15, ymm15, 0x50 vbroadcasti128 ymm12, xmmword ptr [BLAKE3_BLOCK_LEN+rip] vpblendd ymm14, ymm14, ymm12, 0x44 vpblendd ymm15, ymm15, ymm12, 0x44 vmovdqa ymmword ptr [rsp], ymm14 vmovdqa ymmword ptr [rsp+0x20], ymm15 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x200], eax vmovups ymm2, ymmword ptr [r8+rdx-0x40] vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x40], 0x01 vmovups ymm3, ymmword ptr [r8+rdx-0x30] vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x30], 0x01 vshufps ymm4, ymm2, ymm3, 136 vshufps ymm5, ymm2, ymm3, 221 vmovups ymm2, ymmword ptr [r8+rdx-0x20] vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x20], 0x01 vmovups ymm3, ymmword ptr [r8+rdx-0x10] vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x10], 0x01 
vshufps ymm6, ymm2, ymm3, 136 vshufps ymm7, ymm2, ymm3, 221 vpshufd ymm6, ymm6, 0x93 vpshufd ymm7, ymm7, 0x93 vmovups ymm10, ymmword ptr [r10+rdx-0x40] vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x40], 0x01 vmovups ymm11, ymmword ptr [r10+rdx-0x30] vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x30], 0x01 vshufps ymm12, ymm10, ymm11, 136 vshufps ymm13, ymm10, ymm11, 221 vmovups ymm10, ymmword ptr [r10+rdx-0x20] vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x20], 0x01 vmovups ymm11, ymmword ptr [r10+rdx-0x10] vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x10], 0x01 vshufps ymm14, ymm10, ymm11, 136 vshufps ymm15, ymm10, ymm11, 221 vpshufd ymm14, ymm14, 0x93 vpshufd ymm15, ymm15, 0x93 prefetcht0 [r8+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r11+rdx+0x80] vpbroadcastd ymm2, dword ptr [rsp+0x200] vmovdqa ymm3, ymmword ptr [rsp] vmovdqa ymm11, ymmword ptr [rsp+0x20] vpblendd ymm3, ymm3, ymm2, 0x88 vpblendd ymm11, ymm11, ymm2, 0x88 vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip] vmovdqa ymm10, ymm2 mov al, 7 9: vpaddd ymm0, ymm0, ymm4 vpaddd ymm8, ymm8, ymm12 vmovdqa ymmword ptr [rsp+0x40], ymm4 nop vmovdqa ymmword ptr [rsp+0x60], ymm12 nop vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT16+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 12 vpslld ymm9, ymm9, 20 vpor ymm9, ymm9, ymm4 vpaddd ymm0, ymm0, ymm5 vpaddd ymm8, ymm8, ymm13 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vmovdqa ymmword ptr [rsp+0x80], ymm5 vmovdqa ymmword ptr [rsp+0xA0], ymm13 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT8+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, 
ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 7 vpslld ymm9, ymm9, 25 vpor ymm9, ymm9, ymm4 vpshufd ymm0, ymm0, 0x93 vpshufd ymm8, ymm8, 0x93 vpshufd ymm3, ymm3, 0x4E vpshufd ymm11, ymm11, 0x4E vpshufd ymm2, ymm2, 0x39 vpshufd ymm10, ymm10, 0x39 vpaddd ymm0, ymm0, ymm6 vpaddd ymm8, ymm8, ymm14 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT16+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 12 vpslld ymm9, ymm9, 20 vpor ymm9, ymm9, ymm4 vpaddd ymm0, ymm0, ymm7 vpaddd ymm8, ymm8, ymm15 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT8+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 7 vpslld ymm9, ymm9, 25 vpor ymm9, ymm9, ymm4 vpshufd ymm0, ymm0, 0x39 vpshufd ymm8, ymm8, 0x39 vpshufd ymm3, ymm3, 0x4E vpshufd ymm11, ymm11, 0x4E vpshufd ymm2, ymm2, 0x93 vpshufd ymm10, ymm10, 0x93 dec al je 9f vmovdqa ymm4, ymmword ptr [rsp+0x40] vmovdqa ymm5, ymmword ptr [rsp+0x80] vshufps ymm12, ymm4, ymm5, 214 vpshufd ymm13, ymm4, 0x0F vpshufd ymm4, ymm12, 0x39 vshufps ymm12, ymm6, ymm7, 250 vpblendd ymm13, ymm13, ymm12, 0xAA vpunpcklqdq ymm12, ymm7, ymm5 vpblendd ymm12, ymm12, ymm6, 0x88 vpshufd ymm12, ymm12, 0x78 vpunpckhdq ymm5, ymm5, ymm7 vpunpckldq ymm6, ymm6, ymm5 vpshufd ymm7, ymm6, 0x1E vmovdqa ymmword ptr [rsp+0x40], ymm13 vmovdqa ymmword ptr [rsp+0x80], ymm12 vmovdqa ymm12, ymmword ptr [rsp+0x60] vmovdqa ymm13, ymmword ptr [rsp+0xA0] vshufps ymm5, ymm12, 
ymm13, 214 vpshufd ymm6, ymm12, 0x0F vpshufd ymm12, ymm5, 0x39 vshufps ymm5, ymm14, ymm15, 250 vpblendd ymm6, ymm6, ymm5, 0xAA vpunpcklqdq ymm5, ymm15, ymm13 vpblendd ymm5, ymm5, ymm14, 0x88 vpshufd ymm5, ymm5, 0x78 vpunpckhdq ymm13, ymm13, ymm15 vpunpckldq ymm14, ymm14, ymm13 vpshufd ymm15, ymm14, 0x1E vmovdqa ymm13, ymm6 vmovdqa ymm14, ymm5 vmovdqa ymm5, ymmword ptr [rsp+0x40] vmovdqa ymm6, ymmword ptr [rsp+0x80] jmp 9b 9: vpxor ymm0, ymm0, ymm2 vpxor ymm1, ymm1, ymm3 vpxor ymm8, ymm8, ymm10 vpxor ymm9, ymm9, ymm11 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vmovdqu xmmword ptr [rbx+0x40], xmm8 vmovdqu xmmword ptr [rbx+0x50], xmm9 vextracti128 xmmword ptr [rbx+0x60], ymm8, 0x01 vextracti128 xmmword ptr [rbx+0x70], ymm9, 0x01 vmovaps xmm8, xmmword ptr [rsp+0x280] vmovaps xmm0, xmmword ptr [rsp+0x240] vmovaps xmm1, xmmword ptr [rsp+0x250] vmovaps xmm2, xmmword ptr [rsp+0x260] vmovaps xmm3, xmmword ptr [rsp+0x270] vblendvps xmm0, xmm0, xmm1, xmm8 vblendvps xmm2, xmm2, xmm3, xmm8 vmovaps xmmword ptr [rsp+0x240], xmm0 vmovaps xmmword ptr [rsp+0x260], xmm2 add rbx, 128 add rdi, 32 sub rsi, 4 3: test rsi, 0x2 je 3f vbroadcasti128 ymm0, xmmword ptr [rcx] vbroadcasti128 ymm1, xmmword ptr [rcx+0x10] vmovd xmm13, dword ptr [rsp+0x240] vpinsrd xmm13, xmm13, dword ptr [rsp+0x260], 1 vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovd xmm14, dword ptr [rsp+0x244] vpinsrd xmm14, xmm14, dword ptr [rsp+0x264], 1 vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vinserti128 ymm13, ymm13, xmm14, 0x01 vbroadcasti128 ymm14, xmmword ptr [ROT16+rip] vbroadcasti128 ymm15, xmmword ptr [ROT8+rip] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x200], eax 
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip] vpbroadcastd ymm8, dword ptr [rsp+0x200] vpblendd ymm3, ymm13, ymm8, 0x88 vmovups ymm8, ymmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01 vshufps ymm4, ymm8, ymm9, 136 vshufps ymm5, ymm8, ymm9, 221 vmovups ymm8, ymmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01 vshufps ymm6, ymm8, ymm9, 136 vshufps ymm7, ymm8, ymm9, 221 vpshufd ymm6, ymm6, 0x93 vpshufd ymm7, ymm7, 0x93 mov al, 7 9: vpaddd ymm0, ymm0, ymm4 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm14 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm8 vpaddd ymm0, ymm0, ymm5 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm15 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm8 vpshufd ymm0, ymm0, 0x93 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x39 vpaddd ymm0, ymm0, ymm6 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm14 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm8 vpaddd ymm0, ymm0, ymm7 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm15 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm8 vpshufd ymm0, ymm0, 0x39 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x93 dec al jz 9f vshufps ymm8, ymm4, ymm5, 214 vpshufd ymm9, ymm4, 0x0F vpshufd ymm4, ymm8, 0x39 vshufps ymm8, ymm6, ymm7, 250 vpblendd ymm9, ymm9, ymm8, 0xAA vpunpcklqdq ymm8, ymm7, ymm5 vpblendd ymm8, ymm8, ymm6, 0x88 vpshufd ymm8, ymm8, 0x78 vpunpckhdq ymm5, ymm5, ymm7 vpunpckldq ymm6, ymm6, ymm5 vpshufd ymm7, ymm6, 0x1E 
vmovdqa ymm5, ymm9 vmovdqa ymm6, ymm8 jmp 9b 9: vpxor ymm0, ymm0, ymm2 vpxor ymm1, ymm1, ymm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vmovaps ymm8, ymmword ptr [rsp+0x280] vmovaps ymm0, ymmword ptr [rsp+0x240] vmovups ymm1, ymmword ptr [rsp+0x248] vmovaps ymm2, ymmword ptr [rsp+0x260] vmovups ymm3, ymmword ptr [rsp+0x268] vblendvps ymm0, ymm0, ymm1, ymm8 vblendvps ymm2, ymm2, ymm3, ymm8 vmovaps ymmword ptr [rsp+0x240], ymm0 vmovaps ymmword ptr [rsp+0x260], ymm2 add rbx, 64 add rdi, 16 sub rsi, 2 3: test rsi, 0x1 je 4b vmovdqu xmm0, xmmword ptr [rcx] vmovdqu xmm1, xmmword ptr [rcx+0x10] vmovd xmm3, dword ptr [rsp+0x240] vpinsrd xmm3, xmm3, dword ptr [rsp+0x260], 1 vpinsrd xmm13, xmm3, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovdqa xmm14, xmmword ptr [ROT16+rip] vmovdqa xmm15, xmmword ptr [ROT8+rip] mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d vmovdqa xmm2, xmmword ptr [BLAKE3_IV+rip] vmovdqa xmm3, xmm13 vpinsrd xmm3, xmm3, eax, 3 vmovups xmm8, xmmword ptr [r8+rdx-0x40] vmovups xmm9, xmmword ptr [r8+rdx-0x30] vshufps xmm4, xmm8, xmm9, 136 vshufps xmm5, xmm8, xmm9, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vmovups xmm9, xmmword ptr [r8+rdx-0x10] vshufps xmm6, xmm8, xmm9, 136 vshufps xmm7, xmm8, xmm9, 221 vpshufd xmm6, xmm6, 0x93 vpshufd xmm7, xmm7, 0x93 mov al, 7 9: vpaddd xmm0, xmm0, xmm4 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm14 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 12 vpslld xmm1, xmm1, 20 vpor xmm1, xmm1, xmm8 vpaddd xmm0, xmm0, xmm5 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm15 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 7 vpslld xmm1, xmm1, 25 vpor xmm1, xmm1, xmm8 vpshufd xmm0, xmm0, 
0x93 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x39 vpaddd xmm0, xmm0, xmm6 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm14 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 12 vpslld xmm1, xmm1, 20 vpor xmm1, xmm1, xmm8 vpaddd xmm0, xmm0, xmm7 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm15 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 7 vpslld xmm1, xmm1, 25 vpor xmm1, xmm1, xmm8 vpshufd xmm0, xmm0, 0x39 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x93 dec al jz 9f vshufps xmm8, xmm4, xmm5, 214 vpshufd xmm9, xmm4, 0x0F vpshufd xmm4, xmm8, 0x39 vshufps xmm8, xmm6, xmm7, 250 vpblendd xmm9, xmm9, xmm8, 0xAA vpunpcklqdq xmm8, xmm7, xmm5 vpblendd xmm8, xmm8, xmm6, 0x88 vpshufd xmm8, xmm8, 0x78 vpunpckhdq xmm5, xmm5, xmm7 vpunpckldq xmm6, xmm6, xmm5 vpshufd xmm7, xmm6, 0x1E vmovdqa xmm5, xmm9 vmovdqa xmm6, xmm8 jmp 9b 9: vpxor xmm0, xmm0, xmm2 vpxor xmm1, xmm1, xmm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 jmp 4b #ifdef __APPLE__ .static_data #else .section .rodata #endif .p2align 6 ADD0: .long 0, 1, 2, 3, 4, 5, 6, 7 ADD1: .long 8, 8, 8, 8, 8, 8, 8, 8 BLAKE3_IV_0: .long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667 .long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667 BLAKE3_IV_1: .long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85 .long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85 BLAKE3_IV_2: .long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372 .long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372 BLAKE3_IV_3: .long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A .long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A BLAKE3_BLOCK_LEN: .long 0x00000040, 0x00000040, 0x00000040, 0x00000040 .long 0x00000040, 0x00000040, 0x00000040, 0x00000040 ROT16: .byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13 ROT8: .byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12 CMP_MSB_MASK: .long 0x80000000, 0x80000000, 
0x80000000, 0x80000000 .long 0x80000000, 0x80000000, 0x80000000, 0x80000000 BLAKE3_IV: .long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
semyeong-yu/RadFoam
71,202
external/submodules/mesa/src/util/blake3/blake3_sse2_x86-64_windows_gnu.S
.intel_syntax noprefix .global blake3_hash_many_sse2 .global _blake3_hash_many_sse2 .global blake3_compress_in_place_sse2 .global _blake3_compress_in_place_sse2 .global blake3_compress_xof_sse2 .global _blake3_compress_xof_sse2 .section .text .p2align 6 _blake3_hash_many_sse2: blake3_hash_many_sse2: push r15 push r14 push r13 push r12 push rsi push rdi push rbx push rbp mov rbp, rsp sub rsp, 528 and rsp, 0xFFFFFFFFFFFFFFC0 movdqa xmmword ptr [rsp+0x170], xmm6 movdqa xmmword ptr [rsp+0x180], xmm7 movdqa xmmword ptr [rsp+0x190], xmm8 movdqa xmmword ptr [rsp+0x1A0], xmm9 movdqa xmmword ptr [rsp+0x1B0], xmm10 movdqa xmmword ptr [rsp+0x1C0], xmm11 movdqa xmmword ptr [rsp+0x1D0], xmm12 movdqa xmmword ptr [rsp+0x1E0], xmm13 movdqa xmmword ptr [rsp+0x1F0], xmm14 movdqa xmmword ptr [rsp+0x200], xmm15 mov rdi, rcx mov rsi, rdx mov rdx, r8 mov rcx, r9 mov r8, qword ptr [rbp+0x68] movzx r9, byte ptr [rbp+0x70] neg r9d movd xmm0, r9d pshufd xmm0, xmm0, 0x00 movdqa xmmword ptr [rsp+0x130], xmm0 movdqa xmm1, xmm0 pand xmm1, xmmword ptr [ADD0+rip] pand xmm0, xmmword ptr [ADD1+rip] movdqa xmmword ptr [rsp+0x150], xmm0 movd xmm0, r8d pshufd xmm0, xmm0, 0x00 paddd xmm0, xmm1 movdqa xmmword ptr [rsp+0x110], xmm0 pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip] pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip] pcmpgtd xmm1, xmm0 shr r8, 32 movd xmm2, r8d pshufd xmm2, xmm2, 0x00 psubd xmm2, xmm1 movdqa xmmword ptr [rsp+0x120], xmm2 mov rbx, qword ptr [rbp+0x90] mov r15, rdx shl r15, 6 movzx r13d, byte ptr [rbp+0x78] movzx r12d, byte ptr [rbp+0x88] cmp rsi, 4 jc 3f 2: movdqu xmm3, xmmword ptr [rcx] pshufd xmm0, xmm3, 0x00 pshufd xmm1, xmm3, 0x55 pshufd xmm2, xmm3, 0xAA pshufd xmm3, xmm3, 0xFF movdqu xmm7, xmmword ptr [rcx+0x10] pshufd xmm4, xmm7, 0x00 pshufd xmm5, xmm7, 0x55 pshufd xmm6, xmm7, 0xAA pshufd xmm7, xmm7, 0xFF mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx 9: mov r14d, 
eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movdqu xmm8, xmmword ptr [r8+rdx-0x40] movdqu xmm9, xmmword ptr [r9+rdx-0x40] movdqu xmm10, xmmword ptr [r10+rdx-0x40] movdqu xmm11, xmmword ptr [r11+rdx-0x40] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp], xmm8 movdqa xmmword ptr [rsp+0x10], xmm9 movdqa xmmword ptr [rsp+0x20], xmm12 movdqa xmmword ptr [rsp+0x30], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x30] movdqu xmm9, xmmword ptr [r9+rdx-0x30] movdqu xmm10, xmmword ptr [r10+rdx-0x30] movdqu xmm11, xmmword ptr [r11+rdx-0x30] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0x40], xmm8 movdqa xmmword ptr [rsp+0x50], xmm9 movdqa xmmword ptr [rsp+0x60], xmm12 movdqa xmmword ptr [rsp+0x70], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x20] movdqu xmm9, xmmword ptr [r9+rdx-0x20] movdqu xmm10, xmmword ptr [r10+rdx-0x20] movdqu xmm11, xmmword ptr [r11+rdx-0x20] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0x80], xmm8 movdqa xmmword ptr [rsp+0x90], xmm9 movdqa xmmword ptr [rsp+0xA0], xmm12 movdqa xmmword ptr [rsp+0xB0], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x10] movdqu xmm9, xmmword ptr [r9+rdx-0x10] movdqu xmm10, xmmword ptr [r10+rdx-0x10] movdqu xmm11, xmmword ptr [r11+rdx-0x10] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 
punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0xC0], xmm8 movdqa xmmword ptr [rsp+0xD0], xmm9 movdqa xmmword ptr [rsp+0xE0], xmm12 movdqa xmmword ptr [rsp+0xF0], xmm13 movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip] movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip] movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip] movdqa xmm12, xmmword ptr [rsp+0x110] movdqa xmm13, xmmword ptr [rsp+0x120] movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip] movd xmm15, eax pshufd xmm15, xmm15, 0x00 prefetcht0 [r8+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r11+rdx+0x80] paddd xmm0, xmmword ptr [rsp] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x40] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x10] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x50] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld 
xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x80] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0xC0] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x90] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0xD0] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, 
xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x20] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x70] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x60] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor 
xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x10] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x90] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xB0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0xE0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld 
xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x30] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0xD0] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x40] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x20] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, 
xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x60] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0xB0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x50] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0xF0] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld 
xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xA0] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0xE0] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x70] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0x30] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, 
xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x40] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0x50] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x80] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, 
xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xC0] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0xF0] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xD0] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0xA0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, 
xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x70] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x20] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x10] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 
pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x90] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0x80] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xE0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0xC0] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor 
xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xD0] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0x20] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x30] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0x60] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], 
xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xB0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0x10] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xF0] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0x90] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld 
xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xE0] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x30] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xA0] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x40] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 pxor xmm0, xmm8 pxor xmm1, xmm9 pxor xmm2, xmm10 pxor xmm3, xmm11 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 
por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 pxor xmm4, xmm12 pxor xmm5, xmm13 pxor xmm6, xmm14 pxor xmm7, xmm15 mov eax, r13d jne 9b movdqa xmm9, xmm0 punpckldq xmm0, xmm1 punpckhdq xmm9, xmm1 movdqa xmm11, xmm2 punpckldq xmm2, xmm3 punpckhdq xmm11, xmm3 movdqa xmm1, xmm0 punpcklqdq xmm0, xmm2 punpckhqdq xmm1, xmm2 movdqa xmm3, xmm9 punpcklqdq xmm9, xmm11 punpckhqdq xmm3, xmm11 movdqu xmmword ptr [rbx], xmm0 movdqu xmmword ptr [rbx+0x20], xmm1 movdqu xmmword ptr [rbx+0x40], xmm9 movdqu xmmword ptr [rbx+0x60], xmm3 movdqa xmm9, xmm4 punpckldq xmm4, xmm5 punpckhdq xmm9, xmm5 movdqa xmm11, xmm6 punpckldq xmm6, xmm7 punpckhdq xmm11, xmm7 movdqa xmm5, xmm4 punpcklqdq xmm4, xmm6 punpckhqdq xmm5, xmm6 movdqa xmm7, xmm9 punpcklqdq xmm9, xmm11 punpckhqdq xmm7, xmm11 movdqu xmmword ptr [rbx+0x10], xmm4 movdqu xmmword ptr [rbx+0x30], xmm5 movdqu xmmword ptr [rbx+0x50], xmm9 movdqu xmmword ptr [rbx+0x70], xmm7 movdqa xmm1, xmmword ptr [rsp+0x110] movdqa xmm0, xmm1 paddd xmm1, xmmword ptr [rsp+0x150] movdqa xmmword ptr [rsp+0x110], xmm1 pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip] pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip] pcmpgtd xmm0, xmm1 movdqa xmm1, xmmword ptr [rsp+0x120] psubd xmm1, xmm0 movdqa xmmword ptr [rsp+0x120], xmm1 add rbx, 128 add rdi, 32 sub rsi, 4 cmp rsi, 4 jnc 2b test rsi, rsi jne 3f 4: movdqa xmm6, xmmword ptr [rsp+0x170] movdqa xmm7, xmmword ptr [rsp+0x180] movdqa xmm8, xmmword ptr [rsp+0x190] movdqa xmm9, xmmword ptr [rsp+0x1A0] movdqa xmm10, xmmword ptr [rsp+0x1B0] movdqa xmm11, xmmword ptr [rsp+0x1C0] movdqa xmm12, xmmword ptr [rsp+0x1D0] movdqa xmm13, xmmword ptr [rsp+0x1E0] movdqa xmm14, xmmword ptr [rsp+0x1F0] movdqa xmm15, xmmword ptr [rsp+0x200] mov rsp, rbp pop rbp pop rbx pop rdi pop rsi pop r12 pop r13 pop r14 pop r15 ret .p2align 5 3: test esi, 0x2 je 3f movups xmm0, xmmword ptr [rcx] movups 
xmm1, xmmword ptr [rcx+0x10] movaps xmm8, xmm0 movaps xmm9, xmm1 movd xmm13, dword ptr [rsp+0x110] movd xmm14, dword ptr [rsp+0x120] punpckldq xmm13, xmm14 movaps xmmword ptr [rsp], xmm13 movd xmm14, dword ptr [rsp+0x114] movd xmm13, dword ptr [rsp+0x124] punpckldq xmm14, xmm13 movaps xmmword ptr [rsp+0x10], xmm14 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movaps xmm10, xmm2 movups xmm4, xmmword ptr [r8+rdx-0x40] movups xmm5, xmmword ptr [r8+rdx-0x30] movaps xmm3, xmm4 shufps xmm4, xmm5, 136 shufps xmm3, xmm5, 221 movaps xmm5, xmm3 movups xmm6, xmmword ptr [r8+rdx-0x20] movups xmm7, xmmword ptr [r8+rdx-0x10] movaps xmm3, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm3, xmm7, 221 pshufd xmm7, xmm3, 0x93 movups xmm12, xmmword ptr [r9+rdx-0x40] movups xmm13, xmmword ptr [r9+rdx-0x30] movaps xmm11, xmm12 shufps xmm12, xmm13, 136 shufps xmm11, xmm13, 221 movaps xmm13, xmm11 movups xmm14, xmmword ptr [r9+rdx-0x20] movups xmm15, xmmword ptr [r9+rdx-0x10] movaps xmm11, xmm14 shufps xmm14, xmm15, 136 pshufd xmm14, xmm14, 0x93 shufps xmm11, xmm15, 221 pshufd xmm15, xmm11, 0x93 shl rax, 0x20 or rax, 0x40 movq xmm3, rax movdqa xmmword ptr [rsp+0x20], xmm3 movaps xmm3, xmmword ptr [rsp] movaps xmm11, xmmword ptr [rsp+0x10] punpcklqdq xmm3, xmmword ptr [rsp+0x20] punpcklqdq xmm11, xmmword ptr [rsp+0x20] mov al, 7 9: paddd xmm0, xmm4 paddd xmm8, xmm12 movaps xmmword ptr [rsp+0x20], xmm4 movaps xmmword ptr [rsp+0x30], xmm12 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 pshuflw xmm11, xmm11, 0xB1 pshufhw xmm11, xmm11, 0xB1 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 20 psrld xmm4, 12 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 20 psrld xmm4, 12 por xmm9, xmm4 paddd 
xmm0, xmm5 paddd xmm8, xmm13 movaps xmmword ptr [rsp+0x40], xmm5 movaps xmmword ptr [rsp+0x50], xmm13 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 movdqa xmm13, xmm3 psrld xmm3, 8 pslld xmm13, 24 pxor xmm3, xmm13 movdqa xmm13, xmm11 psrld xmm11, 8 pslld xmm13, 24 pxor xmm11, xmm13 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 25 psrld xmm4, 7 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 25 psrld xmm4, 7 por xmm9, xmm4 pshufd xmm0, xmm0, 0x93 pshufd xmm8, xmm8, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm11, xmm11, 0x4E pshufd xmm2, xmm2, 0x39 pshufd xmm10, xmm10, 0x39 paddd xmm0, xmm6 paddd xmm8, xmm14 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 pshuflw xmm11, xmm11, 0xB1 pshufhw xmm11, xmm11, 0xB1 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 20 psrld xmm4, 12 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 20 psrld xmm4, 12 por xmm9, xmm4 paddd xmm0, xmm7 paddd xmm8, xmm15 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 movdqa xmm13, xmm3 psrld xmm3, 8 pslld xmm13, 24 pxor xmm3, xmm13 movdqa xmm13, xmm11 psrld xmm11, 8 pslld xmm13, 24 pxor xmm11, xmm13 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 25 psrld xmm4, 7 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 25 psrld xmm4, 7 por xmm9, xmm4 pshufd xmm0, xmm0, 0x39 pshufd xmm8, xmm8, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm11, xmm11, 0x4E pshufd xmm2, xmm2, 0x93 pshufd xmm10, xmm10, 0x93 dec al je 9f movdqa xmm12, xmmword ptr [rsp+0x20] movdqa xmm5, xmmword ptr [rsp+0x40] pshufd xmm13, xmm12, 0x0F shufps xmm12, xmm5, 214 pshufd xmm4, xmm12, 0x39 movdqa xmm12, xmm6 shufps xmm12, xmm7, 250 pand xmm13, xmmword ptr [PBLENDW_0x33_MASK+rip] pand xmm12, xmmword ptr [PBLENDW_0xCC_MASK+rip] por xmm13, xmm12 movdqa xmmword ptr [rsp+0x20], xmm13 movdqa xmm12, xmm7 
punpcklqdq xmm12, xmm5 movdqa xmm13, xmm6 pand xmm12, xmmword ptr [PBLENDW_0x3F_MASK+rip] pand xmm13, xmmword ptr [PBLENDW_0xC0_MASK+rip] por xmm12, xmm13 pshufd xmm12, xmm12, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmmword ptr [rsp+0x40], xmm12 movdqa xmm5, xmmword ptr [rsp+0x30] movdqa xmm13, xmmword ptr [rsp+0x50] pshufd xmm6, xmm5, 0x0F shufps xmm5, xmm13, 214 pshufd xmm12, xmm5, 0x39 movdqa xmm5, xmm14 shufps xmm5, xmm15, 250 pand xmm6, xmmword ptr [PBLENDW_0x33_MASK+rip] pand xmm5, xmmword ptr [PBLENDW_0xCC_MASK+rip] por xmm6, xmm5 movdqa xmm5, xmm15 punpcklqdq xmm5, xmm13 movdqa xmmword ptr [rsp+0x30], xmm2 movdqa xmm2, xmm14 pand xmm5, xmmword ptr [PBLENDW_0x3F_MASK+rip] pand xmm2, xmmword ptr [PBLENDW_0xC0_MASK+rip] por xmm5, xmm2 movdqa xmm2, xmmword ptr [rsp+0x30] pshufd xmm5, xmm5, 0x78 punpckhdq xmm13, xmm15 punpckldq xmm14, xmm13 pshufd xmm15, xmm14, 0x1E movdqa xmm13, xmm6 movdqa xmm14, xmm5 movdqa xmm5, xmmword ptr [rsp+0x20] movdqa xmm6, xmmword ptr [rsp+0x40] jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 pxor xmm8, xmm10 pxor xmm9, xmm11 mov eax, r13d cmp rdx, r15 jne 2b movups xmmword ptr [rbx], xmm0 movups xmmword ptr [rbx+0x10], xmm1 movups xmmword ptr [rbx+0x20], xmm8 movups xmmword ptr [rbx+0x30], xmm9 mov eax, dword ptr [rsp+0x130] neg eax mov r10d, dword ptr [rsp+0x110+8*rax] mov r11d, dword ptr [rsp+0x120+8*rax] mov dword ptr [rsp+0x110], r10d mov dword ptr [rsp+0x120], r11d add rdi, 16 add rbx, 64 sub rsi, 2 3: test esi, 0x1 je 4b movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movd xmm13, dword ptr [rsp+0x110] movd xmm14, dword ptr [rsp+0x120] punpckldq xmm13, xmm14 mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movaps xmm2, xmmword ptr [BLAKE3_IV+rip] shl rax, 32 or rax, 64 movq xmm12, rax movdqa xmm3, xmm13 punpcklqdq xmm3, xmm12 movups xmm4, xmmword ptr [r8+rdx-0x40] movups 
xmm5, xmmword ptr [r8+rdx-0x30] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [r8+rdx-0x20] movups xmm7, xmmword ptr [r8+rdx-0x10] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 movdqa xmm14, xmm3 psrld xmm3, 8 pslld xmm14, 24 pxor xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 movdqa xmm14, xmm3 psrld xmm3, 8 pslld xmm14, 24 pxor xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip] pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip] por xmm9, xmm8 movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 movdqa xmm10, xmm6 pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip] pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip] por xmm8, xmm10 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 mov eax, r13d cmp rdx, r15 jne 2b movups xmmword ptr [rbx], xmm0 movups xmmword ptr [rbx+0x10], xmm1 jmp 4b .p2align 6 
#-----------------------------------------------------------------------
# blake3_compress_in_place_sse2
# C equivalent (per the upstream BLAKE3 reference — confirm against header):
#   void blake3_compress_in_place_sse2(uint32_t cv[8],
#                                      const uint8_t block[64],
#                                      uint8_t block_len,
#                                      uint64_t counter,
#                                      uint8_t flags);
# ABI:  Microsoft x64 (this is the *_windows_gnu build of the file):
#       rcx = cv, rdx = block, r8b = block_len, r9 = counter,
#       5th arg (flags) on the caller stack.  After `sub rsp, 120` it sits
#       at [rsp+0xA0] = 120 locals + 8 return addr + 32 shadow space.
# Out:  the 8 updated state words are stored back over cv[0..7].
# NOTE(review): xmm6-xmm15 are callee-saved under the MS x64 ABI; only the
# ones this routine clobbers (xmm6-9, xmm11, xmm14, xmm15) are spilled.
#-----------------------------------------------------------------------
blake3_compress_in_place_sse2:
_blake3_compress_in_place_sse2:
        sub     rsp, 120
        # spill the callee-saved XMM registers we are about to clobber
        movdqa  xmmword ptr [rsp], xmm6
        movdqa  xmmword ptr [rsp+0x10], xmm7
        movdqa  xmmword ptr [rsp+0x20], xmm8
        movdqa  xmmword ptr [rsp+0x30], xmm9
        movdqa  xmmword ptr [rsp+0x40], xmm11
        movdqa  xmmword ptr [rsp+0x50], xmm14
        movdqa  xmmword ptr [rsp+0x60], xmm15
        # state rows 0-1 <- chaining value; row 2 <- first half of the IV
        movups  xmm0, xmmword ptr [rcx]
        movups  xmm1, xmmword ptr [rcx+0x10]
        movaps  xmm2, xmmword ptr [BLAKE3_IV+rip]
        # row 3 = { counter_lo, counter_hi, block_len, flags }
        movzx   eax, byte ptr [rsp+0xA0]
        movzx   r8d, r8b
        shl     rax, 32
        add     r8, rax
        movq    xmm3, r9
        movq    xmm4, r8
        punpcklqdq xmm3, xmm4
        # de-interleave the 64-byte message block into xmm4-xmm7
        movups  xmm4, xmmword ptr [rdx]
        movups  xmm5, xmmword ptr [rdx+0x10]
        movaps  xmm8, xmm4
        shufps  xmm4, xmm5, 136
        shufps  xmm8, xmm5, 221
        movaps  xmm5, xmm8
        movups  xmm6, xmmword ptr [rdx+0x20]
        movups  xmm7, xmmword ptr [rdx+0x30]
        movaps  xmm8, xmm6
        shufps  xmm6, xmm7, 136
        pshufd  xmm6, xmm6, 0x93
        shufps  xmm8, xmm7, 221
        pshufd  xmm7, xmm8, 0x93
        mov     al, 7                   # 7 rounds; al is the round counter
# round loop (numeric local label; 9b/9f below refer to it)
9:
        # column step: a += m; a += b; d = rot16(d ^ a)
        # (pshuflw+pshufhw 0xB1 swaps 16-bit halves = rotate right by 16)
        paddd   xmm0, xmm4
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        pshuflw xmm3, xmm3, 0xB1
        pshufhw xmm3, xmm3, 0xB1
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        # b = rot12(b) via shift-shift-or (SSE2 has no vector rotate)
        movdqa  xmm11, xmm1
        pslld   xmm1, 20
        psrld   xmm11, 12
        por     xmm1, xmm11
        paddd   xmm0, xmm5
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        # d = rot8(d)
        movdqa  xmm14, xmm3
        psrld   xmm3, 8
        pslld   xmm14, 24
        pxor    xmm3, xmm14
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        # b = rot7(b)
        movdqa  xmm11, xmm1
        pslld   xmm1, 25
        psrld   xmm11, 7
        por     xmm1, xmm11
        # diagonalize the rows for the diagonal step
        pshufd  xmm0, xmm0, 0x93
        pshufd  xmm3, xmm3, 0x4E
        pshufd  xmm2, xmm2, 0x39
        # diagonal step: same rot16 / rot12 / rot8 / rot7 sequence
        paddd   xmm0, xmm6
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        pshuflw xmm3, xmm3, 0xB1
        pshufhw xmm3, xmm3, 0xB1
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        movdqa  xmm11, xmm1
        pslld   xmm1, 20
        psrld   xmm11, 12
        por     xmm1, xmm11
        paddd   xmm0, xmm7
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        movdqa  xmm14, xmm3
        psrld   xmm3, 8
        pslld   xmm14, 24
        pxor    xmm3, xmm14
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        movdqa  xmm11, xmm1
        pslld   xmm1, 25
        psrld   xmm11, 7
        por     xmm1, xmm11
        # un-diagonalize
        pshufd  xmm0, xmm0, 0x39
        pshufd  xmm3, xmm3, 0x4E
        pshufd  xmm2, xmm2, 0x93
        dec     al
        jz      9f                      # after the 7th round, fall out
        # message permutation for the next round; the PBLENDW_* masks in
        # .rdata emulate pblendw immediates with SSE2 pand/por
        movdqa  xmm8, xmm4
        shufps  xmm8, xmm5, 214
        pshufd  xmm9, xmm4, 0x0F
        pshufd  xmm4, xmm8, 0x39
        movdqa  xmm8, xmm6
        shufps  xmm8, xmm7, 250
        pand    xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
        pand    xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
        por     xmm9, xmm8
        movdqa  xmm8, xmm7
        punpcklqdq xmm8, xmm5
        movdqa  xmm14, xmm6
        pand    xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
        pand    xmm14, xmmword ptr [PBLENDW_0xC0_MASK+rip]
        por     xmm8, xmm14
        pshufd  xmm8, xmm8, 0x78
        punpckhdq xmm5, xmm7
        punpckldq xmm6, xmm5
        pshufd  xmm7, xmm6, 0x1E
        movdqa  xmm5, xmm9
        movdqa  xmm6, xmm8
        jmp     9b
9:
        # feed-forward: first half of the state XOR second half is the new cv
        pxor    xmm0, xmm2
        pxor    xmm1, xmm3
        movups  xmmword ptr [rcx], xmm0
        movups  xmmword ptr [rcx+0x10], xmm1
        # restore callee-saved XMM registers and return
        movdqa  xmm6, xmmword ptr [rsp]
        movdqa  xmm7, xmmword ptr [rsp+0x10]
        movdqa  xmm8, xmmword ptr [rsp+0x20]
        movdqa  xmm9, xmmword ptr [rsp+0x30]
        movdqa  xmm11, xmmword ptr [rsp+0x40]
        movdqa  xmm14, xmmword ptr [rsp+0x50]
        movdqa  xmm15, xmmword ptr [rsp+0x60]
        add     rsp, 120
        ret
.p2align 6
#-----------------------------------------------------------------------
# blake3_compress_xof_sse2
# Same compression as blake3_compress_in_place_sse2 above, but the 64-byte
# extended output is written to a separate buffer and cv is left untouched.
# C equivalent (per the upstream BLAKE3 reference — confirm against header):
#   void blake3_compress_xof_sse2(const uint32_t cv[8],
#                                 const uint8_t block[64],
#                                 uint8_t block_len, uint64_t counter,
#                                 uint8_t flags, uint8_t out[64]);
# ABI:  MS x64; rcx = cv, rdx = block, r8b = block_len, r9 = counter,
#       [rsp+0xA0] = flags, [rsp+0xA8] = out (5th/6th stack args).
#-----------------------------------------------------------------------
_blake3_compress_xof_sse2:
blake3_compress_xof_sse2:
        sub     rsp, 120
        # spill the callee-saved XMM registers we clobber
        movdqa  xmmword ptr [rsp], xmm6
        movdqa  xmmword ptr [rsp+0x10], xmm7
        movdqa  xmmword ptr [rsp+0x20], xmm8
        movdqa  xmmword ptr [rsp+0x30], xmm9
        movdqa  xmmword ptr [rsp+0x40], xmm11
        movdqa  xmmword ptr [rsp+0x50], xmm14
        movdqa  xmmword ptr [rsp+0x60], xmm15
        # rows 0-1 <- cv, row 2 <- IV, row 3 <- counter/len/flags
        movups  xmm0, xmmword ptr [rcx]
        movups  xmm1, xmmword ptr [rcx+0x10]
        movaps  xmm2, xmmword ptr [BLAKE3_IV+rip]
        movzx   eax, byte ptr [rsp+0xA0]
        movzx   r8d, r8b
        mov     r10, qword ptr [rsp+0xA8]   # r10 = out pointer (6th arg)
        shl     rax, 32
        add     r8, rax
        movq    xmm3, r9
        movq    xmm4, r8
        punpcklqdq xmm3, xmm4
        # de-interleave the 64-byte message block into xmm4-xmm7
        movups  xmm4, xmmword ptr [rdx]
        movups  xmm5, xmmword ptr [rdx+0x10]
        movaps  xmm8, xmm4
        shufps  xmm4, xmm5, 136
        shufps  xmm8, xmm5, 221
        movaps  xmm5, xmm8
        movups  xmm6, xmmword ptr [rdx+0x20]
        movups  xmm7, xmmword ptr [rdx+0x30]
        movaps  xmm8, xmm6
        shufps  xmm6, xmm7, 136
        pshufd  xmm6, xmm6, 0x93
        shufps  xmm8, xmm7, 221
        pshufd  xmm7, xmm8, 0x93
        mov     al, 7                   # 7 rounds
# round loop — identical structure to the in_place variant above
9:
        paddd   xmm0, xmm4
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        pshuflw xmm3, xmm3, 0xB1        # rot16
        pshufhw xmm3, xmm3, 0xB1
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        movdqa  xmm11, xmm1             # rot12
        pslld   xmm1, 20
        psrld   xmm11, 12
        por     xmm1, xmm11
        paddd   xmm0, xmm5
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        movdqa  xmm14, xmm3             # rot8
        psrld   xmm3, 8
        pslld   xmm14, 24
        pxor    xmm3, xmm14
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        movdqa  xmm11, xmm1             # rot7
        pslld   xmm1, 25
        psrld   xmm11, 7
        por     xmm1, xmm11
        # diagonalize, diagonal step, un-diagonalize
        pshufd  xmm0, xmm0, 0x93
        pshufd  xmm3, xmm3, 0x4E
        pshufd  xmm2, xmm2, 0x39
        paddd   xmm0, xmm6
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        pshuflw xmm3, xmm3, 0xB1
        pshufhw xmm3, xmm3, 0xB1
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        movdqa  xmm11, xmm1
        pslld   xmm1, 20
        psrld   xmm11, 12
        por     xmm1, xmm11
        paddd   xmm0, xmm7
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        movdqa  xmm14, xmm3
        psrld   xmm3, 8
        pslld   xmm14, 24
        pxor    xmm3, xmm14
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        movdqa  xmm11, xmm1
        pslld   xmm1, 25
        psrld   xmm11, 7
        por     xmm1, xmm11
        pshufd  xmm0, xmm0, 0x39
        pshufd  xmm3, xmm3, 0x4E
        pshufd  xmm2, xmm2, 0x93
        dec     al
        jz      9f
        # message permutation for the next round (pblendw emulated via masks)
        movdqa  xmm8, xmm4
        shufps  xmm8, xmm5, 214
        pshufd  xmm9, xmm4, 0x0F
        pshufd  xmm4, xmm8, 0x39
        movdqa  xmm8, xmm6
        shufps  xmm8, xmm7, 250
        pand    xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
        pand    xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
        por     xmm9, xmm8
        movdqa  xmm8, xmm7
        punpcklqdq xmm8, xmm5
        movdqa  xmm14, xmm6
        pand    xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
        pand    xmm14, xmmword ptr [PBLENDW_0xC0_MASK+rip]
        por     xmm8, xmm14
        pshufd  xmm8, xmm8, 0x78
        punpckhdq xmm5, xmm7
        punpckldq xmm6, xmm5
        pshufd  xmm7, xmm6, 0x1E
        movdqa  xmm5, xmm9
        movdqa  xmm6, xmm8
        jmp     9b
9:
        # XOF finalization: low half = state ^ state-high, high half =
        # state-high ^ original cv; write all 64 bytes to out (r10)
        movdqu  xmm4, xmmword ptr [rcx]
        movdqu  xmm5, xmmword ptr [rcx+0x10]
        pxor    xmm0, xmm2
        pxor    xmm1, xmm3
        pxor    xmm2, xmm4
        pxor    xmm3, xmm5
        movups  xmmword ptr [r10], xmm0
        movups  xmmword ptr [r10+0x10], xmm1
        movups  xmmword ptr [r10+0x20], xmm2
        movups  xmmword ptr [r10+0x30], xmm3
        # restore callee-saved XMM registers and return
        movdqa  xmm6, xmmword ptr [rsp]
        movdqa  xmm7, xmmword ptr [rsp+0x10]
        movdqa  xmm8, xmmword ptr [rsp+0x20]
        movdqa  xmm9, xmmword ptr [rsp+0x30]
        movdqa  xmm11, xmmword ptr [rsp+0x40]
        movdqa  xmm14, xmmword ptr [rsp+0x50]
        movdqa  xmm15, xmmword ptr [rsp+0x60]
        add     rsp, 120
        ret
# read-only constant pool, 64-byte aligned for movaps/movdqa loads
.section .rdata
.p2align 6
BLAKE3_IV:                              # first half of the BLAKE3/SHA-256 IV
        .long 0x6A09E667, 0xBB67AE85
        .long 0x3C6EF372, 0xA54FF53A
ADD0:                                   # lane counter offsets 0..3
        .long 0, 1, 2, 3
ADD1:                                   # per-iteration counter increment
        .long 4, 4, 4, 4
BLAKE3_IV_0:                            # IV words broadcast across 4 lanes
        .long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
        .long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
        .long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
        .long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
        .long 64, 64, 64, 64
CMP_MSB_MASK:                           # sign-bit mask for unsigned compares
        .long 0x80000000, 0x80000000, 0x80000000, 0x80000000
# dword-granular masks emulating pblendw immediates with SSE2 pand/por
PBLENDW_0x33_MASK:
        .long 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000
PBLENDW_0xCC_MASK:
        .long 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF
PBLENDW_0x3F_MASK:
        .long 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000
PBLENDW_0xC0_MASK:
        .long 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF
semyeong-yu/RadFoam
91,003
external/submodules/mesa/src/util/blake3/blake3_avx512_x86-64_windows_gnu.S
.intel_syntax noprefix .global _blake3_hash_many_avx512 .global blake3_hash_many_avx512 .global blake3_compress_in_place_avx512 .global _blake3_compress_in_place_avx512 .global blake3_compress_xof_avx512 .global _blake3_compress_xof_avx512 .section .text .p2align 6 _blake3_hash_many_avx512: blake3_hash_many_avx512: push r15 push r14 push r13 push r12 push rdi push rsi push rbx push rbp mov rbp, rsp sub rsp, 304 and rsp, 0xFFFFFFFFFFFFFFC0 vmovdqa xmmword ptr [rsp+0x90], xmm6 vmovdqa xmmword ptr [rsp+0xA0], xmm7 vmovdqa xmmword ptr [rsp+0xB0], xmm8 vmovdqa xmmword ptr [rsp+0xC0], xmm9 vmovdqa xmmword ptr [rsp+0xD0], xmm10 vmovdqa xmmword ptr [rsp+0xE0], xmm11 vmovdqa xmmword ptr [rsp+0xF0], xmm12 vmovdqa xmmword ptr [rsp+0x100], xmm13 vmovdqa xmmword ptr [rsp+0x110], xmm14 vmovdqa xmmword ptr [rsp+0x120], xmm15 mov rdi, rcx mov rsi, rdx mov rdx, r8 mov rcx, r9 mov r8, qword ptr [rbp+0x68] movzx r9, byte ptr [rbp+0x70] neg r9 kmovw k1, r9d vmovd xmm0, r8d vpbroadcastd ymm0, xmm0 shr r8, 32 vmovd xmm1, r8d vpbroadcastd ymm1, xmm1 vmovdqa ymm4, ymm1 vmovdqa ymm5, ymm1 vpaddd ymm2, ymm0, ymmword ptr [ADD0+rip] vpaddd ymm3, ymm0, ymmword ptr [ADD0+32+rip] vpcmpltud k2, ymm2, ymm0 vpcmpltud k3, ymm3, ymm0 vpaddd ymm4 {k2}, ymm4, dword ptr [ADD1+rip] {1to8} vpaddd ymm5 {k3}, ymm5, dword ptr [ADD1+rip] {1to8} knotw k2, k1 vmovdqa32 ymm2 {k2}, ymm0 vmovdqa32 ymm3 {k2}, ymm0 vmovdqa32 ymm4 {k2}, ymm1 vmovdqa32 ymm5 {k2}, ymm1 vmovdqa ymmword ptr [rsp], ymm2 vmovdqa ymmword ptr [rsp+0x20], ymm3 vmovdqa ymmword ptr [rsp+0x40], ymm4 vmovdqa ymmword ptr [rsp+0x60], ymm5 shl rdx, 6 mov qword ptr [rsp+0x80], rdx cmp rsi, 16 jc 3f 2: vpbroadcastd zmm0, dword ptr [rcx] vpbroadcastd zmm1, dword ptr [rcx+0x1*0x4] vpbroadcastd zmm2, dword ptr [rcx+0x2*0x4] vpbroadcastd zmm3, dword ptr [rcx+0x3*0x4] vpbroadcastd zmm4, dword ptr [rcx+0x4*0x4] vpbroadcastd zmm5, dword ptr [rcx+0x5*0x4] vpbroadcastd zmm6, dword ptr [rcx+0x6*0x4] vpbroadcastd zmm7, dword ptr [rcx+0x7*0x4] movzx eax, byte ptr 
[rbp+0x78] movzx ebx, byte ptr [rbp+0x80] or eax, ebx xor edx, edx .p2align 5 9: movzx ebx, byte ptr [rbp+0x88] or ebx, eax add rdx, 64 cmp rdx, qword ptr [rsp+0x80] cmove eax, ebx mov dword ptr [rsp+0x88], eax mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov r12, qword ptr [rdi+0x40] mov r13, qword ptr [rdi+0x48] mov r14, qword ptr [rdi+0x50] mov r15, qword ptr [rdi+0x58] vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20] vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01 vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20] vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01 vpunpcklqdq zmm8, zmm16, zmm17 vpunpckhqdq zmm9, zmm16, zmm17 vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20] vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01 vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20] vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01 vpunpcklqdq zmm10, zmm18, zmm19 vpunpckhqdq zmm11, zmm18, zmm19 mov r8, qword ptr [rdi+0x20] mov r9, qword ptr [rdi+0x28] mov r10, qword ptr [rdi+0x30] mov r11, qword ptr [rdi+0x38] mov r12, qword ptr [rdi+0x60] mov r13, qword ptr [rdi+0x68] mov r14, qword ptr [rdi+0x70] mov r15, qword ptr [rdi+0x78] vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20] vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01 vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20] vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01 vpunpcklqdq zmm12, zmm16, zmm17 vpunpckhqdq zmm13, zmm16, zmm17 vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20] vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01 vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20] vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01 vpunpcklqdq zmm14, zmm18, zmm19 vpunpckhqdq zmm15, zmm18, zmm19 vmovdqa32 zmm27, zmmword ptr [INDEX0+rip] vmovdqa32 zmm31, zmmword ptr [INDEX1+rip] vshufps zmm16, zmm8, zmm10, 136 vshufps zmm17, zmm12, zmm14, 136 vmovdqa32 zmm20, zmm16 
vpermt2d zmm16, zmm27, zmm17 vpermt2d zmm20, zmm31, zmm17 vshufps zmm17, zmm8, zmm10, 221 vshufps zmm30, zmm12, zmm14, 221 vmovdqa32 zmm21, zmm17 vpermt2d zmm17, zmm27, zmm30 vpermt2d zmm21, zmm31, zmm30 vshufps zmm18, zmm9, zmm11, 136 vshufps zmm8, zmm13, zmm15, 136 vmovdqa32 zmm22, zmm18 vpermt2d zmm18, zmm27, zmm8 vpermt2d zmm22, zmm31, zmm8 vshufps zmm19, zmm9, zmm11, 221 vshufps zmm8, zmm13, zmm15, 221 vmovdqa32 zmm23, zmm19 vpermt2d zmm19, zmm27, zmm8 vpermt2d zmm23, zmm31, zmm8 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov r12, qword ptr [rdi+0x40] mov r13, qword ptr [rdi+0x48] mov r14, qword ptr [rdi+0x50] mov r15, qword ptr [rdi+0x58] vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20] vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01 vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20] vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01 vpunpcklqdq zmm8, zmm24, zmm25 vpunpckhqdq zmm9, zmm24, zmm25 vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20] vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01 vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20] vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01 vpunpcklqdq zmm10, zmm24, zmm25 vpunpckhqdq zmm11, zmm24, zmm25 prefetcht0 [r8+rdx+0x80] prefetcht0 [r12+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r13+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r14+rdx+0x80] prefetcht0 [r11+rdx+0x80] prefetcht0 [r15+rdx+0x80] mov r8, qword ptr [rdi+0x20] mov r9, qword ptr [rdi+0x28] mov r10, qword ptr [rdi+0x30] mov r11, qword ptr [rdi+0x38] mov r12, qword ptr [rdi+0x60] mov r13, qword ptr [rdi+0x68] mov r14, qword ptr [rdi+0x70] mov r15, qword ptr [rdi+0x78] vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20] vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01 vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20] vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01 vpunpcklqdq zmm12, zmm24, zmm25 
vpunpckhqdq zmm13, zmm24, zmm25 vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20] vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01 vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20] vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01 vpunpcklqdq zmm14, zmm24, zmm25 vpunpckhqdq zmm15, zmm24, zmm25 prefetcht0 [r8+rdx+0x80] prefetcht0 [r12+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r13+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r14+rdx+0x80] prefetcht0 [r11+rdx+0x80] prefetcht0 [r15+rdx+0x80] vshufps zmm24, zmm8, zmm10, 136 vshufps zmm30, zmm12, zmm14, 136 vmovdqa32 zmm28, zmm24 vpermt2d zmm24, zmm27, zmm30 vpermt2d zmm28, zmm31, zmm30 vshufps zmm25, zmm8, zmm10, 221 vshufps zmm30, zmm12, zmm14, 221 vmovdqa32 zmm29, zmm25 vpermt2d zmm25, zmm27, zmm30 vpermt2d zmm29, zmm31, zmm30 vshufps zmm26, zmm9, zmm11, 136 vshufps zmm8, zmm13, zmm15, 136 vmovdqa32 zmm30, zmm26 vpermt2d zmm26, zmm27, zmm8 vpermt2d zmm30, zmm31, zmm8 vshufps zmm8, zmm9, zmm11, 221 vshufps zmm10, zmm13, zmm15, 221 vpermi2d zmm27, zmm8, zmm10 vpermi2d zmm31, zmm8, zmm10 vpbroadcastd zmm8, dword ptr [BLAKE3_IV_0+rip] vpbroadcastd zmm9, dword ptr [BLAKE3_IV_1+rip] vpbroadcastd zmm10, dword ptr [BLAKE3_IV_2+rip] vpbroadcastd zmm11, dword ptr [BLAKE3_IV_3+rip] vmovdqa32 zmm12, zmmword ptr [rsp] vmovdqa32 zmm13, zmmword ptr [rsp+0x1*0x40] vpbroadcastd zmm14, dword ptr [BLAKE3_BLOCK_LEN+rip] vpbroadcastd zmm15, dword ptr [rsp+0x22*0x4] vpaddd zmm0, zmm0, zmm16 vpaddd zmm1, zmm1, zmm18 vpaddd zmm2, zmm2, zmm20 vpaddd zmm3, zmm3, zmm22 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 
vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm17 vpaddd zmm1, zmm1, zmm19 vpaddd zmm2, zmm2, zmm21 vpaddd zmm3, zmm3, zmm23 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm24 vpaddd zmm1, zmm1, zmm26 vpaddd zmm2, zmm2, zmm28 vpaddd zmm3, zmm3, zmm30 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm25 vpaddd zmm1, zmm1, zmm27 vpaddd zmm2, zmm2, zmm29 vpaddd zmm3, zmm3, zmm31 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 
vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm18 vpaddd zmm1, zmm1, zmm19 vpaddd zmm2, zmm2, zmm23 vpaddd zmm3, zmm3, zmm20 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm22 vpaddd zmm1, zmm1, zmm26 vpaddd zmm2, zmm2, zmm16 vpaddd zmm3, zmm3, zmm29 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm17 vpaddd zmm1, zmm1, zmm28 vpaddd zmm2, zmm2, zmm25 vpaddd zmm3, zmm3, zmm31 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 
vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm27 vpaddd zmm1, zmm1, zmm21 vpaddd zmm2, zmm2, zmm30 vpaddd zmm3, zmm3, zmm24 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm19 vpaddd zmm1, zmm1, zmm26 vpaddd zmm2, zmm2, zmm29 vpaddd zmm3, zmm3, zmm23 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm20 vpaddd zmm1, zmm1, zmm28 vpaddd zmm2, zmm2, zmm18 vpaddd zmm3, zmm3, zmm30 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd 
zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm22 vpaddd zmm1, zmm1, zmm25 vpaddd zmm2, zmm2, zmm27 vpaddd zmm3, zmm3, zmm24 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm21 vpaddd zmm1, zmm1, zmm16 vpaddd zmm2, zmm2, zmm31 vpaddd zmm3, zmm3, zmm17 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm26 vpaddd zmm1, zmm1, zmm28 vpaddd zmm2, zmm2, zmm30 vpaddd zmm3, zmm3, zmm29 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord 
zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm23 vpaddd zmm1, zmm1, zmm25 vpaddd zmm2, zmm2, zmm19 vpaddd zmm3, zmm3, zmm31 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm20 vpaddd zmm1, zmm1, zmm27 vpaddd zmm2, zmm2, zmm21 vpaddd zmm3, zmm3, zmm17 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm16 vpaddd zmm1, zmm1, zmm18 vpaddd zmm2, zmm2, zmm24 vpaddd zmm3, zmm3, zmm22 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 
vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm28 vpaddd zmm1, zmm1, zmm25 vpaddd zmm2, zmm2, zmm31 vpaddd zmm3, zmm3, zmm30 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm29 vpaddd zmm1, zmm1, zmm27 vpaddd zmm2, zmm2, zmm26 vpaddd zmm3, zmm3, zmm24 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm23 vpaddd zmm1, zmm1, zmm21 vpaddd zmm2, zmm2, zmm16 vpaddd zmm3, zmm3, zmm22 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord 
zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm18 vpaddd zmm1, zmm1, zmm19 vpaddd zmm2, zmm2, zmm17 vpaddd zmm3, zmm3, zmm20 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm25 vpaddd zmm1, zmm1, zmm27 vpaddd zmm2, zmm2, zmm24 vpaddd zmm3, zmm3, zmm31 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm30 vpaddd zmm1, zmm1, zmm21 vpaddd zmm2, zmm2, zmm28 vpaddd zmm3, zmm3, zmm17 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord 
zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm29 vpaddd zmm1, zmm1, zmm16 vpaddd zmm2, zmm2, zmm18 vpaddd zmm3, zmm3, zmm20 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm19 vpaddd zmm1, zmm1, zmm26 vpaddd zmm2, zmm2, zmm22 vpaddd zmm3, zmm3, zmm23 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpaddd zmm0, zmm0, zmm27 vpaddd zmm1, zmm1, zmm21 vpaddd zmm2, zmm2, zmm17 vpaddd zmm3, zmm3, zmm24 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, 
zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vprord zmm15, zmm15, 16 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 12 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vpaddd zmm0, zmm0, zmm31 vpaddd zmm1, zmm1, zmm16 vpaddd zmm2, zmm2, zmm25 vpaddd zmm3, zmm3, zmm22 vpaddd zmm0, zmm0, zmm4 vpaddd zmm1, zmm1, zmm5 vpaddd zmm2, zmm2, zmm6 vpaddd zmm3, zmm3, zmm7 vpxord zmm12, zmm12, zmm0 vpxord zmm13, zmm13, zmm1 vpxord zmm14, zmm14, zmm2 vpxord zmm15, zmm15, zmm3 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vprord zmm15, zmm15, 8 vpaddd zmm8, zmm8, zmm12 vpaddd zmm9, zmm9, zmm13 vpaddd zmm10, zmm10, zmm14 vpaddd zmm11, zmm11, zmm15 vpxord zmm4, zmm4, zmm8 vpxord zmm5, zmm5, zmm9 vpxord zmm6, zmm6, zmm10 vpxord zmm7, zmm7, zmm11 vprord zmm4, zmm4, 7 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vpaddd zmm0, zmm0, zmm30 vpaddd zmm1, zmm1, zmm18 vpaddd zmm2, zmm2, zmm19 vpaddd zmm3, zmm3, zmm23 vpaddd zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 16 vprord zmm12, zmm12, 16 vprord zmm13, zmm13, 16 vprord zmm14, zmm14, 16 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 12 vprord zmm6, zmm6, 12 vprord zmm7, zmm7, 12 vprord zmm4, zmm4, 12 vpaddd zmm0, zmm0, zmm26 vpaddd zmm1, zmm1, zmm28 vpaddd zmm2, zmm2, zmm20 vpaddd zmm3, zmm3, zmm29 vpaddd 
zmm0, zmm0, zmm5 vpaddd zmm1, zmm1, zmm6 vpaddd zmm2, zmm2, zmm7 vpaddd zmm3, zmm3, zmm4 vpxord zmm15, zmm15, zmm0 vpxord zmm12, zmm12, zmm1 vpxord zmm13, zmm13, zmm2 vpxord zmm14, zmm14, zmm3 vprord zmm15, zmm15, 8 vprord zmm12, zmm12, 8 vprord zmm13, zmm13, 8 vprord zmm14, zmm14, 8 vpaddd zmm10, zmm10, zmm15 vpaddd zmm11, zmm11, zmm12 vpaddd zmm8, zmm8, zmm13 vpaddd zmm9, zmm9, zmm14 vpxord zmm5, zmm5, zmm10 vpxord zmm6, zmm6, zmm11 vpxord zmm7, zmm7, zmm8 vpxord zmm4, zmm4, zmm9 vprord zmm5, zmm5, 7 vprord zmm6, zmm6, 7 vprord zmm7, zmm7, 7 vprord zmm4, zmm4, 7 vpxord zmm0, zmm0, zmm8 vpxord zmm1, zmm1, zmm9 vpxord zmm2, zmm2, zmm10 vpxord zmm3, zmm3, zmm11 vpxord zmm4, zmm4, zmm12 vpxord zmm5, zmm5, zmm13 vpxord zmm6, zmm6, zmm14 vpxord zmm7, zmm7, zmm15 movzx eax, byte ptr [rbp+0x78] jne 9b mov rbx, qword ptr [rbp+0x90] vpunpckldq zmm16, zmm0, zmm1 vpunpckhdq zmm17, zmm0, zmm1 vpunpckldq zmm18, zmm2, zmm3 vpunpckhdq zmm19, zmm2, zmm3 vpunpckldq zmm20, zmm4, zmm5 vpunpckhdq zmm21, zmm4, zmm5 vpunpckldq zmm22, zmm6, zmm7 vpunpckhdq zmm23, zmm6, zmm7 vpunpcklqdq zmm0, zmm16, zmm18 vpunpckhqdq zmm1, zmm16, zmm18 vpunpcklqdq zmm2, zmm17, zmm19 vpunpckhqdq zmm3, zmm17, zmm19 vpunpcklqdq zmm4, zmm20, zmm22 vpunpckhqdq zmm5, zmm20, zmm22 vpunpcklqdq zmm6, zmm21, zmm23 vpunpckhqdq zmm7, zmm21, zmm23 vshufi32x4 zmm16, zmm0, zmm4, 0x88 vshufi32x4 zmm17, zmm1, zmm5, 0x88 vshufi32x4 zmm18, zmm2, zmm6, 0x88 vshufi32x4 zmm19, zmm3, zmm7, 0x88 vshufi32x4 zmm20, zmm0, zmm4, 0xDD vshufi32x4 zmm21, zmm1, zmm5, 0xDD vshufi32x4 zmm22, zmm2, zmm6, 0xDD vshufi32x4 zmm23, zmm3, zmm7, 0xDD vshufi32x4 zmm0, zmm16, zmm17, 0x88 vshufi32x4 zmm1, zmm18, zmm19, 0x88 vshufi32x4 zmm2, zmm20, zmm21, 0x88 vshufi32x4 zmm3, zmm22, zmm23, 0x88 vshufi32x4 zmm4, zmm16, zmm17, 0xDD vshufi32x4 zmm5, zmm18, zmm19, 0xDD vshufi32x4 zmm6, zmm20, zmm21, 0xDD vshufi32x4 zmm7, zmm22, zmm23, 0xDD vmovdqu32 zmmword ptr [rbx], zmm0 vmovdqu32 zmmword ptr [rbx+0x1*0x40], zmm1 vmovdqu32 zmmword ptr [rbx+0x2*0x40], 
zmm2 vmovdqu32 zmmword ptr [rbx+0x3*0x40], zmm3 vmovdqu32 zmmword ptr [rbx+0x4*0x40], zmm4 vmovdqu32 zmmword ptr [rbx+0x5*0x40], zmm5 vmovdqu32 zmmword ptr [rbx+0x6*0x40], zmm6 vmovdqu32 zmmword ptr [rbx+0x7*0x40], zmm7 vmovdqa32 zmm0, zmmword ptr [rsp] vmovdqa32 zmm1, zmmword ptr [rsp+0x1*0x40] vmovdqa32 zmm2, zmm0 vpaddd zmm2{k1}, zmm0, dword ptr [ADD16+rip] {1to16} vpcmpltud k2, zmm2, zmm0 vpaddd zmm1 {k2}, zmm1, dword ptr [ADD1+rip] {1to16} vmovdqa32 zmmword ptr [rsp], zmm2 vmovdqa32 zmmword ptr [rsp+0x1*0x40], zmm1 add rdi, 128 add rbx, 512 mov qword ptr [rbp+0x90], rbx sub rsi, 16 cmp rsi, 16 jnc 2b test rsi, rsi jne 3f 4: vzeroupper vmovdqa xmm6, xmmword ptr [rsp+0x90] vmovdqa xmm7, xmmword ptr [rsp+0xA0] vmovdqa xmm8, xmmword ptr [rsp+0xB0] vmovdqa xmm9, xmmword ptr [rsp+0xC0] vmovdqa xmm10, xmmword ptr [rsp+0xD0] vmovdqa xmm11, xmmword ptr [rsp+0xE0] vmovdqa xmm12, xmmword ptr [rsp+0xF0] vmovdqa xmm13, xmmword ptr [rsp+0x100] vmovdqa xmm14, xmmword ptr [rsp+0x110] vmovdqa xmm15, xmmword ptr [rsp+0x120] mov rsp, rbp pop rbp pop rbx pop rsi pop rdi pop r12 pop r13 pop r14 pop r15 ret .p2align 6 3: test esi, 0x8 je 3f vpbroadcastd ymm0, dword ptr [rcx] vpbroadcastd ymm1, dword ptr [rcx+0x4] vpbroadcastd ymm2, dword ptr [rcx+0x8] vpbroadcastd ymm3, dword ptr [rcx+0xC] vpbroadcastd ymm4, dword ptr [rcx+0x10] vpbroadcastd ymm5, dword ptr [rcx+0x14] vpbroadcastd ymm6, dword ptr [rcx+0x18] vpbroadcastd ymm7, dword ptr [rcx+0x1C] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov r12, qword ptr [rdi+0x20] mov r13, qword ptr [rdi+0x28] mov r14, qword ptr [rdi+0x30] mov r15, qword ptr [rdi+0x38] movzx eax, byte ptr [rbp+0x78] movzx ebx, byte ptr [rbp+0x80] or eax, ebx xor edx, edx 2: movzx ebx, byte ptr [rbp+0x88] or ebx, eax add rdx, 64 cmp rdx, qword ptr [rsp+0x80] cmove eax, ebx mov dword ptr [rsp+0x88], eax vmovups xmm8, xmmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 
0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x40] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x40] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x40] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm16, ymm12, ymm14, 136 vshufps ymm17, ymm12, ymm14, 221 vshufps ymm18, ymm13, ymm15, 136 vshufps ymm19, ymm13, ymm15, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x30] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x30] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x30] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm20, ymm12, ymm14, 136 vshufps ymm21, ymm12, ymm14, 221 vshufps ymm22, ymm13, ymm15, 136 vshufps ymm23, ymm13, ymm15, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x20] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x20] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x20] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm24, ymm12, ymm14, 136 vshufps ymm25, ymm12, ymm14, 221 vshufps ymm26, ymm13, ymm15, 136 vshufps ymm27, ymm13, ymm15, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x10] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x10] vinsertf128 ymm9, 
ymm9, xmmword ptr [r13+rdx-0x10], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x10] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x10] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm28, ymm12, ymm14, 136 vshufps ymm29, ymm12, ymm14, 221 vshufps ymm30, ymm13, ymm15, 136 vshufps ymm31, ymm13, ymm15, 221 vpbroadcastd ymm8, dword ptr [BLAKE3_IV_0+rip] vpbroadcastd ymm9, dword ptr [BLAKE3_IV_1+rip] vpbroadcastd ymm10, dword ptr [BLAKE3_IV_2+rip] vpbroadcastd ymm11, dword ptr [BLAKE3_IV_3+rip] vmovdqa ymm12, ymmword ptr [rsp] vmovdqa ymm13, ymmword ptr [rsp+0x40] vpbroadcastd ymm14, dword ptr [BLAKE3_BLOCK_LEN+rip] vpbroadcastd ymm15, dword ptr [rsp+0x88] vpaddd ymm0, ymm0, ymm16 vpaddd ymm1, ymm1, ymm18 vpaddd ymm2, ymm2, ymm20 vpaddd ymm3, ymm3, ymm22 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm17 vpaddd ymm1, ymm1, ymm19 vpaddd ymm2, ymm2, ymm21 vpaddd ymm3, ymm3, ymm23 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 
vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm24 vpaddd ymm1, ymm1, ymm26 vpaddd ymm2, ymm2, ymm28 vpaddd ymm3, ymm3, ymm30 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm25 vpaddd ymm1, ymm1, ymm27 vpaddd ymm2, ymm2, ymm29 vpaddd ymm3, ymm3, ymm31 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm18 vpaddd ymm1, ymm1, ymm19 vpaddd ymm2, ymm2, ymm23 vpaddd ymm3, ymm3, ymm20 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 
vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm22 vpaddd ymm1, ymm1, ymm26 vpaddd ymm2, ymm2, ymm16 vpaddd ymm3, ymm3, ymm29 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm17 vpaddd ymm1, ymm1, ymm28 vpaddd ymm2, ymm2, ymm25 vpaddd ymm3, ymm3, ymm31 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm27 vpaddd ymm1, ymm1, ymm21 vpaddd ymm2, ymm2, ymm30 vpaddd ymm3, ymm3, ymm24 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 
vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm19 vpaddd ymm1, ymm1, ymm26 vpaddd ymm2, ymm2, ymm29 vpaddd ymm3, ymm3, ymm23 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm20 vpaddd ymm1, ymm1, ymm28 vpaddd ymm2, ymm2, ymm18 vpaddd ymm3, ymm3, ymm30 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm22 vpaddd ymm1, ymm1, ymm25 vpaddd ymm2, ymm2, ymm27 vpaddd ymm3, ymm3, ymm24 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 
vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm21 vpaddd ymm1, ymm1, ymm16 vpaddd ymm2, ymm2, ymm31 vpaddd ymm3, ymm3, ymm17 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm26 vpaddd ymm1, ymm1, ymm28 vpaddd ymm2, ymm2, ymm30 vpaddd ymm3, ymm3, ymm29 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm23 vpaddd ymm1, ymm1, ymm25 vpaddd ymm2, ymm2, ymm19 vpaddd ymm3, ymm3, ymm31 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 
vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm20 vpaddd ymm1, ymm1, ymm27 vpaddd ymm2, ymm2, ymm21 vpaddd ymm3, ymm3, ymm17 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm16 vpaddd ymm1, ymm1, ymm18 vpaddd ymm2, ymm2, ymm24 vpaddd ymm3, ymm3, ymm22 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm28 vpaddd ymm1, ymm1, ymm25 vpaddd ymm2, ymm2, ymm31 vpaddd ymm3, ymm3, ymm30 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord 
ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm29 vpaddd ymm1, ymm1, ymm27 vpaddd ymm2, ymm2, ymm26 vpaddd ymm3, ymm3, ymm24 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm23 vpaddd ymm1, ymm1, ymm21 vpaddd ymm2, ymm2, ymm16 vpaddd ymm3, ymm3, ymm22 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm18 vpaddd ymm1, ymm1, ymm19 vpaddd ymm2, ymm2, ymm17 vpaddd ymm3, ymm3, ymm20 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd 
ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm25 vpaddd ymm1, ymm1, ymm27 vpaddd ymm2, ymm2, ymm24 vpaddd ymm3, ymm3, ymm31 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm30 vpaddd ymm1, ymm1, ymm21 vpaddd ymm2, ymm2, ymm28 vpaddd ymm3, ymm3, ymm17 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm29 vpaddd ymm1, ymm1, ymm16 vpaddd ymm2, ymm2, ymm18 vpaddd ymm3, ymm3, ymm20 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, 
ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm19 vpaddd ymm1, ymm1, ymm26 vpaddd ymm2, ymm2, ymm22 vpaddd ymm3, ymm3, ymm23 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpaddd ymm0, ymm0, ymm27 vpaddd ymm1, ymm1, ymm21 vpaddd ymm2, ymm2, ymm17 vpaddd ymm3, ymm3, ymm24 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vprord ymm15, ymm15, 16 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 12 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vpaddd ymm0, ymm0, ymm31 vpaddd ymm1, ymm1, ymm16 vpaddd ymm2, ymm2, ymm25 vpaddd 
ymm3, ymm3, ymm22 vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxord ymm12, ymm12, ymm0 vpxord ymm13, ymm13, ymm1 vpxord ymm14, ymm14, ymm2 vpxord ymm15, ymm15, ymm3 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vprord ymm15, ymm15, 8 vpaddd ymm8, ymm8, ymm12 vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxord ymm4, ymm4, ymm8 vpxord ymm5, ymm5, ymm9 vpxord ymm6, ymm6, ymm10 vpxord ymm7, ymm7, ymm11 vprord ymm4, ymm4, 7 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vpaddd ymm0, ymm0, ymm30 vpaddd ymm1, ymm1, ymm18 vpaddd ymm2, ymm2, ymm19 vpaddd ymm3, ymm3, ymm23 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 16 vprord ymm12, ymm12, 16 vprord ymm13, ymm13, 16 vprord ymm14, ymm14, 16 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 12 vprord ymm6, ymm6, 12 vprord ymm7, ymm7, 12 vprord ymm4, ymm4, 12 vpaddd ymm0, ymm0, ymm26 vpaddd ymm1, ymm1, ymm28 vpaddd ymm2, ymm2, ymm20 vpaddd ymm3, ymm3, ymm29 vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxord ymm15, ymm15, ymm0 vpxord ymm12, ymm12, ymm1 vpxord ymm13, ymm13, ymm2 vpxord ymm14, ymm14, ymm3 vprord ymm15, ymm15, 8 vprord ymm12, ymm12, 8 vprord ymm13, ymm13, 8 vprord ymm14, ymm14, 8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm8, ymm13 vpaddd ymm9, ymm9, ymm14 vpxord ymm5, ymm5, ymm10 vpxord ymm6, ymm6, ymm11 vpxord ymm7, ymm7, ymm8 vpxord ymm4, ymm4, ymm9 vprord ymm5, ymm5, 7 vprord ymm6, ymm6, 7 vprord ymm7, ymm7, 7 vprord ymm4, ymm4, 7 vpxor ymm0, ymm0, ymm8 vpxor ymm1, 
ymm1, ymm9 vpxor ymm2, ymm2, ymm10 vpxor ymm3, ymm3, ymm11 vpxor ymm4, ymm4, ymm12 vpxor ymm5, ymm5, ymm13 vpxor ymm6, ymm6, ymm14 vpxor ymm7, ymm7, ymm15 movzx eax, byte ptr [rbp+0x78] jne 2b mov rbx, qword ptr [rbp+0x90] vunpcklps ymm8, ymm0, ymm1 vunpcklps ymm9, ymm2, ymm3 vunpckhps ymm10, ymm0, ymm1 vunpcklps ymm11, ymm4, ymm5 vunpcklps ymm0, ymm6, ymm7 vshufps ymm12, ymm8, ymm9, 78 vblendps ymm1, ymm8, ymm12, 0xCC vshufps ymm8, ymm11, ymm0, 78 vunpckhps ymm13, ymm2, ymm3 vblendps ymm2, ymm11, ymm8, 0xCC vblendps ymm3, ymm12, ymm9, 0xCC vperm2f128 ymm12, ymm1, ymm2, 0x20 vmovups ymmword ptr [rbx], ymm12 vunpckhps ymm14, ymm4, ymm5 vblendps ymm4, ymm8, ymm0, 0xCC vunpckhps ymm15, ymm6, ymm7 vperm2f128 ymm7, ymm3, ymm4, 0x20 vmovups ymmword ptr [rbx+0x20], ymm7 vshufps ymm5, ymm10, ymm13, 78 vblendps ymm6, ymm5, ymm13, 0xCC vshufps ymm13, ymm14, ymm15, 78 vblendps ymm10, ymm10, ymm5, 0xCC vblendps ymm14, ymm14, ymm13, 0xCC vperm2f128 ymm8, ymm10, ymm14, 0x20 vmovups ymmword ptr [rbx+0x40], ymm8 vblendps ymm15, ymm13, ymm15, 0xCC vperm2f128 ymm13, ymm6, ymm15, 0x20 vmovups ymmword ptr [rbx+0x60], ymm13 vperm2f128 ymm9, ymm1, ymm2, 0x31 vperm2f128 ymm11, ymm3, ymm4, 0x31 vmovups ymmword ptr [rbx+0x80], ymm9 vperm2f128 ymm14, ymm10, ymm14, 0x31 vperm2f128 ymm15, ymm6, ymm15, 0x31 vmovups ymmword ptr [rbx+0xA0], ymm11 vmovups ymmword ptr [rbx+0xC0], ymm14 vmovups ymmword ptr [rbx+0xE0], ymm15 vmovdqa ymm0, ymmword ptr [rsp] vmovdqa ymm2, ymmword ptr [rsp+0x40] vmovdqa32 ymm0 {k1}, ymmword ptr [rsp+0x1*0x20] vmovdqa32 ymm2 {k1}, ymmword ptr [rsp+0x3*0x20] vmovdqa ymmword ptr [rsp], ymm0 vmovdqa ymmword ptr [rsp+0x40], ymm2 add rbx, 256 mov qword ptr [rbp+0x90], rbx add rdi, 64 sub rsi, 8 3: mov rbx, qword ptr [rbp+0x90] mov r15, qword ptr [rsp+0x80] movzx r13, byte ptr [rbp+0x78] movzx r12, byte ptr [rbp+0x88] test esi, 0x4 je 3f vbroadcasti32x4 zmm0, xmmword ptr [rcx] vbroadcasti32x4 zmm1, xmmword ptr [rcx+0x1*0x10] vmovdqa xmm12, xmmword ptr [rsp] vmovdqa xmm13, 
xmmword ptr [rsp+0x40] vpunpckldq xmm14, xmm12, xmm13 vpunpckhdq xmm15, xmm12, xmm13 vpermq ymm14, ymm14, 0xDC vpermq ymm15, ymm15, 0xDC vpbroadcastd zmm12, dword ptr [BLAKE3_BLOCK_LEN+rip] vinserti64x4 zmm13, zmm14, ymm15, 0x01 mov eax, 17476 kmovw k2, eax vpblendmd zmm13 {k2}, zmm13, zmm12 vbroadcasti32x4 zmm15, xmmword ptr [BLAKE3_IV+rip] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] mov eax, 43690 kmovw k3, eax mov eax, 34952 kmovw k4, eax movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x88], eax vmovdqa32 zmm2, zmm15 vpbroadcastd zmm8, dword ptr [rsp+0x22*0x4] vpblendmd zmm3 {k4}, zmm13, zmm8 vmovups zmm8, zmmword ptr [r8+rdx-0x1*0x40] vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x4*0x10], 0x01 vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x4*0x10], 0x02 vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x4*0x10], 0x03 vmovups zmm9, zmmword ptr [r8+rdx-0x30] vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x3*0x10], 0x01 vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x3*0x10], 0x02 vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x3*0x10], 0x03 vshufps zmm4, zmm8, zmm9, 136 vshufps zmm5, zmm8, zmm9, 221 vmovups zmm8, zmmword ptr [r8+rdx-0x20] vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x2*0x10], 0x01 vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x2*0x10], 0x02 vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x2*0x10], 0x03 vmovups zmm9, zmmword ptr [r8+rdx-0x10] vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x1*0x10], 0x01 vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x1*0x10], 0x02 vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x1*0x10], 0x03 vshufps zmm6, zmm8, zmm9, 136 vshufps zmm7, zmm8, zmm9, 221 vpshufd zmm6, zmm6, 0x93 vpshufd zmm7, zmm7, 0x93 mov al, 7 9: vpaddd zmm0, zmm0, zmm4 vpaddd zmm0, zmm0, zmm1 vpxord zmm3, zmm3, zmm0 vprord zmm3, zmm3, 16 vpaddd zmm2, zmm2, zmm3 vpxord 
zmm1, zmm1, zmm2 vprord zmm1, zmm1, 12 vpaddd zmm0, zmm0, zmm5 vpaddd zmm0, zmm0, zmm1 vpxord zmm3, zmm3, zmm0 vprord zmm3, zmm3, 8 vpaddd zmm2, zmm2, zmm3 vpxord zmm1, zmm1, zmm2 vprord zmm1, zmm1, 7 vpshufd zmm0, zmm0, 0x93 vpshufd zmm3, zmm3, 0x4E vpshufd zmm2, zmm2, 0x39 vpaddd zmm0, zmm0, zmm6 vpaddd zmm0, zmm0, zmm1 vpxord zmm3, zmm3, zmm0 vprord zmm3, zmm3, 16 vpaddd zmm2, zmm2, zmm3 vpxord zmm1, zmm1, zmm2 vprord zmm1, zmm1, 12 vpaddd zmm0, zmm0, zmm7 vpaddd zmm0, zmm0, zmm1 vpxord zmm3, zmm3, zmm0 vprord zmm3, zmm3, 8 vpaddd zmm2, zmm2, zmm3 vpxord zmm1, zmm1, zmm2 vprord zmm1, zmm1, 7 vpshufd zmm0, zmm0, 0x39 vpshufd zmm3, zmm3, 0x4E vpshufd zmm2, zmm2, 0x93 dec al jz 9f vshufps zmm8, zmm4, zmm5, 214 vpshufd zmm9, zmm4, 0x0F vpshufd zmm4, zmm8, 0x39 vshufps zmm8, zmm6, zmm7, 250 vpblendmd zmm9 {k3}, zmm9, zmm8 vpunpcklqdq zmm8, zmm7, zmm5 vpblendmd zmm8 {k4}, zmm8, zmm6 vpshufd zmm8, zmm8, 0x78 vpunpckhdq zmm5, zmm5, zmm7 vpunpckldq zmm6, zmm6, zmm5 vpshufd zmm7, zmm6, 0x1E vmovdqa32 zmm5, zmm9 vmovdqa32 zmm6, zmm8 jmp 9b 9: vpxord zmm0, zmm0, zmm2 vpxord zmm1, zmm1, zmm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vextracti32x4 xmmword ptr [rbx+0x4*0x10], zmm0, 0x02 vextracti32x4 xmmword ptr [rbx+0x5*0x10], zmm1, 0x02 vextracti32x4 xmmword ptr [rbx+0x6*0x10], zmm0, 0x03 vextracti32x4 xmmword ptr [rbx+0x7*0x10], zmm1, 0x03 vmovdqa xmm0, xmmword ptr [rsp] vmovdqa xmm2, xmmword ptr [rsp+0x40] vmovdqa32 xmm0 {k1}, xmmword ptr [rsp+0x1*0x10] vmovdqa32 xmm2 {k1}, xmmword ptr [rsp+0x5*0x10] vmovdqa xmmword ptr [rsp], xmm0 vmovdqa xmmword ptr [rsp+0x40], xmm2 add rbx, 128 add rdi, 32 sub rsi, 4 3: test esi, 0x2 je 3f vbroadcasti128 ymm0, xmmword ptr [rcx] vbroadcasti128 ymm1, xmmword ptr [rcx+0x10] vmovd xmm13, dword ptr [rsp] vpinsrd xmm13, xmm13, dword ptr [rsp+0x40], 1 vpinsrd xmm13, xmm13, dword ptr 
[BLAKE3_BLOCK_LEN+rip], 2 vmovd xmm14, dword ptr [rsp+0x4] vpinsrd xmm14, xmm14, dword ptr [rsp+0x44], 1 vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vinserti128 ymm13, ymm13, xmm14, 0x01 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x88], eax vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip] vpbroadcastd ymm8, dword ptr [rsp+0x88] vpblendd ymm3, ymm13, ymm8, 0x88 vmovups ymm8, ymmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01 vshufps ymm4, ymm8, ymm9, 136 vshufps ymm5, ymm8, ymm9, 221 vmovups ymm8, ymmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01 vshufps ymm6, ymm8, ymm9, 136 vshufps ymm7, ymm8, ymm9, 221 vpshufd ymm6, ymm6, 0x93 vpshufd ymm7, ymm7, 0x93 mov al, 7 9: vpaddd ymm0, ymm0, ymm4 vpaddd ymm0, ymm0, ymm1 vpxord ymm3, ymm3, ymm0 vprord ymm3, ymm3, 16 vpaddd ymm2, ymm2, ymm3 vpxord ymm1, ymm1, ymm2 vprord ymm1, ymm1, 12 vpaddd ymm0, ymm0, ymm5 vpaddd ymm0, ymm0, ymm1 vpxord ymm3, ymm3, ymm0 vprord ymm3, ymm3, 8 vpaddd ymm2, ymm2, ymm3 vpxord ymm1, ymm1, ymm2 vprord ymm1, ymm1, 7 vpshufd ymm0, ymm0, 0x93 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x39 vpaddd ymm0, ymm0, ymm6 vpaddd ymm0, ymm0, ymm1 vpxord ymm3, ymm3, ymm0 vprord ymm3, ymm3, 16 vpaddd ymm2, ymm2, ymm3 vpxord ymm1, ymm1, ymm2 vprord ymm1, ymm1, 12 vpaddd ymm0, ymm0, ymm7 vpaddd ymm0, ymm0, ymm1 vpxord ymm3, ymm3, ymm0 vprord ymm3, ymm3, 8 vpaddd ymm2, ymm2, ymm3 vpxord ymm1, ymm1, ymm2 vprord ymm1, ymm1, 7 vpshufd ymm0, ymm0, 0x39 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x93 dec al jz 9f vshufps ymm8, ymm4, ymm5, 214 vpshufd ymm9, ymm4, 0x0F vpshufd ymm4, 
ymm8, 0x39 vshufps ymm8, ymm6, ymm7, 250 vpblendd ymm9, ymm9, ymm8, 0xAA vpunpcklqdq ymm8, ymm7, ymm5 vpblendd ymm8, ymm8, ymm6, 0x88 vpshufd ymm8, ymm8, 0x78 vpunpckhdq ymm5, ymm5, ymm7 vpunpckldq ymm6, ymm6, ymm5 vpshufd ymm7, ymm6, 0x1E vmovdqa ymm5, ymm9 vmovdqa ymm6, ymm8 jmp 9b 9: vpxor ymm0, ymm0, ymm2 vpxor ymm1, ymm1, ymm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vmovdqa xmm0, xmmword ptr [rsp] vmovdqa xmm2, xmmword ptr [rsp+0x40] vmovdqu32 xmm0 {k1}, xmmword ptr [rsp+0x8] vmovdqu32 xmm2 {k1}, xmmword ptr [rsp+0x48] vmovdqa xmmword ptr [rsp], xmm0 vmovdqa xmmword ptr [rsp+0x40], xmm2 add rbx, 64 add rdi, 16 sub rsi, 2 3: test esi, 0x1 je 4b vmovdqu xmm0, xmmword ptr [rcx] vmovdqu xmm1, xmmword ptr [rcx+0x10] vmovd xmm14, dword ptr [rsp] vpinsrd xmm14, xmm14, dword ptr [rsp+0x40], 1 vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovdqa xmm15, xmmword ptr [BLAKE3_IV+rip] mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d vpinsrd xmm3, xmm14, eax, 3 vmovdqa xmm2, xmm15 vmovups xmm8, xmmword ptr [r8+rdx-0x40] vmovups xmm9, xmmword ptr [r8+rdx-0x30] vshufps xmm4, xmm8, xmm9, 136 vshufps xmm5, xmm8, xmm9, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vmovups xmm9, xmmword ptr [r8+rdx-0x10] vshufps xmm6, xmm8, xmm9, 136 vshufps xmm7, xmm8, xmm9, 221 vpshufd xmm6, xmm6, 0x93 vpshufd xmm7, xmm7, 0x93 mov al, 7 9: vpaddd xmm0, xmm0, xmm4 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 16 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 12 vpaddd xmm0, xmm0, xmm5 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 8 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 7 vpshufd xmm0, xmm0, 0x93 vpshufd xmm3, xmm3, 0x4E vpshufd 
xmm2, xmm2, 0x39 vpaddd xmm0, xmm0, xmm6 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 16 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 12 vpaddd xmm0, xmm0, xmm7 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 8 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 7 vpshufd xmm0, xmm0, 0x39 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x93 dec al jz 9f vshufps xmm8, xmm4, xmm5, 214 vpshufd xmm9, xmm4, 0x0F vpshufd xmm4, xmm8, 0x39 vshufps xmm8, xmm6, xmm7, 250 vpblendd xmm9, xmm9, xmm8, 0xAA vpunpcklqdq xmm8, xmm7, xmm5 vpblendd xmm8, xmm8, xmm6, 0x88 vpshufd xmm8, xmm8, 0x78 vpunpckhdq xmm5, xmm5, xmm7 vpunpckldq xmm6, xmm6, xmm5 vpshufd xmm7, xmm6, 0x1E vmovdqa xmm5, xmm9 vmovdqa xmm6, xmm8 jmp 9b 9: vpxor xmm0, xmm0, xmm2 vpxor xmm1, xmm1, xmm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 jmp 4b .p2align 6 _blake3_compress_in_place_avx512: blake3_compress_in_place_avx512: sub rsp, 72 vmovdqa xmmword ptr [rsp], xmm6 vmovdqa xmmword ptr [rsp+0x10], xmm7 vmovdqa xmmword ptr [rsp+0x20], xmm8 vmovdqa xmmword ptr [rsp+0x30], xmm9 vmovdqu xmm0, xmmword ptr [rcx] vmovdqu xmm1, xmmword ptr [rcx+0x10] movzx eax, byte ptr [rsp+0x70] movzx r8d, r8b shl rax, 32 add r8, rax vmovq xmm3, r9 vmovq xmm4, r8 vpunpcklqdq xmm3, xmm3, xmm4 vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip] vmovups xmm8, xmmword ptr [rdx] vmovups xmm9, xmmword ptr [rdx+0x10] vshufps xmm4, xmm8, xmm9, 136 vshufps xmm5, xmm8, xmm9, 221 vmovups xmm8, xmmword ptr [rdx+0x20] vmovups xmm9, xmmword ptr [rdx+0x30] vshufps xmm6, xmm8, xmm9, 136 vshufps xmm7, xmm8, xmm9, 221 vpshufd xmm6, xmm6, 0x93 vpshufd xmm7, xmm7, 0x93 mov al, 7 9: vpaddd xmm0, xmm0, xmm4 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 16 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 12 vpaddd xmm0, xmm0, xmm5 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 8 
vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 7 vpshufd xmm0, xmm0, 0x93 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x39 vpaddd xmm0, xmm0, xmm6 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 16 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 12 vpaddd xmm0, xmm0, xmm7 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 8 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 7 vpshufd xmm0, xmm0, 0x39 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x93 dec al jz 9f vshufps xmm8, xmm4, xmm5, 214 vpshufd xmm9, xmm4, 0x0F vpshufd xmm4, xmm8, 0x39 vshufps xmm8, xmm6, xmm7, 250 vpblendd xmm9, xmm9, xmm8, 0xAA vpunpcklqdq xmm8, xmm7, xmm5 vpblendd xmm8, xmm8, xmm6, 0x88 vpshufd xmm8, xmm8, 0x78 vpunpckhdq xmm5, xmm5, xmm7 vpunpckldq xmm6, xmm6, xmm5 vpshufd xmm7, xmm6, 0x1E vmovdqa xmm5, xmm9 vmovdqa xmm6, xmm8 jmp 9b 9: vpxor xmm0, xmm0, xmm2 vpxor xmm1, xmm1, xmm3 vmovdqu xmmword ptr [rcx], xmm0 vmovdqu xmmword ptr [rcx+0x10], xmm1 vmovdqa xmm6, xmmword ptr [rsp] vmovdqa xmm7, xmmword ptr [rsp+0x10] vmovdqa xmm8, xmmword ptr [rsp+0x20] vmovdqa xmm9, xmmword ptr [rsp+0x30] add rsp, 72 ret .p2align 6 _blake3_compress_xof_avx512: blake3_compress_xof_avx512: sub rsp, 72 vmovdqa xmmword ptr [rsp], xmm6 vmovdqa xmmword ptr [rsp+0x10], xmm7 vmovdqa xmmword ptr [rsp+0x20], xmm8 vmovdqa xmmword ptr [rsp+0x30], xmm9 vmovdqu xmm0, xmmword ptr [rcx] vmovdqu xmm1, xmmword ptr [rcx+0x10] movzx eax, byte ptr [rsp+0x70] movzx r8d, r8b mov r10, qword ptr [rsp+0x78] shl rax, 32 add r8, rax vmovq xmm3, r9 vmovq xmm4, r8 vpunpcklqdq xmm3, xmm3, xmm4 vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip] vmovups xmm8, xmmword ptr [rdx] vmovups xmm9, xmmword ptr [rdx+0x10] vshufps xmm4, xmm8, xmm9, 136 vshufps xmm5, xmm8, xmm9, 221 vmovups xmm8, xmmword ptr [rdx+0x20] vmovups xmm9, xmmword ptr [rdx+0x30] vshufps xmm6, xmm8, xmm9, 136 vshufps xmm7, xmm8, xmm9, 221 vpshufd xmm6, xmm6, 0x93 vpshufd xmm7, xmm7, 0x93 mov 
al, 7 9: vpaddd xmm0, xmm0, xmm4 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 16 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 12 vpaddd xmm0, xmm0, xmm5 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 8 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 7 vpshufd xmm0, xmm0, 0x93 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x39 vpaddd xmm0, xmm0, xmm6 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 16 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 12 vpaddd xmm0, xmm0, xmm7 vpaddd xmm0, xmm0, xmm1 vpxord xmm3, xmm3, xmm0 vprord xmm3, xmm3, 8 vpaddd xmm2, xmm2, xmm3 vpxord xmm1, xmm1, xmm2 vprord xmm1, xmm1, 7 vpshufd xmm0, xmm0, 0x39 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x93 dec al jz 9f vshufps xmm8, xmm4, xmm5, 214 vpshufd xmm9, xmm4, 0x0F vpshufd xmm4, xmm8, 0x39 vshufps xmm8, xmm6, xmm7, 250 vpblendd xmm9, xmm9, xmm8, 0xAA vpunpcklqdq xmm8, xmm7, xmm5 vpblendd xmm8, xmm8, xmm6, 0x88 vpshufd xmm8, xmm8, 0x78 vpunpckhdq xmm5, xmm5, xmm7 vpunpckldq xmm6, xmm6, xmm5 vpshufd xmm7, xmm6, 0x1E vmovdqa xmm5, xmm9 vmovdqa xmm6, xmm8 jmp 9b 9: vpxor xmm0, xmm0, xmm2 vpxor xmm1, xmm1, xmm3 vpxor xmm2, xmm2, xmmword ptr [rcx] vpxor xmm3, xmm3, xmmword ptr [rcx+0x10] vmovdqu xmmword ptr [r10], xmm0 vmovdqu xmmword ptr [r10+0x10], xmm1 vmovdqu xmmword ptr [r10+0x20], xmm2 vmovdqu xmmword ptr [r10+0x30], xmm3 vmovdqa xmm6, xmmword ptr [rsp] vmovdqa xmm7, xmmword ptr [rsp+0x10] vmovdqa xmm8, xmmword ptr [rsp+0x20] vmovdqa xmm9, xmmword ptr [rsp+0x30] add rsp, 72 ret .section .rdata .p2align 6 INDEX0: .long 0, 1, 2, 3, 16, 17, 18, 19 .long 8, 9, 10, 11, 24, 25, 26, 27 INDEX1: .long 4, 5, 6, 7, 20, 21, 22, 23 .long 12, 13, 14, 15, 28, 29, 30, 31 ADD0: .long 0, 1, 2, 3, 4, 5, 6, 7 .long 8, 9, 10, 11, 12, 13, 14, 15 ADD1: .long 1 ADD16: .long 16 BLAKE3_BLOCK_LEN: .long 64 .p2align 6 BLAKE3_IV: BLAKE3_IV_0: .long 0x6A09E667 BLAKE3_IV_1: .long 0xBB67AE85 
BLAKE3_IV_2: .long 0x3C6EF372 BLAKE3_IV_3: .long 0xA54FF53A
semyeong-yu/RadFoam
61,385
external/submodules/mesa/src/util/blake3/blake3_sse41_x86-64_unix.S
#include "mesa_blake3_visibility.h" #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits #endif #if defined(__ELF__) && defined(__CET__) && defined(__has_include) #if __has_include(<cet.h>) #include <cet.h> #endif #endif #if !defined(_CET_ENDBR) #define _CET_ENDBR #endif .intel_syntax noprefix HIDDEN blake3_hash_many_sse41 HIDDEN _blake3_hash_many_sse41 HIDDEN blake3_compress_in_place_sse41 HIDDEN _blake3_compress_in_place_sse41 HIDDEN blake3_compress_xof_sse41 HIDDEN _blake3_compress_xof_sse41 .global blake3_hash_many_sse41 .global _blake3_hash_many_sse41 .global blake3_compress_in_place_sse41 .global _blake3_compress_in_place_sse41 .global blake3_compress_xof_sse41 .global _blake3_compress_xof_sse41 #ifdef __APPLE__ .text #else .section .text #endif .p2align 6 _blake3_hash_many_sse41: blake3_hash_many_sse41: _CET_ENDBR push r15 push r14 push r13 push r12 push rbx push rbp mov rbp, rsp sub rsp, 360 and rsp, 0xFFFFFFFFFFFFFFC0 neg r9d movd xmm0, r9d pshufd xmm0, xmm0, 0x00 movdqa xmmword ptr [rsp+0x130], xmm0 movdqa xmm1, xmm0 pand xmm1, xmmword ptr [ADD0+rip] pand xmm0, xmmword ptr [ADD1+rip] movdqa xmmword ptr [rsp+0x150], xmm0 movd xmm0, r8d pshufd xmm0, xmm0, 0x00 paddd xmm0, xmm1 movdqa xmmword ptr [rsp+0x110], xmm0 pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip] pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip] pcmpgtd xmm1, xmm0 shr r8, 32 movd xmm2, r8d pshufd xmm2, xmm2, 0x00 psubd xmm2, xmm1 movdqa xmmword ptr [rsp+0x120], xmm2 mov rbx, qword ptr [rbp+0x50] mov r15, rdx shl r15, 6 movzx r13d, byte ptr [rbp+0x38] movzx r12d, byte ptr [rbp+0x48] cmp rsi, 4 jc 3f 2: movdqu xmm3, xmmword ptr [rcx] pshufd xmm0, xmm3, 0x00 pshufd xmm1, xmm3, 0x55 pshufd xmm2, xmm3, 0xAA pshufd xmm3, xmm3, 0xFF movdqu xmm7, xmmword ptr [rcx+0x10] pshufd xmm4, xmm7, 0x00 pshufd xmm5, xmm7, 0x55 pshufd xmm6, xmm7, 0xAA pshufd xmm7, xmm7, 0xFF mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] movzx eax, 
byte ptr [rbp+0x40] or eax, r13d xor edx, edx 9: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movdqu xmm8, xmmword ptr [r8+rdx-0x40] movdqu xmm9, xmmword ptr [r9+rdx-0x40] movdqu xmm10, xmmword ptr [r10+rdx-0x40] movdqu xmm11, xmmword ptr [r11+rdx-0x40] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp], xmm8 movdqa xmmword ptr [rsp+0x10], xmm9 movdqa xmmword ptr [rsp+0x20], xmm12 movdqa xmmword ptr [rsp+0x30], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x30] movdqu xmm9, xmmword ptr [r9+rdx-0x30] movdqu xmm10, xmmword ptr [r10+rdx-0x30] movdqu xmm11, xmmword ptr [r11+rdx-0x30] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0x40], xmm8 movdqa xmmword ptr [rsp+0x50], xmm9 movdqa xmmword ptr [rsp+0x60], xmm12 movdqa xmmword ptr [rsp+0x70], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x20] movdqu xmm9, xmmword ptr [r9+rdx-0x20] movdqu xmm10, xmmword ptr [r10+rdx-0x20] movdqu xmm11, xmmword ptr [r11+rdx-0x20] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0x80], xmm8 movdqa xmmword ptr [rsp+0x90], xmm9 movdqa xmmword ptr [rsp+0xA0], xmm12 movdqa xmmword ptr [rsp+0xB0], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x10] movdqu xmm9, xmmword ptr [r9+rdx-0x10] movdqu xmm10, xmmword ptr [r10+rdx-0x10] movdqu xmm11, xmmword ptr [r11+rdx-0x10] movdqa xmm12, xmm8 punpckldq 
xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0xC0], xmm8 movdqa xmmword ptr [rsp+0xD0], xmm9 movdqa xmmword ptr [rsp+0xE0], xmm12 movdqa xmmword ptr [rsp+0xF0], xmm13 movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip] movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip] movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip] movdqa xmm12, xmmword ptr [rsp+0x110] movdqa xmm13, xmmword ptr [rsp+0x120] movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip] movd xmm15, eax pshufd xmm15, xmm15, 0x00 prefetcht0 [r8+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r11+rdx+0x80] paddd xmm0, xmmword ptr [rsp] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x40] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x10] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x50] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, 
xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x80] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0xC0] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x90] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0xD0] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, 
xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x20] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x70] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x60] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x10] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x90] 
paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xB0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0xE0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x30] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0xD0] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa 
xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x40] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x20] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x60] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0xB0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld 
xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x50] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0xF0] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xA0] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0xE0] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x70] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0x30] paddd xmm3, xmmword ptr 
[rsp+0xF0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x40] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0x50] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x80] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 
movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xC0] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0xF0] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xD0] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0xA0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, 
xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x70] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x20] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x10] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x90] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0x80] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm4 paddd xmm1, 
xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xE0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0xC0] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xD0] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0x20] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] 
paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x30] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0x60] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xB0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0x10] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 
12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xF0] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0x90] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xE0] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x30] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xA0] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x40] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, 
xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 pxor xmm0, xmm8 pxor xmm1, xmm9 pxor xmm2, xmm10 pxor xmm3, xmm11 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 pxor xmm4, xmm12 pxor xmm5, xmm13 pxor xmm6, xmm14 pxor xmm7, xmm15 mov eax, r13d jne 9b movdqa xmm9, xmm0 punpckldq xmm0, xmm1 punpckhdq xmm9, xmm1 movdqa xmm11, xmm2 punpckldq xmm2, xmm3 punpckhdq xmm11, xmm3 movdqa xmm1, xmm0 punpcklqdq xmm0, xmm2 punpckhqdq xmm1, xmm2 movdqa xmm3, xmm9 punpcklqdq xmm9, xmm11 punpckhqdq xmm3, xmm11 movdqu xmmword ptr [rbx], xmm0 movdqu xmmword ptr [rbx+0x20], xmm1 movdqu xmmword ptr [rbx+0x40], xmm9 movdqu xmmword ptr [rbx+0x60], xmm3 movdqa xmm9, xmm4 punpckldq xmm4, xmm5 punpckhdq xmm9, xmm5 movdqa xmm11, xmm6 punpckldq xmm6, xmm7 punpckhdq xmm11, xmm7 movdqa xmm5, xmm4 punpcklqdq xmm4, xmm6 punpckhqdq xmm5, xmm6 movdqa xmm7, xmm9 punpcklqdq xmm9, xmm11 punpckhqdq xmm7, xmm11 movdqu xmmword ptr [rbx+0x10], xmm4 movdqu xmmword ptr [rbx+0x30], xmm5 movdqu xmmword ptr [rbx+0x50], xmm9 movdqu xmmword ptr [rbx+0x70], xmm7 movdqa xmm1, xmmword ptr [rsp+0x110] movdqa xmm0, xmm1 paddd xmm1, xmmword ptr [rsp+0x150] movdqa xmmword ptr [rsp+0x110], xmm1 pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip] pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip] pcmpgtd xmm0, xmm1 movdqa xmm1, xmmword ptr [rsp+0x120] psubd xmm1, xmm0 movdqa xmmword ptr [rsp+0x120], xmm1 add rbx, 128 add rdi, 32 sub rsi, 4 cmp rsi, 4 jnc 2b test rsi, rsi jnz 3f 4: mov rsp, rbp pop rbp pop rbx pop r12 pop r13 pop r14 
pop r15 ret .p2align 5 3: test esi, 0x2 je 3f movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movaps xmm8, xmm0 movaps xmm9, xmm1 movd xmm13, dword ptr [rsp+0x110] pinsrd xmm13, dword ptr [rsp+0x120], 1 pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 movaps xmmword ptr [rsp], xmm13 movd xmm14, dword ptr [rsp+0x114] pinsrd xmm14, dword ptr [rsp+0x124], 1 pinsrd xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 movaps xmmword ptr [rsp+0x10], xmm14 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movaps xmm10, xmm2 movups xmm4, xmmword ptr [r8+rdx-0x40] movups xmm5, xmmword ptr [r8+rdx-0x30] movaps xmm3, xmm4 shufps xmm4, xmm5, 136 shufps xmm3, xmm5, 221 movaps xmm5, xmm3 movups xmm6, xmmword ptr [r8+rdx-0x20] movups xmm7, xmmword ptr [r8+rdx-0x10] movaps xmm3, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm3, xmm7, 221 pshufd xmm7, xmm3, 0x93 movups xmm12, xmmword ptr [r9+rdx-0x40] movups xmm13, xmmword ptr [r9+rdx-0x30] movaps xmm11, xmm12 shufps xmm12, xmm13, 136 shufps xmm11, xmm13, 221 movaps xmm13, xmm11 movups xmm14, xmmword ptr [r9+rdx-0x20] movups xmm15, xmmword ptr [r9+rdx-0x10] movaps xmm11, xmm14 shufps xmm14, xmm15, 136 pshufd xmm14, xmm14, 0x93 shufps xmm11, xmm15, 221 pshufd xmm15, xmm11, 0x93 movaps xmm3, xmmword ptr [rsp] movaps xmm11, xmmword ptr [rsp+0x10] pinsrd xmm3, eax, 3 pinsrd xmm11, eax, 3 mov al, 7 9: paddd xmm0, xmm4 paddd xmm8, xmm12 movaps xmmword ptr [rsp+0x20], xmm4 movaps xmmword ptr [rsp+0x30], xmm12 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 movaps xmm12, xmmword ptr [ROT16+rip] pshufb xmm3, xmm12 pshufb xmm11, xmm12 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 20 psrld xmm4, 12 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 20 psrld xmm4, 12 por xmm9, xmm4 
paddd xmm0, xmm5 paddd xmm8, xmm13 movaps xmmword ptr [rsp+0x40], xmm5 movaps xmmword ptr [rsp+0x50], xmm13 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 movaps xmm13, xmmword ptr [ROT8+rip] pshufb xmm3, xmm13 pshufb xmm11, xmm13 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 25 psrld xmm4, 7 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 25 psrld xmm4, 7 por xmm9, xmm4 pshufd xmm0, xmm0, 0x93 pshufd xmm8, xmm8, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm11, xmm11, 0x4E pshufd xmm2, xmm2, 0x39 pshufd xmm10, xmm10, 0x39 paddd xmm0, xmm6 paddd xmm8, xmm14 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 pshufb xmm3, xmm12 pshufb xmm11, xmm12 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 20 psrld xmm4, 12 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 20 psrld xmm4, 12 por xmm9, xmm4 paddd xmm0, xmm7 paddd xmm8, xmm15 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 pshufb xmm3, xmm13 pshufb xmm11, xmm13 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 25 psrld xmm4, 7 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 25 psrld xmm4, 7 por xmm9, xmm4 pshufd xmm0, xmm0, 0x39 pshufd xmm8, xmm8, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm11, xmm11, 0x4E pshufd xmm2, xmm2, 0x93 pshufd xmm10, xmm10, 0x93 dec al je 9f movdqa xmm12, xmmword ptr [rsp+0x20] movdqa xmm5, xmmword ptr [rsp+0x40] pshufd xmm13, xmm12, 0x0F shufps xmm12, xmm5, 214 pshufd xmm4, xmm12, 0x39 movdqa xmm12, xmm6 shufps xmm12, xmm7, 250 pblendw xmm13, xmm12, 0xCC movdqa xmm12, xmm7 punpcklqdq xmm12, xmm5 pblendw xmm12, xmm6, 0xC0 pshufd xmm12, xmm12, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmmword ptr [rsp+0x20], xmm13 movdqa xmmword ptr [rsp+0x40], xmm12 movdqa xmm5, xmmword ptr [rsp+0x30] movdqa xmm13, xmmword ptr [rsp+0x50] pshufd xmm6, xmm5, 0x0F shufps xmm5, xmm13, 214 
pshufd xmm12, xmm5, 0x39 movdqa xmm5, xmm14 shufps xmm5, xmm15, 250 pblendw xmm6, xmm5, 0xCC movdqa xmm5, xmm15 punpcklqdq xmm5, xmm13 pblendw xmm5, xmm14, 0xC0 pshufd xmm5, xmm5, 0x78 punpckhdq xmm13, xmm15 punpckldq xmm14, xmm13 pshufd xmm15, xmm14, 0x1E movdqa xmm13, xmm6 movdqa xmm14, xmm5 movdqa xmm5, xmmword ptr [rsp+0x20] movdqa xmm6, xmmword ptr [rsp+0x40] jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 pxor xmm8, xmm10 pxor xmm9, xmm11 mov eax, r13d cmp rdx, r15 jne 2b movups xmmword ptr [rbx], xmm0 movups xmmword ptr [rbx+0x10], xmm1 movups xmmword ptr [rbx+0x20], xmm8 movups xmmword ptr [rbx+0x30], xmm9 movdqa xmm0, xmmword ptr [rsp+0x130] movdqa xmm1, xmmword ptr [rsp+0x110] movdqa xmm2, xmmword ptr [rsp+0x120] movdqu xmm3, xmmword ptr [rsp+0x118] movdqu xmm4, xmmword ptr [rsp+0x128] blendvps xmm1, xmm3, xmm0 blendvps xmm2, xmm4, xmm0 movdqa xmmword ptr [rsp+0x110], xmm1 movdqa xmmword ptr [rsp+0x120], xmm2 add rdi, 16 add rbx, 64 sub rsi, 2 3: test esi, 0x1 je 4b movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movd xmm13, dword ptr [rsp+0x110] pinsrd xmm13, dword ptr [rsp+0x120], 1 pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 movaps xmm14, xmmword ptr [ROT8+rip] movaps xmm15, xmmword ptr [ROT16+rip] mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movaps xmm3, xmm13 pinsrd xmm3, eax, 3 movups xmm4, xmmword ptr [r8+rdx-0x40] movups xmm5, xmmword ptr [r8+rdx-0x30] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [r8+rdx-0x20] movups xmm7, xmmword ptr [r8+rdx-0x10] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 
por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pblendw xmm9, xmm8, 0xCC movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 pblendw xmm8, xmm6, 0xC0 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 mov eax, r13d cmp rdx, r15 jne 2b movups xmmword ptr [rbx], xmm0 movups xmmword ptr [rbx+0x10], xmm1 jmp 4b .p2align 6 blake3_compress_in_place_sse41: _blake3_compress_in_place_sse41: _CET_ENDBR movups xmm0, xmmword ptr [rdi] movups xmm1, xmmword ptr [rdi+0x10] movaps xmm2, xmmword ptr [BLAKE3_IV+rip] shl r8, 32 add rdx, r8 movq xmm3, rcx movq xmm4, rdx punpcklqdq xmm3, xmm4 movups xmm4, xmmword ptr [rsi] movups xmm5, xmmword ptr [rsi+0x10] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [rsi+0x20] movups xmm7, xmmword ptr [rsi+0x30] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 movaps xmm14, xmmword ptr [ROT8+rip] movaps xmm15, xmmword ptr [ROT16+rip] mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 
psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pblendw xmm9, xmm8, 0xCC movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 pblendw xmm8, xmm6, 0xC0 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 movups xmmword ptr [rdi], xmm0 movups xmmword ptr [rdi+0x10], xmm1 ret .p2align 6 blake3_compress_xof_sse41: _blake3_compress_xof_sse41: _CET_ENDBR movups xmm0, xmmword ptr [rdi] movups xmm1, xmmword ptr [rdi+0x10] movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movzx eax, r8b movzx edx, dl shl rax, 32 add rdx, rax movq xmm3, rcx movq xmm4, rdx punpcklqdq xmm3, xmm4 movups xmm4, xmmword ptr [rsi] movups xmm5, xmmword ptr [rsi+0x10] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [rsi+0x20] movups xmm7, xmmword ptr [rsi+0x30] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 movaps xmm14, xmmword ptr [ROT8+rip] movaps xmm15, xmmword ptr [ROT16+rip] mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 
psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pblendw xmm9, xmm8, 0xCC movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 pblendw xmm8, xmm6, 0xC0 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: movdqu xmm4, xmmword ptr [rdi] movdqu xmm5, xmmword ptr [rdi+0x10] pxor xmm0, xmm2 pxor xmm1, xmm3 pxor xmm2, xmm4 pxor xmm3, xmm5 movups xmmword ptr [r9], xmm0 movups xmmword ptr [r9+0x10], xmm1 movups xmmword ptr [r9+0x20], xmm2 movups xmmword ptr [r9+0x30], xmm3 ret #ifdef __APPLE__ .static_data #else .section .rodata #endif .p2align 6 BLAKE3_IV: .long 0x6A09E667, 0xBB67AE85 .long 0x3C6EF372, 0xA54FF53A ROT16: .byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13 ROT8: .byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12 ADD0: .long 0, 1, 2, 3 ADD1: .long 4, 4, 4, 4 BLAKE3_IV_0: .long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667 BLAKE3_IV_1: .long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85 BLAKE3_IV_2: .long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372 BLAKE3_IV_3: .long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A BLAKE3_BLOCK_LEN: .long 64, 64, 64, 64 CMP_MSB_MASK: .long 0x80000000, 0x80000000, 0x80000000, 
0x80000000
semyeong-yu/RadFoam
63,459
external/submodules/mesa/src/util/blake3/blake3_sse41_x86-64_windows_gnu.S
.intel_syntax noprefix .global blake3_hash_many_sse41 .global _blake3_hash_many_sse41 .global blake3_compress_in_place_sse41 .global _blake3_compress_in_place_sse41 .global blake3_compress_xof_sse41 .global _blake3_compress_xof_sse41 .section .text .p2align 6 _blake3_hash_many_sse41: blake3_hash_many_sse41: push r15 push r14 push r13 push r12 push rsi push rdi push rbx push rbp mov rbp, rsp sub rsp, 528 and rsp, 0xFFFFFFFFFFFFFFC0 movdqa xmmword ptr [rsp+0x170], xmm6 movdqa xmmword ptr [rsp+0x180], xmm7 movdqa xmmword ptr [rsp+0x190], xmm8 movdqa xmmword ptr [rsp+0x1A0], xmm9 movdqa xmmword ptr [rsp+0x1B0], xmm10 movdqa xmmword ptr [rsp+0x1C0], xmm11 movdqa xmmword ptr [rsp+0x1D0], xmm12 movdqa xmmword ptr [rsp+0x1E0], xmm13 movdqa xmmword ptr [rsp+0x1F0], xmm14 movdqa xmmword ptr [rsp+0x200], xmm15 mov rdi, rcx mov rsi, rdx mov rdx, r8 mov rcx, r9 mov r8, qword ptr [rbp+0x68] movzx r9, byte ptr [rbp+0x70] neg r9d movd xmm0, r9d pshufd xmm0, xmm0, 0x00 movdqa xmmword ptr [rsp+0x130], xmm0 movdqa xmm1, xmm0 pand xmm1, xmmword ptr [ADD0+rip] pand xmm0, xmmword ptr [ADD1+rip] movdqa xmmword ptr [rsp+0x150], xmm0 movd xmm0, r8d pshufd xmm0, xmm0, 0x00 paddd xmm0, xmm1 movdqa xmmword ptr [rsp+0x110], xmm0 pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip] pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip] pcmpgtd xmm1, xmm0 shr r8, 32 movd xmm2, r8d pshufd xmm2, xmm2, 0x00 psubd xmm2, xmm1 movdqa xmmword ptr [rsp+0x120], xmm2 mov rbx, qword ptr [rbp+0x90] mov r15, rdx shl r15, 6 movzx r13d, byte ptr [rbp+0x78] movzx r12d, byte ptr [rbp+0x88] cmp rsi, 4 jc 3f 2: movdqu xmm3, xmmword ptr [rcx] pshufd xmm0, xmm3, 0x00 pshufd xmm1, xmm3, 0x55 pshufd xmm2, xmm3, 0xAA pshufd xmm3, xmm3, 0xFF movdqu xmm7, xmmword ptr [rcx+0x10] pshufd xmm4, xmm7, 0x00 pshufd xmm5, xmm7, 0x55 pshufd xmm6, xmm7, 0xAA pshufd xmm7, xmm7, 0xFF mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx 9: mov 
r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movdqu xmm8, xmmword ptr [r8+rdx-0x40] movdqu xmm9, xmmword ptr [r9+rdx-0x40] movdqu xmm10, xmmword ptr [r10+rdx-0x40] movdqu xmm11, xmmword ptr [r11+rdx-0x40] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp], xmm8 movdqa xmmword ptr [rsp+0x10], xmm9 movdqa xmmword ptr [rsp+0x20], xmm12 movdqa xmmword ptr [rsp+0x30], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x30] movdqu xmm9, xmmword ptr [r9+rdx-0x30] movdqu xmm10, xmmword ptr [r10+rdx-0x30] movdqu xmm11, xmmword ptr [r11+rdx-0x30] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0x40], xmm8 movdqa xmmword ptr [rsp+0x50], xmm9 movdqa xmmword ptr [rsp+0x60], xmm12 movdqa xmmword ptr [rsp+0x70], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x20] movdqu xmm9, xmmword ptr [r9+rdx-0x20] movdqu xmm10, xmmword ptr [r10+rdx-0x20] movdqu xmm11, xmmword ptr [r11+rdx-0x20] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0x80], xmm8 movdqa xmmword ptr [rsp+0x90], xmm9 movdqa xmmword ptr [rsp+0xA0], xmm12 movdqa xmmword ptr [rsp+0xB0], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x10] movdqu xmm9, xmmword ptr [r9+rdx-0x10] movdqu xmm10, xmmword ptr [r10+rdx-0x10] movdqu xmm11, xmmword ptr [r11+rdx-0x10] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 
punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0xC0], xmm8 movdqa xmmword ptr [rsp+0xD0], xmm9 movdqa xmmword ptr [rsp+0xE0], xmm12 movdqa xmmword ptr [rsp+0xF0], xmm13 movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip] movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip] movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip] movdqa xmm12, xmmword ptr [rsp+0x110] movdqa xmm13, xmmword ptr [rsp+0x120] movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip] movd xmm15, eax pshufd xmm15, xmm15, 0x00 prefetcht0 [r8+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r11+rdx+0x80] paddd xmm0, xmmword ptr [rsp] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x40] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x10] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x50] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr 
[rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x80] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0xC0] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x90] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0xD0] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 
25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x20] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x70] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x60] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x10] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x90] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm5 
paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xB0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0xE0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x30] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0xD0] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd 
xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x40] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x20] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x60] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0xB0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, 
xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x50] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0xF0] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xA0] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0xE0] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x70] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0x30] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, 
xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x40] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0x50] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x80] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 
paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xC0] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0xF0] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xD0] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0xA0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 
por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x70] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x20] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x10] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x90] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0x80] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, 
xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xE0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0xC0] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xD0] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0x20] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 
pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x30] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0x60] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xB0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0x10] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 
psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xF0] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0x90] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 pshufb xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xE0] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x30] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT16+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xA0] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x40] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, 
xmm2 pxor xmm14, xmm3 movdqa xmm8, xmmword ptr [ROT8+rip] pshufb xmm15, xmm8 pshufb xmm12, xmm8 pshufb xmm13, xmm8 pshufb xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 pxor xmm0, xmm8 pxor xmm1, xmm9 pxor xmm2, xmm10 pxor xmm3, xmm11 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 pxor xmm4, xmm12 pxor xmm5, xmm13 pxor xmm6, xmm14 pxor xmm7, xmm15 mov eax, r13d jne 9b movdqa xmm9, xmm0 punpckldq xmm0, xmm1 punpckhdq xmm9, xmm1 movdqa xmm11, xmm2 punpckldq xmm2, xmm3 punpckhdq xmm11, xmm3 movdqa xmm1, xmm0 punpcklqdq xmm0, xmm2 punpckhqdq xmm1, xmm2 movdqa xmm3, xmm9 punpcklqdq xmm9, xmm11 punpckhqdq xmm3, xmm11 movdqu xmmword ptr [rbx], xmm0 movdqu xmmword ptr [rbx+0x20], xmm1 movdqu xmmword ptr [rbx+0x40], xmm9 movdqu xmmword ptr [rbx+0x60], xmm3 movdqa xmm9, xmm4 punpckldq xmm4, xmm5 punpckhdq xmm9, xmm5 movdqa xmm11, xmm6 punpckldq xmm6, xmm7 punpckhdq xmm11, xmm7 movdqa xmm5, xmm4 punpcklqdq xmm4, xmm6 punpckhqdq xmm5, xmm6 movdqa xmm7, xmm9 punpcklqdq xmm9, xmm11 punpckhqdq xmm7, xmm11 movdqu xmmword ptr [rbx+0x10], xmm4 movdqu xmmword ptr [rbx+0x30], xmm5 movdqu xmmword ptr [rbx+0x50], xmm9 movdqu xmmword ptr [rbx+0x70], xmm7 movdqa xmm1, xmmword ptr [rsp+0x110] movdqa xmm0, xmm1 paddd xmm1, xmmword ptr [rsp+0x150] movdqa xmmword ptr [rsp+0x110], xmm1 pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip] pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip] pcmpgtd xmm0, xmm1 movdqa xmm1, xmmword ptr [rsp+0x120] psubd xmm1, xmm0 movdqa xmmword ptr [rsp+0x120], xmm1 add rbx, 128 add rdi, 32 sub rsi, 4 cmp rsi, 4 jnc 2b test rsi, rsi jne 3f 4: movdqa xmm6, xmmword ptr [rsp+0x170] movdqa xmm7, xmmword ptr [rsp+0x180] movdqa xmm8, xmmword ptr 
[rsp+0x190] movdqa xmm9, xmmword ptr [rsp+0x1A0] movdqa xmm10, xmmword ptr [rsp+0x1B0] movdqa xmm11, xmmword ptr [rsp+0x1C0] movdqa xmm12, xmmword ptr [rsp+0x1D0] movdqa xmm13, xmmword ptr [rsp+0x1E0] movdqa xmm14, xmmword ptr [rsp+0x1F0] movdqa xmm15, xmmword ptr [rsp+0x200] mov rsp, rbp pop rbp pop rbx pop rdi pop rsi pop r12 pop r13 pop r14 pop r15 ret .p2align 5 3: test esi, 0x2 je 3f movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movaps xmm8, xmm0 movaps xmm9, xmm1 movd xmm13, dword ptr [rsp+0x110] pinsrd xmm13, dword ptr [rsp+0x120], 1 pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 movaps xmmword ptr [rsp], xmm13 movd xmm14, dword ptr [rsp+0x114] pinsrd xmm14, dword ptr [rsp+0x124], 1 pinsrd xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 movaps xmmword ptr [rsp+0x10], xmm14 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movaps xmm10, xmm2 movups xmm4, xmmword ptr [r8+rdx-0x40] movups xmm5, xmmword ptr [r8+rdx-0x30] movaps xmm3, xmm4 shufps xmm4, xmm5, 136 shufps xmm3, xmm5, 221 movaps xmm5, xmm3 movups xmm6, xmmword ptr [r8+rdx-0x20] movups xmm7, xmmword ptr [r8+rdx-0x10] movaps xmm3, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm3, xmm7, 221 pshufd xmm7, xmm3, 0x93 movups xmm12, xmmword ptr [r9+rdx-0x40] movups xmm13, xmmword ptr [r9+rdx-0x30] movaps xmm11, xmm12 shufps xmm12, xmm13, 136 shufps xmm11, xmm13, 221 movaps xmm13, xmm11 movups xmm14, xmmword ptr [r9+rdx-0x20] movups xmm15, xmmword ptr [r9+rdx-0x10] movaps xmm11, xmm14 shufps xmm14, xmm15, 136 pshufd xmm14, xmm14, 0x93 shufps xmm11, xmm15, 221 pshufd xmm15, xmm11, 0x93 movaps xmm3, xmmword ptr [rsp] movaps xmm11, xmmword ptr [rsp+0x10] pinsrd xmm3, eax, 3 pinsrd xmm11, eax, 3 mov al, 7 9: paddd xmm0, xmm4 paddd xmm8, xmm12 movaps xmmword ptr [rsp+0x20], xmm4 movaps xmmword ptr [rsp+0x30], 
xmm12 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 movaps xmm12, xmmword ptr [ROT16+rip] pshufb xmm3, xmm12 pshufb xmm11, xmm12 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 20 psrld xmm4, 12 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 20 psrld xmm4, 12 por xmm9, xmm4 paddd xmm0, xmm5 paddd xmm8, xmm13 movaps xmmword ptr [rsp+0x40], xmm5 movaps xmmword ptr [rsp+0x50], xmm13 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 movaps xmm13, xmmword ptr [ROT8+rip] pshufb xmm3, xmm13 pshufb xmm11, xmm13 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 25 psrld xmm4, 7 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 25 psrld xmm4, 7 por xmm9, xmm4 pshufd xmm0, xmm0, 0x93 pshufd xmm8, xmm8, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm11, xmm11, 0x4E pshufd xmm2, xmm2, 0x39 pshufd xmm10, xmm10, 0x39 paddd xmm0, xmm6 paddd xmm8, xmm14 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 pshufb xmm3, xmm12 pshufb xmm11, xmm12 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 20 psrld xmm4, 12 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 20 psrld xmm4, 12 por xmm9, xmm4 paddd xmm0, xmm7 paddd xmm8, xmm15 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 pshufb xmm3, xmm13 pshufb xmm11, xmm13 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 25 psrld xmm4, 7 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 25 psrld xmm4, 7 por xmm9, xmm4 pshufd xmm0, xmm0, 0x39 pshufd xmm8, xmm8, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm11, xmm11, 0x4E pshufd xmm2, xmm2, 0x93 pshufd xmm10, xmm10, 0x93 dec al je 9f movdqa xmm12, xmmword ptr [rsp+0x20] movdqa xmm5, xmmword ptr [rsp+0x40] pshufd xmm13, xmm12, 0x0F shufps xmm12, xmm5, 214 pshufd xmm4, xmm12, 0x39 movdqa xmm12, xmm6 shufps xmm12, xmm7, 250 pblendw xmm13, xmm12, 0xCC movdqa xmm12, 
xmm7 punpcklqdq xmm12, xmm5 pblendw xmm12, xmm6, 0xC0 pshufd xmm12, xmm12, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmmword ptr [rsp+0x20], xmm13 movdqa xmmword ptr [rsp+0x40], xmm12 movdqa xmm5, xmmword ptr [rsp+0x30] movdqa xmm13, xmmword ptr [rsp+0x50] pshufd xmm6, xmm5, 0x0F shufps xmm5, xmm13, 214 pshufd xmm12, xmm5, 0x39 movdqa xmm5, xmm14 shufps xmm5, xmm15, 250 pblendw xmm6, xmm5, 0xCC movdqa xmm5, xmm15 punpcklqdq xmm5, xmm13 pblendw xmm5, xmm14, 0xC0 pshufd xmm5, xmm5, 0x78 punpckhdq xmm13, xmm15 punpckldq xmm14, xmm13 pshufd xmm15, xmm14, 0x1E movdqa xmm13, xmm6 movdqa xmm14, xmm5 movdqa xmm5, xmmword ptr [rsp+0x20] movdqa xmm6, xmmword ptr [rsp+0x40] jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 pxor xmm8, xmm10 pxor xmm9, xmm11 mov eax, r13d cmp rdx, r15 jne 2b movups xmmword ptr [rbx], xmm0 movups xmmword ptr [rbx+0x10], xmm1 movups xmmword ptr [rbx+0x20], xmm8 movups xmmword ptr [rbx+0x30], xmm9 movdqa xmm0, xmmword ptr [rsp+0x130] movdqa xmm1, xmmword ptr [rsp+0x110] movdqa xmm2, xmmword ptr [rsp+0x120] movdqu xmm3, xmmword ptr [rsp+0x118] movdqu xmm4, xmmword ptr [rsp+0x128] blendvps xmm1, xmm3, xmm0 blendvps xmm2, xmm4, xmm0 movdqa xmmword ptr [rsp+0x110], xmm1 movdqa xmmword ptr [rsp+0x120], xmm2 add rdi, 16 add rbx, 64 sub rsi, 2 3: test esi, 0x1 je 4b movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movd xmm13, dword ptr [rsp+0x110] pinsrd xmm13, dword ptr [rsp+0x120], 1 pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 movaps xmm14, xmmword ptr [ROT8+rip] movaps xmm15, xmmword ptr [ROT16+rip] mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x80] or eax, r13d xor edx, edx 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movaps xmm3, xmm13 pinsrd xmm3, eax, 3 movups xmm4, xmmword ptr [r8+rdx-0x40] movups xmm5, xmmword ptr [r8+rdx-0x30] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, 
xmmword ptr [r8+rdx-0x20] movups xmm7, xmmword ptr [r8+rdx-0x10] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pblendw xmm9, xmm8, 0xCC movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 pblendw xmm8, xmm6, 0xC0 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 mov eax, r13d cmp rdx, r15 jne 2b movups xmmword ptr [rbx], xmm0 movups xmmword ptr [rbx+0x10], xmm1 jmp 4b .p2align 6 blake3_compress_in_place_sse41: _blake3_compress_in_place_sse41: sub rsp, 120 movdqa xmmword ptr [rsp], xmm6 movdqa xmmword ptr [rsp+0x10], xmm7 movdqa xmmword ptr [rsp+0x20], xmm8 movdqa xmmword ptr [rsp+0x30], xmm9 movdqa xmmword ptr [rsp+0x40], xmm11 movdqa xmmword ptr [rsp+0x50], xmm14 movdqa xmmword ptr [rsp+0x60], xmm15 movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movzx eax, byte ptr [rsp+0xA0] movzx 
r8d, r8b shl rax, 32 add r8, rax movq xmm3, r9 movq xmm4, r8 punpcklqdq xmm3, xmm4 movups xmm4, xmmword ptr [rdx] movups xmm5, xmmword ptr [rdx+0x10] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [rdx+0x20] movups xmm7, xmmword ptr [rdx+0x30] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 movaps xmm14, xmmword ptr [ROT8+rip] movaps xmm15, xmmword ptr [ROT16+rip] mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pblendw xmm9, xmm8, 0xCC movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 pblendw xmm8, xmm6, 0xC0 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 movups xmmword ptr [rcx], xmm0 movups xmmword ptr [rcx+0x10], xmm1 movdqa xmm6, xmmword ptr [rsp] movdqa xmm7, xmmword ptr [rsp+0x10] movdqa xmm8, xmmword ptr [rsp+0x20] movdqa xmm9, xmmword ptr [rsp+0x30] movdqa xmm11, xmmword ptr [rsp+0x40] movdqa xmm14, xmmword ptr [rsp+0x50] 
movdqa xmm15, xmmword ptr [rsp+0x60] add rsp, 120 ret .p2align 6 _blake3_compress_xof_sse41: blake3_compress_xof_sse41: sub rsp, 120 movdqa xmmword ptr [rsp], xmm6 movdqa xmmword ptr [rsp+0x10], xmm7 movdqa xmmword ptr [rsp+0x20], xmm8 movdqa xmmword ptr [rsp+0x30], xmm9 movdqa xmmword ptr [rsp+0x40], xmm11 movdqa xmmword ptr [rsp+0x50], xmm14 movdqa xmmword ptr [rsp+0x60], xmm15 movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movzx eax, byte ptr [rsp+0xA0] movzx r8d, r8b mov r10, qword ptr [rsp+0xA8] shl rax, 32 add r8, rax movq xmm3, r9 movq xmm4, r8 punpcklqdq xmm3, xmm4 movups xmm4, xmmword ptr [rdx] movups xmm5, xmmword ptr [rdx+0x10] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [rdx+0x20] movups xmm7, xmmword ptr [rdx+0x30] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 movaps xmm14, xmmword ptr [ROT8+rip] movaps xmm15, xmmword ptr [ROT16+rip] mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm15 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 pshufb xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 
shufps xmm8, xmm7, 250 pblendw xmm9, xmm8, 0xCC movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 pblendw xmm8, xmm6, 0xC0 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: movdqu xmm4, xmmword ptr [rcx] movdqu xmm5, xmmword ptr [rcx+0x10] pxor xmm0, xmm2 pxor xmm1, xmm3 pxor xmm2, xmm4 pxor xmm3, xmm5 movups xmmword ptr [r10], xmm0 movups xmmword ptr [r10+0x10], xmm1 movups xmmword ptr [r10+0x20], xmm2 movups xmmword ptr [r10+0x30], xmm3 movdqa xmm6, xmmword ptr [rsp] movdqa xmm7, xmmword ptr [rsp+0x10] movdqa xmm8, xmmword ptr [rsp+0x20] movdqa xmm9, xmmword ptr [rsp+0x30] movdqa xmm11, xmmword ptr [rsp+0x40] movdqa xmm14, xmmword ptr [rsp+0x50] movdqa xmm15, xmmword ptr [rsp+0x60] add rsp, 120 ret .section .rdata .p2align 6 BLAKE3_IV: .long 0x6A09E667, 0xBB67AE85 .long 0x3C6EF372, 0xA54FF53A ROT16: .byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13 ROT8: .byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12 ADD0: .long 0, 1, 2, 3 ADD1: .long 4, 4, 4, 4 BLAKE3_IV_0: .long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667 BLAKE3_IV_1: .long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85 BLAKE3_IV_2: .long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372 BLAKE3_IV_3: .long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A BLAKE3_BLOCK_LEN: .long 64, 64, 64, 64 CMP_MSB_MASK: .long 0x80000000, 0x80000000, 0x80000000, 0x80000000
semyeong-yu/RadFoam
69,094
external/submodules/mesa/src/util/blake3/blake3_sse2_x86-64_unix.S
#include "mesa_blake3_visibility.h" #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits #endif #if defined(__ELF__) && defined(__CET__) && defined(__has_include) #if __has_include(<cet.h>) #include <cet.h> #endif #endif #if !defined(_CET_ENDBR) #define _CET_ENDBR #endif .intel_syntax noprefix HIDDEN blake3_hash_many_sse2 HIDDEN _blake3_hash_many_sse2 HIDDEN blake3_compress_in_place_sse2 HIDDEN _blake3_compress_in_place_sse2 HIDDEN blake3_compress_xof_sse2 HIDDEN _blake3_compress_xof_sse2 .global blake3_hash_many_sse2 .global _blake3_hash_many_sse2 .global blake3_compress_in_place_sse2 .global _blake3_compress_in_place_sse2 .global blake3_compress_xof_sse2 .global _blake3_compress_xof_sse2 #ifdef __APPLE__ .text #else .section .text #endif .p2align 6 _blake3_hash_many_sse2: blake3_hash_many_sse2: _CET_ENDBR push r15 push r14 push r13 push r12 push rbx push rbp mov rbp, rsp sub rsp, 360 and rsp, 0xFFFFFFFFFFFFFFC0 neg r9d movd xmm0, r9d pshufd xmm0, xmm0, 0x00 movdqa xmmword ptr [rsp+0x130], xmm0 movdqa xmm1, xmm0 pand xmm1, xmmword ptr [ADD0+rip] pand xmm0, xmmword ptr [ADD1+rip] movdqa xmmword ptr [rsp+0x150], xmm0 movd xmm0, r8d pshufd xmm0, xmm0, 0x00 paddd xmm0, xmm1 movdqa xmmword ptr [rsp+0x110], xmm0 pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip] pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip] pcmpgtd xmm1, xmm0 shr r8, 32 movd xmm2, r8d pshufd xmm2, xmm2, 0x00 psubd xmm2, xmm1 movdqa xmmword ptr [rsp+0x120], xmm2 mov rbx, qword ptr [rbp+0x50] mov r15, rdx shl r15, 6 movzx r13d, byte ptr [rbp+0x38] movzx r12d, byte ptr [rbp+0x48] cmp rsi, 4 jc 3f 2: movdqu xmm3, xmmword ptr [rcx] pshufd xmm0, xmm3, 0x00 pshufd xmm1, xmm3, 0x55 pshufd xmm2, xmm3, 0xAA pshufd xmm3, xmm3, 0xFF movdqu xmm7, xmmword ptr [rcx+0x10] pshufd xmm4, xmm7, 0x00 pshufd xmm5, xmm7, 0x55 pshufd xmm6, xmm7, 0xAA pshufd xmm7, xmm7, 0xFF mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] movzx eax, byte ptr 
[rbp+0x40] or eax, r13d xor edx, edx 9: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movdqu xmm8, xmmword ptr [r8+rdx-0x40] movdqu xmm9, xmmword ptr [r9+rdx-0x40] movdqu xmm10, xmmword ptr [r10+rdx-0x40] movdqu xmm11, xmmword ptr [r11+rdx-0x40] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp], xmm8 movdqa xmmword ptr [rsp+0x10], xmm9 movdqa xmmword ptr [rsp+0x20], xmm12 movdqa xmmword ptr [rsp+0x30], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x30] movdqu xmm9, xmmword ptr [r9+rdx-0x30] movdqu xmm10, xmmword ptr [r10+rdx-0x30] movdqu xmm11, xmmword ptr [r11+rdx-0x30] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0x40], xmm8 movdqa xmmword ptr [rsp+0x50], xmm9 movdqa xmmword ptr [rsp+0x60], xmm12 movdqa xmmword ptr [rsp+0x70], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x20] movdqu xmm9, xmmword ptr [r9+rdx-0x20] movdqu xmm10, xmmword ptr [r10+rdx-0x20] movdqu xmm11, xmmword ptr [r11+rdx-0x20] movdqa xmm12, xmm8 punpckldq xmm8, xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0x80], xmm8 movdqa xmmword ptr [rsp+0x90], xmm9 movdqa xmmword ptr [rsp+0xA0], xmm12 movdqa xmmword ptr [rsp+0xB0], xmm13 movdqu xmm8, xmmword ptr [r8+rdx-0x10] movdqu xmm9, xmmword ptr [r9+rdx-0x10] movdqu xmm10, xmmword ptr [r10+rdx-0x10] movdqu xmm11, xmmword ptr [r11+rdx-0x10] movdqa xmm12, xmm8 punpckldq xmm8, 
xmm9 punpckhdq xmm12, xmm9 movdqa xmm14, xmm10 punpckldq xmm10, xmm11 punpckhdq xmm14, xmm11 movdqa xmm9, xmm8 punpcklqdq xmm8, xmm10 punpckhqdq xmm9, xmm10 movdqa xmm13, xmm12 punpcklqdq xmm12, xmm14 punpckhqdq xmm13, xmm14 movdqa xmmword ptr [rsp+0xC0], xmm8 movdqa xmmword ptr [rsp+0xD0], xmm9 movdqa xmmword ptr [rsp+0xE0], xmm12 movdqa xmmword ptr [rsp+0xF0], xmm13 movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip] movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip] movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip] movdqa xmm12, xmmword ptr [rsp+0x110] movdqa xmm13, xmmword ptr [rsp+0x120] movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip] movd xmm15, eax pshufd xmm15, xmm15, 0x00 prefetcht0 [r8+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r11+rdx+0x80] paddd xmm0, xmmword ptr [rsp] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x40] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x10] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x50] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor 
xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x80] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0xC0] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x90] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0xD0] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld 
xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x20] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x70] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x60] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 
movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x10] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x90] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xB0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0xE0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld 
xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x30] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0xD0] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x40] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x20] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, 
xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x60] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0xB0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x50] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0xF0] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor 
xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xA0] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0xE0] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x70] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0x30] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld 
xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x40] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0x50] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x80] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 
paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xC0] paddd xmm1, xmmword ptr [rsp+0x90] paddd xmm2, xmmword ptr [rsp+0xF0] paddd xmm3, xmmword ptr [rsp+0xE0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xD0] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0xA0] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] 
paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0x70] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x20] paddd xmm1, xmmword ptr [rsp+0x30] paddd xmm2, xmmword ptr [rsp+0x10] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd 
xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x90] paddd xmm1, xmmword ptr [rsp+0xB0] paddd xmm2, xmmword ptr [rsp+0x80] paddd xmm3, xmmword ptr [rsp+0xF0] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xE0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0xC0] paddd xmm3, xmmword ptr [rsp+0x10] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor 
xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xD0] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0x20] paddd xmm3, xmmword ptr [rsp+0x40] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0x30] paddd xmm1, xmmword ptr [rsp+0xA0] paddd xmm2, xmmword ptr [rsp+0x60] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 
pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xB0] paddd xmm1, xmmword ptr [rsp+0x50] paddd xmm2, xmmword ptr [rsp+0x10] paddd xmm3, xmmword ptr [rsp+0x80] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xF0] paddd xmm1, xmmword ptr [rsp] paddd xmm2, xmmword ptr [rsp+0x90] paddd xmm3, xmmword ptr [rsp+0x60] paddd xmm0, xmm4 paddd xmm1, xmm5 paddd xmm2, xmm6 paddd xmm3, xmm7 pxor xmm12, xmm0 pxor xmm13, xmm1 pxor xmm14, xmm2 pxor xmm15, xmm3 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm12 paddd xmm9, xmm13 paddd xmm10, xmm14 paddd xmm11, xmm15 pxor xmm4, xmm8 pxor xmm5, xmm9 pxor xmm6, xmm10 pxor xmm7, xmm11 movdqa xmmword ptr 
[rsp+0x100], xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 paddd xmm0, xmmword ptr [rsp+0xE0] paddd xmm1, xmmword ptr [rsp+0x20] paddd xmm2, xmmword ptr [rsp+0x30] paddd xmm3, xmmword ptr [rsp+0x70] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 pshuflw xmm15, xmm15, 0xB1 pshufhw xmm15, xmm15, 0xB1 pshuflw xmm12, xmm12, 0xB1 pshufhw xmm12, xmm12, 0xB1 pshuflw xmm13, xmm13, 0xB1 pshufhw xmm13, xmm13, 0xB1 pshuflw xmm14, xmm14, 0xB1 pshufhw xmm14, xmm14, 0xB1 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 movdqa xmmword ptr [rsp+0x100], xmm8 movdqa xmm8, xmm5 psrld xmm8, 12 pslld xmm5, 20 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 12 pslld xmm6, 20 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 12 pslld xmm7, 20 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 12 pslld xmm4, 20 por xmm4, xmm8 paddd xmm0, xmmword ptr [rsp+0xA0] paddd xmm1, xmmword ptr [rsp+0xC0] paddd xmm2, xmmword ptr [rsp+0x40] paddd xmm3, xmmword ptr [rsp+0xD0] paddd xmm0, xmm5 paddd xmm1, xmm6 paddd xmm2, xmm7 paddd xmm3, xmm4 pxor xmm15, xmm0 pxor xmm12, xmm1 pxor xmm13, xmm2 pxor xmm14, xmm3 movdqa xmm8, xmm15 psrld xmm15, 8 pslld xmm8, 24 pxor xmm15, xmm8 movdqa xmm8, xmm12 psrld xmm12, 8 pslld xmm8, 24 pxor xmm12, xmm8 movdqa xmm8, xmm13 psrld xmm13, 8 pslld xmm8, 24 pxor xmm13, xmm8 movdqa xmm8, xmm14 psrld xmm14, 8 pslld xmm8, 24 pxor xmm14, xmm8 paddd xmm10, xmm15 paddd xmm11, xmm12 movdqa xmm8, xmmword ptr [rsp+0x100] paddd xmm8, xmm13 paddd xmm9, xmm14 pxor xmm5, xmm10 pxor xmm6, xmm11 pxor xmm7, xmm8 pxor xmm4, xmm9 pxor xmm0, xmm8 pxor xmm1, xmm9 pxor xmm2, xmm10 pxor 
xmm3, xmm11 movdqa xmm8, xmm5 psrld xmm8, 7 pslld xmm5, 25 por xmm5, xmm8 movdqa xmm8, xmm6 psrld xmm8, 7 pslld xmm6, 25 por xmm6, xmm8 movdqa xmm8, xmm7 psrld xmm8, 7 pslld xmm7, 25 por xmm7, xmm8 movdqa xmm8, xmm4 psrld xmm8, 7 pslld xmm4, 25 por xmm4, xmm8 pxor xmm4, xmm12 pxor xmm5, xmm13 pxor xmm6, xmm14 pxor xmm7, xmm15 mov eax, r13d jne 9b movdqa xmm9, xmm0 punpckldq xmm0, xmm1 punpckhdq xmm9, xmm1 movdqa xmm11, xmm2 punpckldq xmm2, xmm3 punpckhdq xmm11, xmm3 movdqa xmm1, xmm0 punpcklqdq xmm0, xmm2 punpckhqdq xmm1, xmm2 movdqa xmm3, xmm9 punpcklqdq xmm9, xmm11 punpckhqdq xmm3, xmm11 movdqu xmmword ptr [rbx], xmm0 movdqu xmmword ptr [rbx+0x20], xmm1 movdqu xmmword ptr [rbx+0x40], xmm9 movdqu xmmword ptr [rbx+0x60], xmm3 movdqa xmm9, xmm4 punpckldq xmm4, xmm5 punpckhdq xmm9, xmm5 movdqa xmm11, xmm6 punpckldq xmm6, xmm7 punpckhdq xmm11, xmm7 movdqa xmm5, xmm4 punpcklqdq xmm4, xmm6 punpckhqdq xmm5, xmm6 movdqa xmm7, xmm9 punpcklqdq xmm9, xmm11 punpckhqdq xmm7, xmm11 movdqu xmmword ptr [rbx+0x10], xmm4 movdqu xmmword ptr [rbx+0x30], xmm5 movdqu xmmword ptr [rbx+0x50], xmm9 movdqu xmmword ptr [rbx+0x70], xmm7 movdqa xmm1, xmmword ptr [rsp+0x110] movdqa xmm0, xmm1 paddd xmm1, xmmword ptr [rsp+0x150] movdqa xmmword ptr [rsp+0x110], xmm1 pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip] pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip] pcmpgtd xmm0, xmm1 movdqa xmm1, xmmword ptr [rsp+0x120] psubd xmm1, xmm0 movdqa xmmword ptr [rsp+0x120], xmm1 add rbx, 128 add rdi, 32 sub rsi, 4 cmp rsi, 4 jnc 2b test rsi, rsi jnz 3f 4: mov rsp, rbp pop rbp pop rbx pop r12 pop r13 pop r14 pop r15 ret .p2align 5 3: test esi, 0x2 je 3f movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movaps xmm8, xmm0 movaps xmm9, xmm1 movd xmm13, dword ptr [rsp+0x110] movd xmm14, dword ptr [rsp+0x120] punpckldq xmm13, xmm14 movaps xmmword ptr [rsp], xmm13 movd xmm14, dword ptr [rsp+0x114] movd xmm13, dword ptr [rsp+0x124] punpckldq xmm14, xmm13 movaps xmmword ptr [rsp+0x10], xmm14 mov r8, qword ptr 
[rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movaps xmm10, xmm2 movups xmm4, xmmword ptr [r8+rdx-0x40] movups xmm5, xmmword ptr [r8+rdx-0x30] movaps xmm3, xmm4 shufps xmm4, xmm5, 136 shufps xmm3, xmm5, 221 movaps xmm5, xmm3 movups xmm6, xmmword ptr [r8+rdx-0x20] movups xmm7, xmmword ptr [r8+rdx-0x10] movaps xmm3, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm3, xmm7, 221 pshufd xmm7, xmm3, 0x93 movups xmm12, xmmword ptr [r9+rdx-0x40] movups xmm13, xmmword ptr [r9+rdx-0x30] movaps xmm11, xmm12 shufps xmm12, xmm13, 136 shufps xmm11, xmm13, 221 movaps xmm13, xmm11 movups xmm14, xmmword ptr [r9+rdx-0x20] movups xmm15, xmmword ptr [r9+rdx-0x10] movaps xmm11, xmm14 shufps xmm14, xmm15, 136 pshufd xmm14, xmm14, 0x93 shufps xmm11, xmm15, 221 pshufd xmm15, xmm11, 0x93 shl rax, 0x20 or rax, 0x40 movq xmm3, rax movdqa xmmword ptr [rsp+0x20], xmm3 movaps xmm3, xmmword ptr [rsp] movaps xmm11, xmmword ptr [rsp+0x10] punpcklqdq xmm3, xmmword ptr [rsp+0x20] punpcklqdq xmm11, xmmword ptr [rsp+0x20] mov al, 7 9: paddd xmm0, xmm4 paddd xmm8, xmm12 movaps xmmword ptr [rsp+0x20], xmm4 movaps xmmword ptr [rsp+0x30], xmm12 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 pshuflw xmm11, xmm11, 0xB1 pshufhw xmm11, xmm11, 0xB1 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 20 psrld xmm4, 12 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 20 psrld xmm4, 12 por xmm9, xmm4 paddd xmm0, xmm5 paddd xmm8, xmm13 movaps xmmword ptr [rsp+0x40], xmm5 movaps xmmword ptr [rsp+0x50], xmm13 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 movdqa xmm13, xmm3 psrld xmm3, 8 pslld xmm13, 24 pxor xmm3, xmm13 movdqa xmm13, xmm11 psrld xmm11, 8 pslld xmm13, 24 pxor xmm11, xmm13 paddd xmm2, xmm3 paddd xmm10, 
xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 25 psrld xmm4, 7 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 25 psrld xmm4, 7 por xmm9, xmm4 pshufd xmm0, xmm0, 0x93 pshufd xmm8, xmm8, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm11, xmm11, 0x4E pshufd xmm2, xmm2, 0x39 pshufd xmm10, xmm10, 0x39 paddd xmm0, xmm6 paddd xmm8, xmm14 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 pshuflw xmm11, xmm11, 0xB1 pshufhw xmm11, xmm11, 0xB1 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 20 psrld xmm4, 12 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 20 psrld xmm4, 12 por xmm9, xmm4 paddd xmm0, xmm7 paddd xmm8, xmm15 paddd xmm0, xmm1 paddd xmm8, xmm9 pxor xmm3, xmm0 pxor xmm11, xmm8 movdqa xmm13, xmm3 psrld xmm3, 8 pslld xmm13, 24 pxor xmm3, xmm13 movdqa xmm13, xmm11 psrld xmm11, 8 pslld xmm13, 24 pxor xmm11, xmm13 paddd xmm2, xmm3 paddd xmm10, xmm11 pxor xmm1, xmm2 pxor xmm9, xmm10 movdqa xmm4, xmm1 pslld xmm1, 25 psrld xmm4, 7 por xmm1, xmm4 movdqa xmm4, xmm9 pslld xmm9, 25 psrld xmm4, 7 por xmm9, xmm4 pshufd xmm0, xmm0, 0x39 pshufd xmm8, xmm8, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm11, xmm11, 0x4E pshufd xmm2, xmm2, 0x93 pshufd xmm10, xmm10, 0x93 dec al je 9f movdqa xmm12, xmmword ptr [rsp+0x20] movdqa xmm5, xmmword ptr [rsp+0x40] pshufd xmm13, xmm12, 0x0F shufps xmm12, xmm5, 214 pshufd xmm4, xmm12, 0x39 movdqa xmm12, xmm6 shufps xmm12, xmm7, 250 pand xmm13, xmmword ptr [PBLENDW_0x33_MASK+rip] pand xmm12, xmmword ptr [PBLENDW_0xCC_MASK+rip] por xmm13, xmm12 movdqa xmmword ptr [rsp+0x20], xmm13 movdqa xmm12, xmm7 punpcklqdq xmm12, xmm5 movdqa xmm13, xmm6 pand xmm12, xmmword ptr [PBLENDW_0x3F_MASK+rip] pand xmm13, xmmword ptr [PBLENDW_0xC0_MASK+rip] por xmm12, xmm13 pshufd xmm12, xmm12, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmmword ptr [rsp+0x40], xmm12 movdqa xmm5, xmmword ptr [rsp+0x30] movdqa xmm13, 
xmmword ptr [rsp+0x50] pshufd xmm6, xmm5, 0x0F shufps xmm5, xmm13, 214 pshufd xmm12, xmm5, 0x39 movdqa xmm5, xmm14 shufps xmm5, xmm15, 250 pand xmm6, xmmword ptr [PBLENDW_0x33_MASK+rip] pand xmm5, xmmword ptr [PBLENDW_0xCC_MASK+rip] por xmm6, xmm5 movdqa xmm5, xmm15 punpcklqdq xmm5, xmm13 movdqa xmmword ptr [rsp+0x30], xmm2 movdqa xmm2, xmm14 pand xmm5, xmmword ptr [PBLENDW_0x3F_MASK+rip] pand xmm2, xmmword ptr [PBLENDW_0xC0_MASK+rip] por xmm5, xmm2 movdqa xmm2, xmmword ptr [rsp+0x30] pshufd xmm5, xmm5, 0x78 punpckhdq xmm13, xmm15 punpckldq xmm14, xmm13 pshufd xmm15, xmm14, 0x1E movdqa xmm13, xmm6 movdqa xmm14, xmm5 movdqa xmm5, xmmword ptr [rsp+0x20] movdqa xmm6, xmmword ptr [rsp+0x40] jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 pxor xmm8, xmm10 pxor xmm9, xmm11 mov eax, r13d cmp rdx, r15 jne 2b movups xmmword ptr [rbx], xmm0 movups xmmword ptr [rbx+0x10], xmm1 movups xmmword ptr [rbx+0x20], xmm8 movups xmmword ptr [rbx+0x30], xmm9 mov eax, dword ptr [rsp+0x130] neg eax mov r10d, dword ptr [rsp+0x110+8*rax] mov r11d, dword ptr [rsp+0x120+8*rax] mov dword ptr [rsp+0x110], r10d mov dword ptr [rsp+0x120], r11d add rdi, 16 add rbx, 64 sub rsi, 2 3: test esi, 0x1 je 4b movups xmm0, xmmword ptr [rcx] movups xmm1, xmmword ptr [rcx+0x10] movd xmm13, dword ptr [rsp+0x110] movd xmm14, dword ptr [rsp+0x120] punpckldq xmm13, xmm14 mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d movaps xmm2, xmmword ptr [BLAKE3_IV+rip] shl rax, 32 or rax, 64 movq xmm12, rax movdqa xmm3, xmm13 punpcklqdq xmm3, xmm12 movups xmm4, xmmword ptr [r8+rdx-0x40] movups xmm5, xmmword ptr [r8+rdx-0x30] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [r8+rdx-0x20] movups xmm7, xmmword ptr [r8+rdx-0x10] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 mov al, 7 9: paddd xmm0, xmm4 
paddd xmm0, xmm1 pxor xmm3, xmm0 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 movdqa xmm14, xmm3 psrld xmm3, 8 pslld xmm14, 24 pxor xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 movdqa xmm14, xmm3 psrld xmm3, 8 pslld xmm14, 24 pxor xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip] pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip] por xmm9, xmm8 movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 movdqa xmm10, xmm6 pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip] pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip] por xmm8, xmm10 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 mov eax, r13d cmp rdx, r15 jne 2b movups xmmword ptr [rbx], xmm0 movups xmmword ptr [rbx+0x10], xmm1 jmp 4b .p2align 6 blake3_compress_in_place_sse2: _blake3_compress_in_place_sse2: _CET_ENDBR movups xmm0, xmmword ptr [rdi] movups xmm1, xmmword ptr [rdi+0x10] movaps xmm2, xmmword ptr [BLAKE3_IV+rip] shl r8, 32 add rdx, r8 movq xmm3, rcx movq xmm4, rdx punpcklqdq xmm3, xmm4 movups xmm4, xmmword ptr [rsi] movups xmm5, xmmword ptr [rsi+0x10] movaps xmm8, 
xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [rsi+0x20] movups xmm7, xmmword ptr [rsi+0x30] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 movdqa xmm14, xmm3 psrld xmm3, 8 pslld xmm14, 24 pxor xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 movdqa xmm14, xmm3 psrld xmm3, 8 pslld xmm14, 24 pxor xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip] pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip] por xmm9, xmm8 movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 movdqa xmm10, xmm6 pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip] pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip] por xmm8, xmm10 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: pxor xmm0, xmm2 pxor xmm1, xmm3 movups xmmword ptr [rdi], xmm0 movups xmmword ptr [rdi+0x10], xmm1 ret .p2align 6 blake3_compress_xof_sse2: _blake3_compress_xof_sse2: _CET_ENDBR movups xmm0, xmmword ptr 
[rdi] movups xmm1, xmmword ptr [rdi+0x10] movaps xmm2, xmmword ptr [BLAKE3_IV+rip] movzx eax, r8b movzx edx, dl shl rax, 32 add rdx, rax movq xmm3, rcx movq xmm4, rdx punpcklqdq xmm3, xmm4 movups xmm4, xmmword ptr [rsi] movups xmm5, xmmword ptr [rsi+0x10] movaps xmm8, xmm4 shufps xmm4, xmm5, 136 shufps xmm8, xmm5, 221 movaps xmm5, xmm8 movups xmm6, xmmword ptr [rsi+0x20] movups xmm7, xmmword ptr [rsi+0x30] movaps xmm8, xmm6 shufps xmm6, xmm7, 136 pshufd xmm6, xmm6, 0x93 shufps xmm8, xmm7, 221 pshufd xmm7, xmm8, 0x93 mov al, 7 9: paddd xmm0, xmm4 paddd xmm0, xmm1 pxor xmm3, xmm0 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm5 paddd xmm0, xmm1 pxor xmm3, xmm0 movdqa xmm14, xmm3 psrld xmm3, 8 pslld xmm14, 24 pxor xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x93 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x39 paddd xmm0, xmm6 paddd xmm0, xmm1 pxor xmm3, xmm0 pshuflw xmm3, xmm3, 0xB1 pshufhw xmm3, xmm3, 0xB1 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 20 psrld xmm11, 12 por xmm1, xmm11 paddd xmm0, xmm7 paddd xmm0, xmm1 pxor xmm3, xmm0 movdqa xmm14, xmm3 psrld xmm3, 8 pslld xmm14, 24 pxor xmm3, xmm14 paddd xmm2, xmm3 pxor xmm1, xmm2 movdqa xmm11, xmm1 pslld xmm1, 25 psrld xmm11, 7 por xmm1, xmm11 pshufd xmm0, xmm0, 0x39 pshufd xmm3, xmm3, 0x4E pshufd xmm2, xmm2, 0x93 dec al jz 9f movdqa xmm8, xmm4 shufps xmm8, xmm5, 214 pshufd xmm9, xmm4, 0x0F pshufd xmm4, xmm8, 0x39 movdqa xmm8, xmm6 shufps xmm8, xmm7, 250 pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip] pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip] por xmm9, xmm8 movdqa xmm8, xmm7 punpcklqdq xmm8, xmm5 movdqa xmm10, xmm6 pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip] pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip] por xmm8, xmm10 pshufd xmm8, xmm8, 0x78 punpckhdq xmm5, xmm7 punpckldq xmm6, xmm5 pshufd 
xmm7, xmm6, 0x1E movdqa xmm5, xmm9 movdqa xmm6, xmm8 jmp 9b 9: movdqu xmm4, xmmword ptr [rdi] movdqu xmm5, xmmword ptr [rdi+0x10] pxor xmm0, xmm2 pxor xmm1, xmm3 pxor xmm2, xmm4 pxor xmm3, xmm5 movups xmmword ptr [r9], xmm0 movups xmmword ptr [r9+0x10], xmm1 movups xmmword ptr [r9+0x20], xmm2 movups xmmword ptr [r9+0x30], xmm3 ret #ifdef __APPLE__ .static_data #else .section .rodata #endif .p2align 6 BLAKE3_IV: .long 0x6A09E667, 0xBB67AE85 .long 0x3C6EF372, 0xA54FF53A ADD0: .long 0, 1, 2, 3 ADD1: .long 4, 4, 4, 4 BLAKE3_IV_0: .long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667 BLAKE3_IV_1: .long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85 BLAKE3_IV_2: .long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372 BLAKE3_IV_3: .long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A BLAKE3_BLOCK_LEN: .long 64, 64, 64, 64 CMP_MSB_MASK: .long 0x80000000, 0x80000000, 0x80000000, 0x80000000 PBLENDW_0x33_MASK: .long 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000 PBLENDW_0xCC_MASK: .long 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF PBLENDW_0x3F_MASK: .long 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 PBLENDW_0xC0_MASK: .long 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF
SergeiPatiakin/rsas
7,449
as-examples/cat.s
.text .global _start _start: movq %rsp, %rdx movq (%rdx), %rdi // main(argc, argv); movq %rdx, %rsi addq $0x8, %rsi callq main movq $0x3c, %rax // exit( movq $0x0, %rdi // 0 syscall // ) // // void main(u64 argc, char** argv) // main: pushq %rbp movq %rsp, %rbp subq $0x38, %rsp // // 8 bytes for in_fd at -0x8(%rbp) // 8 bytes for chunk_bytes_read at -0x10(%rbp) // 8 bytes for call_bytes_written at -0x18(%rbp) // 8 bytes for chunk_bytes_written at -0x20(%rbp) // 8 bytes for argn at -0x28(%rbp) // 8 bytes for argc at -0x30(%rbp) // 8 bytes for argv at -0x38(%rbp) movq %rdi, -0x30(%rbp) movq %rsi, -0x38(%rbp) cmpq $0x2, %rdi // if (argc < 2) jge main_argc_ok movq $errmsg_noargs, %rdi // panic(errmsg_noargs); call panic main_argc_ok: movq $0x1, -0x28(%rbp) // argn = 1; main_read_file: movq -0x30(%rbp), %rdi movq -0x38(%rbp), %rsi cmpq %rdi, -0x28(%rbp) // while (argn < argc) { jge main_finished movq $0x2, %rax // in_fd = open( movq -0x28(%rbp), %rcx addq %rcx, %rcx addq %rcx, %rcx addq %rcx, %rcx addq %rcx, %rsi movq (%rsi), %rdi // argv[argn], movq $0x2, %rsi // O_RDWR, movq $0x0, %rdx // 0 // mode syscall movq %rax, -0x8(%rbp) // ); movq %rax, %rdi call assert_syscall_success // assert_syscall_success(in_fd); main_read_chunk: // while(true) { movq $0x0, %rax // chunk_bytes_read = read( movq -0x8(%rbp), %rdi // in_fd, movq $content_buffer, %rsi // content_buffer, movq $0x1000, %rdx // 0x1000 syscall // ); movq %rax, -0x10(%rbp) movq %rax, %rdi call assert_syscall_success // assert_syscall_success(chunk_bytes_read); cmpq $0x0, -0x10(%rbp) // if (chunk_bytes_read == 0) break; jz main_file_finished movq $0x0, -0x18(%rbp) // chunk_bytes_written = 0; main_write_more: movq -0x10(%rbp), %rdx // while(chunk_bytes_to_write = chunk_bytes_read - chunk_bytes_written) { subq -0x18(%rbp), %rdx cmpq $0x0, %rdx jz main_chunk_finished movq $0x1, %rax // call_bytes_written = write( movq $0x1, %rdi // 1, movq $content_buffer, %rsi // content_buffer + chunk_bytes_written, addq 
-0x18(%rbp), %rsi // chunk_bytes_to_write syscall // ); movq %rax, -0x18(%rbp) movq %rax, %rdi call assert_syscall_success // assert_syscall_success(bytes_written); jmp main_write_more // } main_chunk_finished: jmp main_read_chunk // } main_file_finished: addq $0x1, -0x28(%rbp) // argn += 1; jmp main_read_file // } main_finished: movq %rbp, %rsp // return; popq %rbp retq // // u64 strlen (char* s) // strlen: movq $0x0, %rax // u64 i = 0; strlen_loop: // while (true) { movq %rdi, %rsi // char* c = s + i; addq %rax, %rsi testq $0xff, (%rsi) // if (!*c) { jz strlen_end // break; addq $0x1, %rax // } jmp strlen_loop // } strlen_end: retq // return i; // // void assert_syscall_success (i64 syscall_result) // assert_syscall_success: pushq %rbp movq %rsp, %rbp subq $0x10, %rsp // syscall_result at -0x8(%rbp) // i at -0x10(%rbp) movq %rdi, -0x8(%rbp) movq $0x0, -0x10(%rbp) // -1 to -4095 means error, anything else means success cmpq $-0xfff, %rdi // if (syscall_result < -4095) jl assert_syscall_success_ok // return; cmpq $0x0, %rdi // if (syscall_result >= 0) jge assert_syscall_success_ok // return; movq $errmsg_syscall, %rdi // panic(errmsg_syscall); call panic assert_syscall_success_ok: movq %rbp, %rsp popq %rbp retq // // void panic (char* errmsg) // panic: pushq %rbp movq %rsp, %rbp subq $0x8, %rsp // errmsg at -0x8(%rbp) movq %rdi, -0x8(%rbp) call strlen movq %rax, %rcx movq $0x1, %rax // write( movq $0x2, %rdi // 2, movq -0x8(%rbp), %rsi // errmsg, movq %rcx, %rdx // errmsg_len syscall // ) movq $0x1, %rax // write( movq $0x2, %rdi // 2, movq $newline_char, %rsi // newline_char, movq $0x1, %rdx // 1 syscall // ) movq $0x3c, %rax // exit( movq $0x1, %rdi // 1 syscall // ) .data errmsg_noargs: .asciz "No args provided" errmsg_enoent: .asciz "No such file or directory" errmsg_syscall: .asciz "Syscall error" newline_char: .byte 0x0A content_buffer: .skip 0x1000
SergeiPatiakin/rsas
2,021
as-examples/hello-main.s
.text .global _start _start: main_loop: movq $0x1, %rax // write( movq $0x1, %rdi // 1, movq $local_hello_msg, %rsi // local_hello_msg, movq $0x13, %rdx // 19 syscall // ) movq $0x1, %rax // write( movq $0x1, %rdi // 1, movq $global_hello_msg, %rsi // global_hello_msg, movq $0x14, %rdx // 20 syscall // ) callq local_subroutine // local_subroutine(); callq hello_module_subroutine // hello_module_subroutine(); movq $0x3c, %rax // exit( movq $0x0, %rdi // 0 syscall // ) // void local_subroutine() local_subroutine: movq $0x1, %rax // write( movq $0x1, %rdi // 1, movq $local_subroutine_hello_msg, %rsi // local_subroutine_hello_msg, movq $0x19, %rdx // 25 syscall // ) retq // return; .data .quad 0x0 // Dummy local_hello_msg: // Local symbol .asciz "Hello Local World!\n" .global global_hello_msg global_hello_msg: // Global symbol .asciz "Hello Global World!\n" local_subroutine_hello_msg: .asciz "Hello Local Subroutine!\n"
Sergio7DAW/Entorno_Escenarios_UD3
7,962
escenario2/tests/syntax-tests/highlighted/ARM Assembly/test.S
.data .balign 4 red: .word 0 green: .word 0 blue: .word 0 .text .global grayscale .func grayscale grayscale: assign:  /* some comment */  ldr ip, addr_red  str r3, [ip]  ldr ip, addr_green  ldmfd r13!, {r3}  str r3, [ip]  ldr ip, addr_blue  ldmfd r13!, {r3}  str r3, [ip]  stmfd r13!, {r4-r8}  ldr ip, addr_red  ldr r3, [ip]  ldr ip, addr_green  ldr r4, [ip]  ldr ip, addr_blue  ldr r5, [ip] /* another comment */ grayscale_loop:  ldrb r6, [r1]  mul r6, r3, r6  add r1, r1, #1  ldrb r7, [r1]  mul r7, r4, r7  add r1, r1, #1  ldrb r8, [r1]  mul r8, r5, r8  add r1, r1, #1  add r6, r6, r7  add r6, r6, r8  asr r6, r6, #8  str r6, [r2]  add r2, r2, #1  sub r0, r0, #1  cmp r0, #0  bne grayscale_loop  ldmfd r13!, {r4-r8}  stmfd r13!, {r0-r1}  bx lr addr_red: .word red addr_green: .word green addr_blue: .word blue
sevki/codex
1,406
kernel_loader/src/test_elf.S
# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Build instructions: # x86_64-linux-gnu-as test_elf.S -o test_elf.o # x86_64-linux-gnu-ld test_elf.o -o test_elf.bin -T test_elf.ld .intel_syntax noprefix .section .rodata hello_world: .string "Hello world!\n" .set hello_size, .-hello_world .text .globl _start _start: lea rsi, [rip + hello_world] # rsi -> message string mov rcx, hello_size # rcx = length of message mov dx, 0x3F8 # dx = COM1 port .print_loop: # Wait for the transmit buffer to be empty by polling the line status. add dx, 5 # dx = line status register .wait_empty: in al, dx # read line status test al, 0x20 # check buffer empty flag jz .wait_empty # keep waiting if flag is not set .wait_done: sub dx, 5 # dx = data register # Load a byte of the message and send it to the serial port. lodsb # load message byte from RSI to AL out dx, al # send byte to serial port dec rcx # rcx-- jnz .print_loop # repeat if rcx != 0 .done: int3 # cause vcpu to exit
sevki/libcrosvm
1,406
kernel_loader/src/test_elf.S
# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Build instructions: # x86_64-linux-gnu-as test_elf.S -o test_elf.o # x86_64-linux-gnu-ld test_elf.o -o test_elf.bin -T test_elf.ld .intel_syntax noprefix .section .rodata hello_world: .string "Hello world!\n" .set hello_size, .-hello_world .text .globl _start _start: lea rsi, [rip + hello_world] # rsi -> message string mov rcx, hello_size # rcx = length of message mov dx, 0x3F8 # dx = COM1 port .print_loop: # Wait for the transmit buffer to be empty by polling the line status. add dx, 5 # dx = line status register .wait_empty: in al, dx # read line status test al, 0x20 # check buffer empty flag jz .wait_empty # keep waiting if flag is not set .wait_done: sub dx, 5 # dx = data register # Load a byte of the message and send it to the serial port. lodsb # load message byte from RSI to AL out dx, al # send byte to serial port dec rcx # rcx-- jnz .print_loop # repeat if rcx != 0 .done: int3 # cause vcpu to exit
Sergio7DAW/Entorno_Escenarios_UD3
7,962
escenario2/tests/syntax-tests/highlighted/ARM Assembly/test.S
.data .balign 4 red: .word 0 green: .word 0 blue: .word 0 .text .global grayscale .func grayscale grayscale: assign:  /* some comment */  ldr ip, addr_red  str r3, [ip]  ldr ip, addr_green  ldmfd r13!, {r3}  str r3, [ip]  ldr ip, addr_blue  ldmfd r13!, {r3}  str r3, [ip]  stmfd r13!, {r4-r8}  ldr ip, addr_red  ldr r3, [ip]  ldr ip, addr_green  ldr r4, [ip]  ldr ip, addr_blue  ldr r5, [ip] /* another comment */ grayscale_loop:  ldrb r6, [r1]  mul r6, r3, r6  add r1, r1, #1  ldrb r7, [r1]  mul r7, r4, r7  add r1, r1, #1  ldrb r8, [r1]  mul r8, r5, r8  add r1, r1, #1  add r6, r6, r7  add r6, r6, r8  asr r6, r6, #8  str r6, [r2]  add r2, r2, #1  sub r0, r0, #1  cmp r0, #0  bne grayscale_loop  ldmfd r13!, {r4-r8}  stmfd r13!, {r0-r1}  bx lr addr_red: .word red addr_green: .word green addr_blue: .word blue
sevki/libcrosvm
1,406
kernel_loader/src/test_elf.S
# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Build instructions: # x86_64-linux-gnu-as test_elf.S -o test_elf.o # x86_64-linux-gnu-ld test_elf.o -o test_elf.bin -T test_elf.ld .intel_syntax noprefix .section .rodata hello_world: .string "Hello world!\n" .set hello_size, .-hello_world .text .globl _start _start: lea rsi, [rip + hello_world] # rsi -> message string mov rcx, hello_size # rcx = length of message mov dx, 0x3F8 # dx = COM1 port .print_loop: # Wait for the transmit buffer to be empty by polling the line status. add dx, 5 # dx = line status register .wait_empty: in al, dx # read line status test al, 0x20 # check buffer empty flag jz .wait_empty # keep waiting if flag is not set .wait_done: sub dx, 5 # dx = data register # Load a byte of the message and send it to the serial port. lodsb # load message byte from RSI to AL out dx, al # send byte to serial port dec rcx # rcx-- jnz .print_loop # repeat if rcx != 0 .done: int3 # cause vcpu to exit
sevki/codex
1,406
kernel_loader/src/test_elf.S
# Copyright 2022 The ChromiumOS Authors # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Build instructions: # x86_64-linux-gnu-as test_elf.S -o test_elf.o # x86_64-linux-gnu-ld test_elf.o -o test_elf.bin -T test_elf.ld .intel_syntax noprefix .section .rodata hello_world: .string "Hello world!\n" .set hello_size, .-hello_world .text .globl _start _start: lea rsi, [rip + hello_world] # rsi -> message string mov rcx, hello_size # rcx = length of message mov dx, 0x3F8 # dx = COM1 port .print_loop: # Wait for the transmit buffer to be empty by polling the line status. add dx, 5 # dx = line status register .wait_empty: in al, dx # read line status test al, 0x20 # check buffer empty flag jz .wait_empty # keep waiting if flag is not set .wait_done: sub dx, 5 # dx = data register # Load a byte of the message and send it to the serial port. lodsb # load message byte from RSI to AL out dx, al # send byte to serial port dec rcx # rcx-- jnz .print_loop # repeat if rcx != 0 .done: int3 # cause vcpu to exit
ShaoxunZeng/PyTorch-Medusa
27,813
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-aarch64-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> # Args passed via 8 registers (64 bytes) # x0: mr # x1: nr # x2: k # x3: a # x4: a_stride # x5: w # x6: c # x7: c_stride # # Args passed via stack. # TOS # |-----------| # |out ch indx| 0 # |params | 8 # |-----------| # void pytorch_q8gemm_ukernel_8x8__aarch64_neon( # size_t mr, # size_t nr, # size_t k, # const uint8_t*restrict a, # size_t a_stride, # const void*restrict w, # uint8_t*restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8gemm_ukernel_8x8__aarch64_neon # https://developer.arm.com/docs/ihi0055/d/procedure-call-standard-for-the-arm-64-bit-architecture # Callee need to save 8-15 vector registers and only the lower 64 bits of each. 
# Load params LDP x16, x8, [sp] STP d15, d14, [sp, -16] # Load pointer to per channel zero points array # And go to the a_zero_point with post-index LDR x17, [x8], 8 STP d13, d12, [sp, -32] STP d11, d10, [sp, -48] STP d9, d8, [sp, -64] # Load bias0123, bias4567 LD1 {v8.4s, v9.4s}, [x5], 32 # Add offset to the base pointer ADD x17, x17, x16 # Load b_zero_point LD1 {v25.8b}, [x17] # Load a_zero_point LD1R {v24.8b}, [x8] # Load pointer to per channel requant scale LDR x17, [x8, 8] ADD x8, x8, 16 # v10 := vacc1x0123 MOV v10.16b, v8.16b # v11 := vacc1x4567 MOV v11.16b, v9.16b # v12 := vacc2x0123 MOV v12.16b, v8.16b # v13 := vacc2x4567 MOV v13.16b, v9.16b # v14 := vacc3x0123 MOV v14.16b, v8.16b # v15 := vacc3x4567 MOV v15.16b, v9.16b # v16 := vacc4x0123 MOV v16.16b, v8.16b # v17 := vacc4x4567 MOV v17.16b, v9.16b # v18 := vacc5x0123 MOV v18.16b, v8.16b # v19 := vacc5x4567 MOV v19.16b, v9.16b # v20 := vacc6x0123 MOV v20.16b, v8.16b # v21 := vacc6x4567 MOV v21.16b, v9.16b # v22 := vacc7x0123 MOV v22.16b, v8.16b # v23 := vacc7x4567 MOV v23.16b, v9.16b # Fold mul by 4 to get byte offset for requant scale. 
# Add offset to the base pointer ADD x17, x17, x16, lsl#2 // Load requantization_scale // - v26 = requantization_scale channels 0-3 // - v27 = requantization_scale channels 4-7 LD1 {v26.4s}, [x17], 16 # a1 CMP x0, 2 ADD x9, x3, x4 CSEL x9, x3, x9, LO # a2 ADD x10, x9, x4 CSEL x10, x9, x10, LS # a3 CMP x0, 4 ADD x11, x10, x4 CSEL x11, x10, x11, LO # a4 ADD x12, x11, x4 CSEL x12, x11, x12, LS # a5 CMP x0, 6 ADD x13, x12, x4 CSEL x13, x12, x13, LO # a6 ADD x14, x13, x4 CSEL x14, x13, x14, LS # a7 CMP x0, 8 ADD x15, x14, x4 CSEL x15, x14, x15, NE SUBS x2, x2, 8 B.LO 1f #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 5 #endif 0: // b0-7 (channel 0) LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b # va0 - va7 := va - va_zero_point LD1 {v0.8b}, [x3], 8 SUB_ZERO_POINT v0.8h, v0.8b, v24.8b LD1 {v1.8b}, [x9], 8 SUB_ZERO_POINT v1.8h, v1.8b, v24.8b LD1 {v2.8b}, [x10], 8 SUB_ZERO_POINT v2.8h, v2.8b, v24.8b LD1 {v3.8b}, [x11], 8 SUB_ZERO_POINT v3.8h, v3.8b, v24.8b LD1 {v4.8b}, [x12], 8 SUB_ZERO_POINT v4.8h, v4.8b, v24.8b LD1 {v5.8b}, [x13], 8 SUB_ZERO_POINT v5.8h, v5.8b, v24.8b LD1 {v6.8b}, [x14], 8 SUB_ZERO_POINT v6.8h, v6.8b, v24.8b LD1 {v7.8b}, [x15], 8 SUB_ZERO_POINT v7.8h, v7.8b, v24.8b // b0-7 (channel 1) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0] SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0] SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0] SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0] SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0] SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0] SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0] SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0] SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0] SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0] SMLAL2 v19.4s, v27.8h, v5.h[0] // 
vacc5x4567 += vb4567 * va5[0] SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0] SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0] SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0] SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0] // b0-7 (channel 2) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1] SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1] SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1] SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1] SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1] SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1] SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1] SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1] SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1] SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1] SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1] SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1] SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1] SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1] SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1] // b0-7 (channel 3) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2] SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2] SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2] SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2] SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2] SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2] SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2] SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 
* va4[2] SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2] SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2] SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2] SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2] SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2] SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2] SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2] // b0-7 (channel 4) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3] SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3] SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3] SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3] SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3] SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3] SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3] SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3] SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3] SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3] SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3] SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3] SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3] SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3] SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3] // b0-7 (channel 5) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4] SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4] SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4] SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4] SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4] SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4] SMLAL v14.4s, v27.4h, v3.h[4] // 
vacc3x0123 += vb0123 * va3[4] SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4] SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4] SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4] SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4] SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4] SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4] SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4] SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4] // b0-7 (channel 6) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5] SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5] SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5] SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5] SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5] SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5] SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5] SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5] SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5] SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5] SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5] SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5] SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5] SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5] SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5] // b0-7 (channel 7) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6] SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6] SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6] SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += 
vb4567 * va1[6] SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6] SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6] SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6] SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6] SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6] SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6] SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6] SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6] SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6] SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6] SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6] SUBS x2, x2, 8 SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7] SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7] SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7] SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7] SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7] SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7] SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7] SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7] SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7] SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7] SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7] SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7] SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7] SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7] SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7] SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7] B.HS 0b 1: CMP x2, -8 B.EQ 2f // Adjust a0-a7 ADD x3, x3, x2 ADD x9, x9, x2 ADD x10, x10, x2 ADD x11, x11, x2 ADD x12, x12, x2 ADD x13, x13, x2 ADD x14, x14, x2 
ADD x15, x15, x2 // a_shift = 8 * k - 64 LSL x2, x2, 3 FMOV d29, x2 USHL d24, d24, d29 // Load x0-a7 LD1 {v0.8b}, [x3], 8 USHL d0, d0, d29 SUB_ZERO_POINT v0.8h, v0.8b, v24.8b LD1 {v1.8b}, [x9], 8 USHL d1, d1, d29 SUB_ZERO_POINT v1.8h, v1.8b, v24.8b LD1 {v2.8b}, [x10], 8 USHL d2, d2, d29 SUB_ZERO_POINT v2.8h, v2.8b, v24.8b LD1 {v3.8b}, [x11], 8 USHL d3, d3, d29 SUB_ZERO_POINT v3.8h, v3.8b, v24.8b LD1 {v4.8b}, [x12], 8 USHL d4, d4, d29 SUB_ZERO_POINT v4.8h, v4.8b, v24.8b LD1 {v5.8b}, [x13], 8 USHL d5, d5, d29 SUB_ZERO_POINT v5.8h, v5.8b, v24.8b LD1 {v6.8b}, [x14], 8 USHL d6, d6, d29 SUB_ZERO_POINT v6.8h, v6.8b, v24.8b LD1 {v7.8b}, [x15], 8 USHL d7, d7, d29 SUB_ZERO_POINT v7.8h, v7.8b, v24.8b // Channel 0 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0] SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0] SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0] SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0] SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0] SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0] SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0] SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0] SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0] SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0] SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0] SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0] SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0] SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0] SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0] SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0] CMP x2, -48 B.LO 2f // Channel 1 LD1 {v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1] SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1] SMLAL v10.4s, 
v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1] SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1] SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1] SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1] SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1] SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1] SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1] SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1] SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1] SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1] SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1] SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1] SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1] SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1] B.LS 2f // Channel 2 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2] SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2] SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2] SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2] SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2] SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2] SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2] SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2] SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2] SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2] SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2] SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2] SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2] SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2] SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2] SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2] CMP x2, -32 B.LO 2f // Channel 3 LD1 
{v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3] SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3] SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3] SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3] SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3] SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3] SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3] SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3] SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3] SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3] SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3] SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3] SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3] SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3] SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3] SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3] B.LS 2f // Channel 4 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4] SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4] SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4] SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4] SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4] SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4] SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4] SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4] SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4] SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4] SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4] SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4] SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4] SMLAL2 v21.4s, v27.8h, v6.h[4] // 
vacc6x4567 += vb4567 * va6[4] SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4] SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4] CMP x2, -16 B.LO 2f // Channel 5 LD1 {v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5] SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5] SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5] SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5] SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5] SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5] SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5] SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5] SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5] SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5] SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5] SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5] SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5] SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5] SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5] SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5] B.LS 2f // Channel 6 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6] SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6] SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6] SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6] SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6] SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6] SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6] SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6] SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6] SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6] SMLAL v18.4s, v27.4h, v5.h[6] // 
vacc5x0123 += vb0123 * va5[6] SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6] SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6] SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6] SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6] SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6] #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 4 #endif 2: # Load requant scale for channels 4-7 LD1 {v27.4s}, [x17] // Load zero_point: // - v29 = vzero_point LD1R {v29.8h}, [x8], 2 // Load max: // - v30 = vmax LD1R {v30.16b}, [x8], 1 // Load min: // - v31 = vmin LD1R {v31.16b}, [x8] SCVTF v8.4s, v8.4s SCVTF v9.4s, v9.4s SCVTF v10.4s, v10.4s SCVTF v11.4s, v11.4s SCVTF v12.4s, v12.4s SCVTF v13.4s, v13.4s SCVTF v14.4s, v14.4s SCVTF v15.4s, v15.4s SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMUL v8.4s, v8.4s, v26.4s FMUL v9.4s, v9.4s, v27.4s FMUL v10.4s, v10.4s, v26.4s FMUL v11.4s, v11.4s, v27.4s FMUL v12.4s, v12.4s, v26.4s FMUL v13.4s, v13.4s, v27.4s FMUL v14.4s, v14.4s, v26.4s FMUL v15.4s, v15.4s, v27.4s FMUL v16.4s, v16.4s, v26.4s FMUL v17.4s, v17.4s, v27.4s FMUL v18.4s, v18.4s, v26.4s FMUL v19.4s, v19.4s, v27.4s FMUL v20.4s, v20.4s, v26.4s FMUL v21.4s, v21.4s, v27.4s FMUL v22.4s, v22.4s, v26.4s FMUL v23.4s, v23.4s, v27.4s FCVTNS v8.4s, v8.4s FCVTNS v9.4s, v9.4s FCVTNS v10.4s, v10.4s FCVTNS v11.4s, v11.4s FCVTNS v12.4s, v12.4s FCVTNS v13.4s, v13.4s FCVTNS v14.4s, v14.4s FCVTNS v15.4s, v15.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s SQXTN v8.4h, v8.4s SQXTN v10.4h, v10.4s SQXTN v12.4h, v12.4s SQXTN v14.4h, v14.4s SQXTN v16.4h, v16.4s SQXTN v18.4h, v18.4s SQXTN v20.4h, v20.4s SQXTN v22.4h, v22.4s SQXTN2 v8.8h, v9.4s SQXTN2 v10.8h, v11.4s SQXTN2 v12.8h, v13.4s SQXTN2 
v14.8h, v15.4s SQXTN2 v16.8h, v17.4s SQXTN2 v18.8h, v19.4s SQXTN2 v20.8h, v21.4s SQXTN2 v22.8h, v23.4s SQADD v8.8h, v8.8h, v29.8h SQADD v10.8h, v10.8h, v29.8h SQADD v12.8h, v12.8h, v29.8h SQADD v14.8h, v14.8h, v29.8h SQADD v16.8h, v16.8h, v29.8h SQADD v18.8h, v18.8h, v29.8h SQADD v20.8h, v20.8h, v29.8h SQADD v22.8h, v22.8h, v29.8h SQXTUN v8.8b, v8.8h SQXTUN v12.8b, v12.8h SQXTUN v16.8b, v16.8h SQXTUN v20.8b, v20.8h SQXTUN2 v8.16b, v10.8h SQXTUN2 v12.16b, v14.8h SQXTUN2 v16.16b, v18.8h SQXTUN2 v20.16b, v22.8h UMIN v8.16b, v8.16b, v30.16b UMIN v12.16b, v12.16b, v30.16b UMIN v16.16b, v16.16b, v30.16b UMIN v20.16b, v20.16b, v30.16b UMAX v8.16b, v8.16b, v31.16b UMAX v12.16b, v12.16b, v31.16b UMAX v16.16b, v16.16b, v31.16b UMAX v20.16b, v20.16b, v31.16b // Compute c0-c7 ADD x9, x6, x7 CMP x0, 2 CSEL x9, x6, x9, LO ADD x10, x9, x7 CSEL x10, x9, x10, LS ADD x11, x10, x7 CMP x0, 4 CSEL x11, x10, x11, LO ADD x12, x11, x7 CSEL x12, x11, x12, LS ADD x13, x12, x7 CMP x0, 6 CSEL x13, x12, x13, LO ADD x14, x13, x7 CSEL x14, x13, x14, LS ADD x15, x14, x7 CMP x0, 8 CSEL x15, x14, x15, NE CMP x1, 8 B.NE 4f // Store results ST1 {v8.d}[0], [x6] ST1 {v8.d}[1], [x9] ST1 {v12.d}[0], [x10] ST1 {v12.d}[1], [x11] ST1 {v16.d}[0], [x12] ST1 {v16.d}[1], [x13] ST1 {v20.d}[0], [x14] ST1 {v20.d}[1], [x15] LDP d9, d8, [sp, -64] LDP d11, d10, [sp, -48] LDP d13, d12, [sp, -32] LDP d15, d14, [sp, -16] RET #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 3 #endif 4: CMP x1, 4 B.LO 5f ST1 {v8.s}[0], [x6], 4 ST1 {v8.s}[2], [x9], 4 ST1 {v12.s}[0], [x10], 4 ST1 {v12.s}[2], [x11], 4 ST1 {v16.s}[0], [x12], 4 ST1 {v16.s}[2], [x13], 4 ST1 {v20.s}[0], [x14], 4 ST1 {v20.s}[2], [x15], 4 SUB x1, x1, 4 EXT v8.16b, v8.16b, v8.16b, 4 EXT v12.16b, v12.16b, v12.16b, 4 EXT v16.16b, v16.16b, v16.16b, 4 EXT v20.16b, v20.16b, v20.16b, 4 5: CMP x1, 2 B.LO 6f ST1 {v8.h}[0], [x6], 2 ST1 {v8.h}[4], [x9], 2 ST1 {v12.h}[0], [x10], 2 ST1 {v12.h}[4], [x11], 2 ST1 {v16.h}[0], [x12], 2 ST1 {v16.h}[4], [x13], 2 ST1 {v20.h}[0], [x14], 2 
ST1 {v20.h}[4], [x15], 2 SUB x1, x1, 2 EXT v8.16b, v8.16b, v8.16b, 2 EXT v12.16b, v12.16b, v12.16b, 2 EXT v16.16b, v16.16b, v16.16b, 2 EXT v20.16b, v20.16b, v20.16b, 2 6: CMP x1, 1 B.LO 7f ST1 {v8.b}[0], [x6] ST1 {v8.b}[8], [x9] ST1 {v12.b}[0], [x10] ST1 {v12.b}[8], [x11] ST1 {v16.b}[0], [x12] ST1 {v16.b}[8], [x13] ST1 {v20.b}[0], [x14] ST1 {v20.b}[8], [x15] 7: LDP d9, d8, [sp, -64] LDP d11, d10, [sp, -48] LDP d13, d12, [sp, -32] LDP d15, d14, [sp, -16] RET END_FUNCTION pytorch_q8gemm_ukernel_8x8__aarch64_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
18,740
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> .syntax unified # Args passed via 4 registers (16 bytes) # r0: mr # r1: nr # r2: k # r3: a # # Args passed via stack. # TOS # |-----------| # |a_stride | 0 # |w | 4 # |c | 8 # |c_stride | 12 # |out ch indx| 16 # |params | 20 # |-----------| # # After loading w pointer in ip reg. # And after pushing r4-r9 and d8-d15 on stack # |-----------| # |d8 - d15 | 0 # |r4 - r9 | 64 # |a_stride | 88 # |w | 92 # |c | 96 # |c_stride | 100 # |out ch indx| 104 # |params | 108 # |-----------| # # # New Struct for pytorch_qnnp_conv_quantization_params # kernel zp : 0 offset # input zp : 2 # requantization_scale : 4 # output zp : 8 # output max : 10 # output min : 11 # vfmin : 12 # vfmax : 16 # vfmagic : 20 # vimagic : 24 # # void pytorch_q8gemm_ukernel_4x8__aarch32_neon( # size_t mr, # size_t nr, # size_t k, # const uint8_t*restrict a, # size_t a_stride, # const void*restrict w, # uint8_t*restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8gemm_ukernel_4x8__aarch32_neon .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Load w # - ip = w LDR ip, [sp, 4] PUSH {r4, r5, r6, r7, r8, r9} # Load quantization params # - r7 = quantization_params LDR r7, [sp, 44] VPUSH {d8-d15} # Load bias0123, bias4567 VLDM ip!, {d16-d19} # Load output channel index LDR r5, [sp, 104] # Load pointer to per channel zero points array # Post-index: After load increment r7 by 4 LDR r4, [r7], #4 # Load a_zero_point: # - d14 = a_zero_point VLD1.8 {d14[]}, [r7] # Load a_stride # - r6 = a_stride LDR r9, [sp, 88] # Byte offset of output channel index for requant scale. 
LSL r6, r5, 2 # Load pointer to per channel requant scale # Register offset, load r7+4 LDR r8, [r7, 4] # Add output_channel_index to the b_zero_point pointer ADD r4, r4, r5 # Load b_zero_point: # - d15 = b_zero_point VLD1.8 {d15}, [r4] # add 8 bytes to get to vfmax ADD r7, r7, 12 CMP r0, 2 ADD r4, r3, r9 # Store in r8 pointer from where to load requant scale. ADD r8, r8, r6 MOVLO r4, r3 ADD r5, r4, r9 # q10 := vacc1x0123 VMOV.I32 q10, q8 MOVLS r5, r4 # q11 := vacc1x4567 VMOV.I32 q11, q9 ADD r6, r5, r9 # q12 := vacc2x0123 VMOV.I32 q12, q8 CMP r0, 4 # q13 := vacc2x4567 VMOV.I32 q13, q9 MOVNE r6, r5 # q14 := vacc3x0123 VMOV.I32 q14, q8 SUBS r2, r2, 8 # q15 := vacc3x4567 VMOV.I32 q15, q9 BLO 1f .p2align 5 0: # Load a0 # - d1 = a0 VLD1.8 {d1}, [r3]! # Load a1 # - d3 = a1 VLD1.8 {d3}, [r4]! # Load b0-b7 (channel 0) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # Load a2 # - d5 = a2 VLD1.8 {d5}, [r5]! # q0 = va0 = a0 SUB_ZERO_POINT q0, d1, d14 # Load a3 # - d7 = a3 VLD1.8 {d7}, [r6]! # q1 = va1 = a1 SUB_ZERO_POINT q1, d3, d14 # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 0) # - d9 = vb4567 (channel 0) VSUBL.U8 q4, d9, d15 # q2 = va2 = a2 SUB_ZERO_POINT q2, d5, d14 # q3 = va3 = a3 SUB_ZERO_POINT q3, d7, d14 ### Channel 0 ### # Load b0-b7 (channel 1) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # vacc0x0123 += vb0123 * va0[0] VMLAL.S16 q8, d8, d0[0] # vacc0x4567 += vb4567 * va0[0] VMLAL.S16 q9, d9, d0[0] # vacc1x0123 += vb0123 * va1[0] VMLAL.S16 q10, d8, d2[0] # vacc1x4567 += vb4567 * va1[0] VMLAL.S16 q11, d9, d2[0] # vacc2x0123 += vb0123 * va2[0] VMLAL.S16 q12, d8, d4[0] # vacc2x4567 += vb4567 * va2[0] VMLAL.S16 q13, d9, d4[0] # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 1) # - d11 = vb4567 (channel 1) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[0] VMLAL.S16 q14, d8, d6[0] # vacc3x4567 += vb4567 * va3[0] VMLAL.S16 q15, d9, d6[0] ### Channel 1 ### # Load b0-b7 (channel 2) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[1] VMLAL.S16 q8, d10, d0[1] # vacc0x4567 += vb4567 * va0[1] VMLAL.S16 q9, d11, d0[1] # vacc1x0123 += vb0123 * va1[1] VMLAL.S16 q10, d10, d2[1] # vacc1x4567 += vb4567 * va1[1] VMLAL.S16 q11, d11, d2[1] # vacc2x0123 += vb0123 * va2[1] VMLAL.S16 q12, d10, d4[1] # vacc2x4567 += vb4567 * va2[1] VMLAL.S16 q13, d11, d4[1] # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 2) # - d9 = vb4567 (channel 2) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[1] VMLAL.S16 q14, d10, d6[1] # vacc3x4567 += vb4567 * va3[1] VMLAL.S16 q15, d11, d6[1] ### Channel 2 ### # Load b0-b7 (channel 3) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # vacc0x0123 += vb0123 * va0[2] VMLAL.S16 q8, d8, d0[2] # vacc0x4567 += vb4567 * va0[2] VMLAL.S16 q9, d9, d0[2] # vacc1x0123 += vb0123 * va1[2] VMLAL.S16 q10, d8, d2[2] # vacc1x4567 += vb4567 * va1[2] VMLAL.S16 q11, d9, d2[2] # vacc2x0123 += vb0123 * va2[2] VMLAL.S16 q12, d8, d4[2] # vacc2x4567 += vb4567 * va2[2] VMLAL.S16 q13, d9, d4[2] # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 3) # - d11 = vb4567 (channel 3) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[2] VMLAL.S16 q14, d8, d6[2] # vacc3x4567 += vb4567 * va3[2] VMLAL.S16 q15, d9, d6[2] ### Channel 3 ### # Load b0-b7 (channel 4) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # vacc0x0123 += vb0123 * va0[3] VMLAL.S16 q8, d10, d0[3] # vacc0x4567 += vb4567 * va0[3] VMLAL.S16 q9, d11, d0[3] # vacc1x0123 += vb0123 * va1[3] VMLAL.S16 q10, d10, d2[3] # vacc1x4567 += vb4567 * va1[3] VMLAL.S16 q11, d11, d2[3] # vacc2x0123 += vb0123 * va2[3] VMLAL.S16 q12, d10, d4[3] # vacc2x4567 += vb4567 * va2[3] VMLAL.S16 q13, d11, d4[3] # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 4) # - d11 = vb4567 (channel 4) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[3] VMLAL.S16 q14, d10, d6[3] # vacc3x4567 += vb4567 * va3[3] VMLAL.S16 q15, d11, d6[3] ### Channel 4 ### # Load b0-b7 (channel 5) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[4] VMLAL.S16 q8, d8, d1[0] # vacc0x4567 += vb4567 * va0[4] VMLAL.S16 q9, d9, d1[0] # vacc1x0123 += vb0123 * va1[4] VMLAL.S16 q10, d8, d3[0] # vacc1x4567 += vb4567 * va1[4] VMLAL.S16 q11, d9, d3[0] # vacc2x0123 += vb0123 * va2[4] VMLAL.S16 q12, d8, d5[0] # vacc2x4567 += vb4567 * va2[4] VMLAL.S16 q13, d9, d5[0] # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 5) # - d9 = vb4567 (channel 5) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[4] VMLAL.S16 q14, d8, d7[0] # vacc3x4567 += vb4567 * va3[4] VMLAL.S16 q15, d9, d7[0] ### Channel 5 ### # Load b0-b7 (channel 6) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # vacc0x0123 += vb0123 * va0[5] VMLAL.S16 q8, d10, d1[1] # vacc0x4567 += vb4567 * va0[5] VMLAL.S16 q9, d11, d1[1] # vacc1x0123 += vb0123 * va1[5] VMLAL.S16 q10, d10, d3[1] # vacc1x4567 += vb4567 * va1[5] VMLAL.S16 q11, d11, d3[1] # vacc2x0123 += vb0123 * va2[5] VMLAL.S16 q12, d10, d5[1] # vacc2x4567 += vb4567 * va2[5] VMLAL.S16 q13, d11, d5[1] # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 6) # - d9 = vb4567 (channel 6) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[5] VMLAL.S16 q14, d10, d7[1] # vacc3x4567 += vb4567 * va3[5] VMLAL.S16 q15, d11, d7[1] ### Channel 6 ### # Load b0-b7 (channel 7) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[6] VMLAL.S16 q8, d8, d1[2] # vacc0x4567 += vb4567 * va0[6] VMLAL.S16 q9, d9, d1[2] # vacc1x0123 += vb0123 * va1[6] VMLAL.S16 q10, d8, d3[2] # vacc1x4567 += vb4567 * va1[6] VMLAL.S16 q11, d9, d3[2] # vacc2x0123 += vb0123 * va2[6] VMLAL.S16 q12, d8, d5[2] # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 7) # - d11 = vb4567 (channel 7) VSUBL.U8 q5, d11, d15 # vacc2x4567 += vb4567 * va2[6] VMLAL.S16 q13, d9, d5[2] # vacc3x0123 += vb0123 * va3[6] VMLAL.S16 q14, d8, d7[2] # vacc3x4567 += vb4567 * va3[6] VMLAL.S16 q15, d9, d7[2] ### Channel 8 ### SUBS r2, r2, 8 # vacc0x0123 += vb0123 * va0[7] VMLAL.S16 q8, d10, d1[3] # vacc0x4567 += vb4567 * va0[7] VMLAL.S16 q9, d11, d1[3] # vacc1x0123 += vb0123 * va1[7] VMLAL.S16 q10, d10, d3[3] # vacc1x4567 += vb4567 * va1[7] VMLAL.S16 q11, d11, d3[3] # vacc2x0123 += vb0123 * va2[7] VMLAL.S16 q12, d10, d5[3] # vacc2x4567 += vb4567 * va2[7] VMLAL.S16 q13, d11, d5[3] # vacc3x0123 += vb0123 * va3[7] VMLAL.S16 q14, d10, d7[3] # vacc3x4567 += vb4567 * va3[7] VMLAL.S16 q15, d11, d7[3] BHS 0b 1: CMP r2, -8 BEQ 2f # Adjust a0, a1, a2, a3 ADD r3, r2 ADD r4, r2 ADD r5, r2 ADD r6, r2 # a_shift = 8 * k - 64 LSL r2, r2, 3 VDUP.32 d13, r2 # Load a0 # - d1 = a0 VLD1.8 {d1}, [r3] # Load a1 # - d3 = a1 VLD1.8 {d3}, [r4] # Load b0-b7 (channel 0) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# Load a2 # - d5 = a2 VLD1.8 {d5}, [r5] # q0 = va0 = a0 VSHL.U64 d1, d1, d13 SUB_ZERO_POINT q0, d1, d14 # Load a3 # - d7 = a3 VLD1.8 {d7}, [r6] # q1 = va1 = a1 VSHL.U64 d3, d3, d13 SUB_ZERO_POINT q1, d3, d14 # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 0) # - d9 = vb4567 (channel 0) VSUBL.U8 q4, d9, d15 # q2 = va2 = a2 VSHL.U64 d5, d5, d13 SUB_ZERO_POINT q2, d5, d14 # q3 = va3 = a3 VSHL.U64 d7, d7, d13 SUB_ZERO_POINT q3, d7, d14 ### Channel 0 ### # vacc0x0123 += vb0123 * va0[0] VMLAL.S16 q8, d8, d0[0] # vacc0x4567 += vb4567 * va0[0] VMLAL.S16 q9, d9, d0[0] # vacc1x0123 += vb0123 * va1[0] VMLAL.S16 q10, d8, d2[0] # vacc1x4567 += vb4567 * va1[0] VMLAL.S16 q11, d9, d2[0] # vacc2x0123 += vb0123 * va2[0] VMLAL.S16 q12, d8, d4[0] # vacc2x4567 += vb4567 * va2[0] VMLAL.S16 q13, d9, d4[0] # vacc3x0123 += vb0123 * va3[0] VMLAL.S16 q14, d8, d6[0] # vacc3x4567 += vb4567 * va3[0] VMLAL.S16 q15, d9, d6[0] CMP r2, -48 BLO 2f ### Channel 1 ### # Load b0-b7 (channel 1) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 1) # - d11 = vb4567 (channel 1) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[1] VMLAL.S16 q8, d10, d0[1] # vacc0x4567 += vb4567 * va0[1] VMLAL.S16 q9, d11, d0[1] # vacc1x0123 += vb0123 * va1[1] VMLAL.S16 q10, d10, d2[1] # vacc1x4567 += vb4567 * va1[1] VMLAL.S16 q11, d11, d2[1] # vacc2x0123 += vb0123 * va2[1] VMLAL.S16 q12, d10, d4[1] # vacc2x4567 += vb4567 * va2[1] VMLAL.S16 q13, d11, d4[1] # vacc3x0123 += vb0123 * va3[1] VMLAL.S16 q14, d10, d6[1] # vacc3x4567 += vb4567 * va3[1] VMLAL.S16 q15, d11, d6[1] ### Channel 2 ### BLS 2f # Load b0-b7 (channel 2) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 2) # - d9 = vb4567 (channel 2) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[2] VMLAL.S16 q8, d8, d0[2] # vacc0x4567 += vb4567 * va0[2] VMLAL.S16 q9, d9, d0[2] # vacc1x0123 += vb0123 * va1[2] VMLAL.S16 q10, d8, d2[2] # vacc1x4567 += vb4567 * va1[2] VMLAL.S16 q11, d9, d2[2] # vacc2x0123 += vb0123 * va2[2] VMLAL.S16 q12, d8, d4[2] # vacc2x4567 += vb4567 * va2[2] VMLAL.S16 q13, d9, d4[2] # vacc3x0123 += vb0123 * va3[2] VMLAL.S16 q14, d8, d6[2] # vacc3x4567 += vb4567 * va3[2] VMLAL.S16 q15, d9, d6[2] ### Channel 3 ### CMP r2, -32 BLO 2f # Load b0-b7 (channel 3) # - d9 = b0-b7 VLD1.8 {d11}, [ip:64]! # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 3) # - d9 = vb4567 (channel 3) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[3] VMLAL.S16 q8, d10, d0[3] # vacc0x4567 += vb4567 * va0[3] VMLAL.S16 q9, d11, d0[3] # vacc1x0123 += vb0123 * va1[3] VMLAL.S16 q10, d10, d2[3] # vacc1x4567 += vb4567 * va1[3] VMLAL.S16 q11, d11, d2[3] # vacc2x0123 += vb0123 * va2[3] VMLAL.S16 q12, d10, d4[3] # vacc2x4567 += vb4567 * va2[3] VMLAL.S16 q13, d11, d4[3] # vacc3x0123 += vb0123 * va3[3] VMLAL.S16 q14, d10, d6[3] # vacc3x4567 += vb4567 * va3[3] VMLAL.S16 q15, d11, d6[3] ### Channel 4 ### BLS 2f # Load b0-b7 (channel 4) # - d11 = b0-b7 VLD1.8 {d9}, [ip:64]! # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 4) # - d11 = vb4567 (channel 4) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[4] VMLAL.S16 q8, d8, d1[0] # vacc0x4567 += vb4567 * va0[4] VMLAL.S16 q9, d9, d1[0] # vacc1x0123 += vb0123 * va1[4] VMLAL.S16 q10, d8, d3[0] # vacc1x4567 += vb4567 * va1[4] VMLAL.S16 q11, d9, d3[0] # vacc2x0123 += vb0123 * va2[4] VMLAL.S16 q12, d8, d5[0] # vacc2x4567 += vb4567 * va2[4] VMLAL.S16 q13, d9, d5[0] # vacc3x0123 += vb0123 * va3[4] VMLAL.S16 q14, d8, d7[0] # vacc3x4567 += vb4567 * va3[4] VMLAL.S16 q15, d9, d7[0] ### Channel 5 ### CMP r2, -16 BLO 2f # Load b0-b7 (channel 5) # - d13 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 5) # - d11 = vb4567 (channel 5) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[5] VMLAL.S16 q8, d10, d1[1] # vacc0x4567 += vb4567 * va0[5] VMLAL.S16 q9, d11, d1[1] # vacc1x0123 += vb0123 * va1[5] VMLAL.S16 q10, d10, d3[1] # vacc1x4567 += vb4567 * va1[5] VMLAL.S16 q11, d11, d3[1] # vacc2x0123 += vb0123 * va2[5] VMLAL.S16 q12, d10, d5[1] # vacc2x4567 += vb4567 * va2[5] VMLAL.S16 q13, d11, d5[1] # vacc3x0123 += vb0123 * va3[5] VMLAL.S16 q14, d10, d7[1] # vacc3x4567 += vb4567 * va3[5] VMLAL.S16 q15, d11, d7[1] ### Channel 6 ### BLS 2f # Load b0-b7 (channel 6) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64] # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 6) # - d9 = vb4567 (channel 6) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[6] VMLAL.S16 q8, d8, d1[2] # vacc0x4567 += vb4567 * va0[6] VMLAL.S16 q9, d9, d1[2] # vacc1x0123 += vb0123 * va1[6] VMLAL.S16 q10, d8, d3[2] # vacc1x4567 += vb4567 * va1[6] VMLAL.S16 q11, d9, d3[2] # vacc2x0123 += vb0123 * va2[6] VMLAL.S16 q12, d8, d5[2] # vacc2x4567 += vb4567 * va2[6] VMLAL.S16 q13, d9, d5[2] # vacc3x0123 += vb0123 * va3[6] VMLAL.S16 q14, d8, d7[2] # vacc3x4567 += vb4567 * va3[6] VMLAL.S16 q15, d9, d7[2] .p2align 4 2: # Load requantization_scale: # - d12 = requantization_scale VLD1.32 {d12, d13}, [r8]! # Load vfmax: VLD1.32 {d10[], d11[]}, [r7]! VLD1.32 {d4, d5}, [r8] # Load vfmin: VLD1.32 {d8[], d9[]}, [r7]! # Load vfmagic: VLD1.32 {d0[], d1[]}, [r7]! # Load vimagic: VLD1.32 {d2[], d3[]}, [r7]! 
# Moved here to hide load latency on d14 VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q6 VMUL.F32 q9, q9, q2 VMUL.F32 q10, q10, q6 VMUL.F32 q11, q11, q2 VMUL.F32 q12, q12, q6 VMUL.F32 q13, q13, q2 VMUL.F32 q14, q14, q6 VMUL.F32 q15, q15, q2 VMIN.F32 q8, q8, q5 VMIN.F32 q9, q9, q5 VMIN.F32 q10, q10, q5 VMIN.F32 q11, q11, q5 VMIN.F32 q12, q12, q5 VMIN.F32 q13, q13, q5 VMIN.F32 q14, q14, q5 VMIN.F32 q15, q15, q5 VMAX.F32 q8, q8, q4 VMAX.F32 q9, q9, q4 VMAX.F32 q10, q10, q4 VMAX.F32 q11, q11, q4 VMAX.F32 q12, q12, q4 VMAX.F32 q13, q13, q4 VMAX.F32 q14, q14, q4 VMAX.F32 q15, q15, q4 VADD.F32 q8, q8, q0 VADD.F32 q9, q9, q0 VADD.F32 q10, q10, q0 VADD.F32 q11, q11, q0 VADD.F32 q12, q12, q0 VADD.F32 q13, q13, q0 VADD.F32 q14, q14, q0 VADD.F32 q15, q15, q0 # Load c, c_stride: # - r2 = c # - r2 = c_stride LDRD r2, r3, [sp, 96] VSUB.S32 q8, q8, q1 VSUB.S32 q9, q9, q1 VSUB.S32 q10, q10, q1 VSUB.S32 q11, q11, q1 VSUB.S32 q12, q12, q1 VSUB.S32 q13, q13, q1 VSUB.S32 q14, q14, q1 VSUB.S32 q15, q15, q1 ADD r4, r2, r3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 CMP r0, 2 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 MOVLO r4, r2 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 ADD r5, r4, r3 VQMOVUN.S16 d16, q8 MOVLS r5, r4 VQMOVUN.S16 d17, q9 VQMOVUN.S16 d18, q10 CMP r0, 4 ADD r3, r5, r3 MOVNE r3, r5 CMP r1, 8 VQMOVUN.S16 d19, q11 BNE 4f VST1.8 {d16}, [r2] VST1.8 {d17}, [r4] VST1.8 {d18}, [r5] VST1.8 {d19}, [r3] VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9} BX lr .p2align 3 4: CMP r1, 4 BLO 5f VST1.32 {d16[0]}, [r2]! VST1.32 {d17[0]}, [r4]! VST1.32 {d18[0]}, [r5]! VST1.32 {d19[0]}, [r3]! SUB r1, 4 VEXT.8 q8, q8, q8, 4 VEXT.8 q9, q9, q9, 4 5: CMP r1, 2 BLO 6f VST1.16 {d16[0]}, [r2]! VST1.16 {d17[0]}, [r4]! VST1.16 {d18[0]}, [r5]! VST1.16 {d19[0]}, [r3]! 
SUB r1, 2 VEXT.8 q8, q8, q8, 2 VEXT.8 q9, q9, q9, 2 6: TEQ r1, 0 BEQ 7f VST1.8 {d16[0]}, [r2] VST1.8 {d17[0]}, [r4] VST1.8 {d18[0]}, [r5] VST1.8 {d19[0]}, [r3] 7: VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9} BX lr END_FUNCTION pytorch_q8gemm_ukernel_4x8__aarch32_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
11,694
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8c2-xzp-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> .syntax unified # void pytorch_q8gemm_xzp_ukernel_4x8c2__neon( # size_t mr, # size_t nr, # size_t k, # const uint8_t* restrict a, # size_t a_stride, # const int32_t* restrict a_sum, # const void* restrict w, # uint8_t* restrict c, # size_t c_stride, # const union pytorch_qnnp_q31_requantization_params requantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Load w # - ip = w LDR ip, [sp, 8] # Load bias0123(q8), bias4567(q9) # q8 := vacc0x0123 # q9 := vacc0x4567 VLD1.8 {d16-d19}, [ip]! # q10 := vacc1x0123 VMOV.I32 q10, q8 # q11 := vacc1x4567 VMOV.I32 q11, q9 # q12 := vacc2x0123 VMOV.I32 q12, q8 # q13 := vacc2x4567 VMOV.I32 q13, q9 # q14 := vacc3x0123 VMOV.I32 q14, q8 # q15 := vacc3x4567 VMOV.I32 q15, q9 PUSH {r4, r5, r6, r7, r8, r9, r10, r11} VPUSH {d8-d15} # r3 := a0 # r4 := a1 # r5 := a2 # r6 := a3 # r7 := a_sum0 # r8 := a_sum1 # r9 := a_sum2 # r10 := a_sum3 # a_sum0 := a_sum LDR r7, [sp, 100] # Load a_stride # - ip = a_stride LDR r10, [sp, 96] # compare mr to 2 CMP r0, 2 # a1 += a_stride ADD r4, r3, r10 # mr < 2, a1 := a0 MOVLO r4, r3 # r8 := a_sum1 ADD r8, r7, 4 # mr < 2, a_sum1 := a_sum0 MOVLO r8, r7 # r5 := a2 ADD r5, r4, r10 # mr <= 2, a2 := a1 MOVLS r5, r4 # r9 := a_sum2 ADD r9, r8, 4 # mr <= 2, a_sum2 := a_sum1 MOVLS r9, r8 # compare mr to 4 CMP r0, 4 # r6 := a3 ADD r6, r5, r10 # mr != 4, a3 := a2 MOVNE r6, r5 # a_sum3 := a_sum2 + 1 # r10 := a_sum3 ADD r10, r9, 4 # mr != 4, a_sum3 := a_sum2 MOVNE r10, r9 # load a_sum # q0: va_sum0 VLD1.32 {d0[], d1[]}, [r7] # q1: va_sum1 VLD1.32 {d2[], d3[]}, [r8] # q2: va_sum2 VLD1.32 {d4[], d5[]}, [r9] # q3: va_sum3 VLD1.32 {d6[], d7[]}, [r10] # accumulate a_sum into vacc # 
vacc0x0123 = vaddq_s32(vacc0x0123, va_sum0) VADD.I32 q8, q8, q0 # vacc0x4567 = vaddq_s32(vacc0x4567, va_sum0) VADD.I32 q9, q9, q0 # vacc1x0123 = vaddq_s32(vacc1x0123, va_sum1) VADD.I32 q10, q10, q1 # vacc1x4567 = vaddq_s32(vacc1x4567, va_sum1) VADD.I32 q11, q11, q1 # vacc2x0123 = vaddq_s32(vacc2x0123, va_sum2) VADD.I32 q12, q12, q2 # vacc2x4567 = vaddq_s32(vacc2x4567, va_sum2) VADD.I32 q13, q13, q2 # vacc3x0123 = vaddq_s32(vacc3x0123, va_sum3) VADD.I32 q14, q14, q3 # vacc3x4567 = vaddq_s32(vacc3x4567, va_sum3) VADD.I32 q15, q15, q3 # k -= 8 SUBS r2, r2, 8 BLO 1f .p2align 5 0: # load a # d0 := va0x01234567 VLD1.8 {d0}, [r3]! # d1 := va1x01234567 VLD1.8 {d1}, [r4]! # d2 := va1x01234567 VLD1.8 {d2}, [r5]! # d3 := va2x01234567 VLD1.8 {d3}, [r6]! ##### k = 0, 1 ##### # load b # q2 := vb01234567x01 VLD1.8 {d4, d5}, [ip]! VMULL.U8 q4, d0, d4 VPADAL.U16 q8, q4 VMULL.U8 q5, d0, d5 VPADAL.U16 q9, q5 VMULL.U8 q6, d1, d4 VPADAL.U16 q10, q6 VMULL.U8 q7, d1, d5 VPADAL.U16 q11, q7 VMULL.U8 q4, d2, d4 VPADAL.U16 q12, q4 VMULL.U8 q5, d2, d5 VPADAL.U16 q13, q5 VMULL.U8 q6, d3, d4 VPADAL.U16 q14, q6 VMULL.U8 q7, d3, d5 VPADAL.U16 q15, q7 ##### k = 2, 3 ##### # load b # q2 := vb01234567x01 VLD1.8 {d4, d5}, [ip]! # rotate a VEXT.8 d0, d0, d0, 2 VEXT.8 d1, d1, d1, 2 VEXT.8 d2, d2, d2, 2 VEXT.8 d3, d3, d3, 2 VMULL.U8 q4, d0, d4 VPADAL.U16 q8, q4 VMULL.U8 q5, d0, d5 VPADAL.U16 q9, q5 VMULL.U8 q6, d1, d4 VPADAL.U16 q10, q6 VMULL.U8 q7, d1, d5 VPADAL.U16 q11, q7 VMULL.U8 q4, d2, d4 VPADAL.U16 q12, q4 VMULL.U8 q5, d2, d5 VPADAL.U16 q13, q5 VMULL.U8 q6, d3, d4 VPADAL.U16 q14, q6 VMULL.U8 q7, d3, d5 VPADAL.U16 q15, q7 ##### k = 4, 5 ##### # load b # q2 := vb01234567x01 VLD1.8 {d4, d5}, [ip]! 
# rotate a VEXT.8 d0, d0, d0, 2 VEXT.8 d1, d1, d1, 2 VEXT.8 d2, d2, d2, 2 VEXT.8 d3, d3, d3, 2 VMULL.U8 q4, d0, d4 VPADAL.U16 q8, q4 VMULL.U8 q5, d0, d5 VPADAL.U16 q9, q5 VMULL.U8 q6, d1, d4 VPADAL.U16 q10, q6 VMULL.U8 q7, d1, d5 VPADAL.U16 q11, q7 VMULL.U8 q4, d2, d4 VPADAL.U16 q12, q4 VMULL.U8 q5, d2, d5 VPADAL.U16 q13, q5 VMULL.U8 q6, d3, d4 VPADAL.U16 q14, q6 VMULL.U8 q7, d3, d5 VPADAL.U16 q15, q7 ##### k = 6, 7 ##### # load b # q2 := vb01234567x01 VLD1.8 {d4, d5}, [ip]! # rotate a VEXT.8 d0, d0, d0, 2 VEXT.8 d1, d1, d1, 2 VEXT.8 d2, d2, d2, 2 VEXT.8 d3, d3, d3, 2 VMULL.U8 q4, d0, d4 VPADAL.U16 q8, q4 VMULL.U8 q5, d0, d5 VPADAL.U16 q9, q5 VMULL.U8 q6, d1, d4 VPADAL.U16 q10, q6 VMULL.U8 q7, d1, d5 VPADAL.U16 q11, q7 VMULL.U8 q4, d2, d4 VPADAL.U16 q12, q4 VMULL.U8 q5, d2, d5 VPADAL.U16 q13, q5 VMULL.U8 q6, d3, d4 VPADAL.U16 q14, q6 VMULL.U8 q7, d3, d5 VPADAL.U16 q15, q7 # k -= 8 SUBS r2, r2, 8 # k >= 0, loop BHS 0b 1: # k >= 4 ADDS r2, 8 CMP r2, 4 # branch to 2f when k < 4 BLO 2f SUB r2, r2, 4 ##### k = 0, 1 ##### # d0 := va0x01010101 VLD1.16 {d0[]}, [r3]! # d1 := va1x01010101 VLD1.16 {d1[]}, [r4]! # d2 := va2x01010101 VLD1.16 {d2[]}, [r5]! # d3 := va3x01010101 VLD1.16 {d3[]}, [r6]! # q7 := vb01234567x01 VLD1.8 {d14, d15}, [ip]! # row 0 VMULL.U8 q2, d0, d14 VPADAL.U16 q8, q2 VMULL.U8 q3, d0, d15 VPADAL.U16 q9, q3 # row 1 VMULL.U8 q4, d1, d14 VPADAL.U16 q10, q4 VMULL.U8 q5, d1, d15 VPADAL.U16 q11, q5 # row 2 VMULL.U8 q2, d2, d14 VPADAL.U16 q12, q2 VMULL.U8 q3, d2, d15 VPADAL.U16 q13, q3 # row 3 VMULL.U8 q4, d3, d14 VPADAL.U16 q14, q4 VMULL.U8 q5, d3, d15 VPADAL.U16 q15, q5 ##### k = 2, 3 ##### # d0 := va0x01010101 VLD1.16 {d0[]}, [r3]! # d1 := va1x01010101 VLD1.16 {d1[]}, [r4]! # d2 := va2x01010101 VLD1.16 {d2[]}, [r5]! # d3 := va3x01010101 VLD1.16 {d3[]}, [r6]! # q7 := vb01234567x01 VLD1.8 {d14, d15}, [ip]! 
# row 0 VMULL.U8 q2, d0, d14 VPADAL.U16 q8, q2 VMULL.U8 q3, d0, d15 VPADAL.U16 q9, q3 # row 1 VMULL.U8 q4, d1, d14 VPADAL.U16 q10, q4 VMULL.U8 q5, d1, d15 VPADAL.U16 q11, q5 # row 2 VMULL.U8 q2, d2, d14 VPADAL.U16 q12, q2 VMULL.U8 q3, d2, d15 VPADAL.U16 q13, q3 # row 3 VMULL.U8 q4, d3, d14 VPADAL.U16 q14, q4 VMULL.U8 q5, d3, d15 VPADAL.U16 q15, q5 2: # k >= 2 CMP r2, 2 BLO 3f SUB r2, r2, 2 ##### k = 0, 1 ##### # d0 := va0x01010101 VLD1.16 {d0[]}, [r3]! # d1 := va1x01010101 VLD1.16 {d1[]}, [r4]! # d2 := va2x01010101 VLD1.16 {d2[]}, [r5]! # d3 := va3x01010101 VLD1.16 {d3[]}, [r6]! # q7 := vb01234567x01 VLD1.8 {d14, d15}, [ip]! # row 0 VMULL.U8 q2, d0, d14 VPADAL.U16 q8, q2 VMULL.U8 q3, d0, d15 VPADAL.U16 q9, q3 # row 1 VMULL.U8 q4, d1, d14 VPADAL.U16 q10, q4 VMULL.U8 q5, d1, d15 VPADAL.U16 q11, q5 # row 2 VMULL.U8 q2, d2, d14 VPADAL.U16 q12, q2 VMULL.U8 q3, d2, d15 VPADAL.U16 q13, q3 # row 3 VMULL.U8 q4, d3, d14 VPADAL.U16 q14, q4 VMULL.U8 q5, d3, d15 VPADAL.U16 q15, q5 3: # k == 1 CMP r2, 1 BLO 4f # d0 := va0x01010101 VLD1.8 {d0[]}, [r3] # d1 := va1x01010101 VLD1.8 {d1[]}, [r4] # d2 := va2x01010101 VLD1.8 {d2[]}, [r5] # d3 := va3x01010101 VLD1.8 {d3[]}, [r6] # q7 := vb01234567x01 VLD1.8 {d14, d15}, [ip] # row 0 VMULL.U8 q2, d0, d14 VPADAL.U16 q8, q2 VMULL.U8 q3, d0, d15 VPADAL.U16 q9, q3 # row 1 VMULL.U8 q4, d1, d14 VPADAL.U16 q10, q4 VMULL.U8 q5, d1, d15 VPADAL.U16 q11, q5 # row 2 VMULL.U8 q2, d2, d14 VPADAL.U16 q12, q2 VMULL.U8 q3, d2, d15 VPADAL.U16 q13, q3 # row 3 VMULL.U8 q4, d3, d14 VPADAL.U16 q14, q4 VMULL.U8 q5, d3, d15 VPADAL.U16 q15, q5 .p2align 4 4: # Load params: # - ip = params LDR ip, [sp, 116] # Load multiplier: # - d12 = vmultiplier VLD1.32 {d12[]}, [ip]! # Load right_shift # - q4 = d8:d9 = vright_shift VLD1.32 {d8[], d9[]}, [ip]! 
VQRDMULH.S32 q8, q8, d12[0] VQRDMULH.S32 q9, q9, d12[0] VQRDMULH.S32 q10, q10, d12[0] VQRDMULH.S32 q11, q11, d12[0] # Compute vzero_shift_mask # - q5 = vzero_shift_mask VCEQ.S32 q5, q4, 0 VQRDMULH.S32 q12, q12, d12[0] VQRDMULH.S32 q13, q13, d12[0] VQRDMULH.S32 q14, q14, d12[0] VQRDMULH.S32 q15, q15, d12[0] VBIC q0, q8, q5 VBIC q1, q9, q5 VBIC q2, q10, q5 VBIC q3, q11, q5 VSRA.S32 q8, q0, 31 VSRA.S32 q9, q1, 31 VSRA.S32 q10, q2, 31 VSRA.S32 q11, q3, 31 # Load zero_point # - q7 = d14:d15 = vzero_point VLD1.16 {d14[], d15[]}, [ip]! VBIC q0, q12, q5 VBIC q1, q13, q5 VBIC q2, q14, q5 VBIC q3, q15, q5 VSRA.S32 q12, q0, 31 VSRA.S32 q13, q1, 31 VSRA.S32 q14, q2, 31 VSRA.S32 q15, q3, 31 # Load max: # - q5 = d10:d11 = vmax VLD1.8 {d10[], d11[]}, [ip]! VRSHL.S32 q8, q8, q4 VRSHL.S32 q9, q9, q4 VRSHL.S32 q10, q10, q4 VRSHL.S32 q11, q11, q4 VRSHL.S32 q12, q12, q4 VRSHL.S32 q13, q13, q4 VRSHL.S32 q14, q14, q4 VRSHL.S32 q15, q15, q4 # Load c, c_stride: # - r2 = c # - r3 = c_stride LDRD r2, r3, [sp, 108] VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 # Load min: # - q4 = q8:q9 = vmin VLD1.8 {d8[], d9[]}, [ip]! ADD r4, r2, r3 VQADD.S16 q8, q8, q7 VQADD.S16 q9, q9, q7 CMP r0, 2 VQADD.S16 q10, q10, q7 VQADD.S16 q11, q11, q7 MOVLO r4, r2 VQMOVUN.S16 d16, q8 VQMOVUN.S16 d17, q9 ADD r5, r4, r3 VQMOVUN.S16 d18, q10 VQMOVUN.S16 d19, q11 MOVLS r5, r4 VMIN.U8 q8, q8, q5 CMP r0, 4 VMIN.U8 q9, q9, q5 ADD r3, r5, r3 VMAX.U8 q8, q8, q4 MOVNE r3, r5 CMP r1, 8 VMAX.U8 q9, q9, q4 BNE 5f VST1.8 {d16}, [r2] VST1.8 {d17}, [r4] VST1.8 {d18}, [r5] VST1.8 {d19}, [r3] VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr .p2align 3 5: CMP r1, 4 BLO 6f VST1.32 {d16[0]}, [r2]! VST1.32 {d17[0]}, [r4]! VST1.32 {d18[0]}, [r5]! VST1.32 {d19[0]}, [r3]! SUB r1, 4 VEXT.8 q8, q8, q8, 4 VEXT.8 q9, q9, q9, 4 6: CMP r1, 2 BLO 7f VST1.16 {d16[0]}, [r2]! VST1.16 {d17[0]}, [r4]! VST1.16 {d18[0]}, [r5]! 
VST1.16 {d19[0]}, [r3]! SUB r1, 2 VEXT.8 q8, q8, q8, 2 VEXT.8 q9, q9, q9, 2 7: TEQ r1, 0 BEQ 8f VST1.8 {d16[0]}, [r2] VST1.8 {d17[0]}, [r4] VST1.8 {d18[0]}, [r5] VST1.8 {d19[0]}, [r3] 8: VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
17,365
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8-dq-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> # r0 mr # r1 nr # r2 k # r3 a # r6 a_stride # d14 a_zero_point # d15 b_zero_point ## Stack # 4 quantization_params # 4 c_stride # 4 c # 4 b # 4 w # 4 a_stride # -- # 16 r4-r7 # 64 d8-d18 .syntax unified # Args passed via stack. # TOS # |-----------| # |a_stride | 0 # |w | 4 # |c | 8 # |c_stride | 12 # |out ch indx| 16 # |params | 20 # |-----------| # # After loading w pointer in ip reg. # And after pushing r4-r8 and d8-d15 on stack # |-----------| # |d8 - d15 | 0 # |r4 - r7 | 64 # |a_stride | 80 # |w | 84 # |b | 88 # |c | 92 # |c_stride | 96 # |out ch indx| 100 # |params | 104 # |-----------| # # void pytorch_q8gemm_ukernel_4x8__aarch32_neon( # size_t mr, # size_t nr, # size_t k, # const uint8_t* restrict a, # size_t a_stride, # const void* restrict w, # const float* restrict b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Load w # - ip = w LDR ip, [sp, 4] ADD ip, ip, 32 PUSH {r4, r5, r6, r7} VPUSH {d8-d15} # Load output channel index LDR r5, [sp, 100] # Load quantization params # - r7 = quantization_params LDR r7, [sp, 104] # Load input_zero_point VLD1.8 {d14[]}, [r7] ADD r7, r7, 4 # Load pointer to per channel zero points array # Post-index: After load increment r7 by 4 LDR r4, [r7], #4 # Byte offset of output channel index for requant scale. 
LSL r6, r5, 2 VEOR q8, q8, q8 VEOR q9, q9, q9 # Load pointer to per channel requant scale LDR r7, [r7] # Add output_channel_index to the b_zero_point pointer ADD r4, r4, r5 # Now r7 has the base_addr + offset for multipliers ADD r7, r7, r6 # Load a_stride # - r6 = a_stride LDR r6, [sp, 80] VEOR q10, q10, q10 VEOR q11, q11, q11 VLD1.8 {d15}, [r4] CMP r0, 2 ADD r4, r3, r6 MOVLO r4, r3 ADD r5, r4, r6 MOVLS r5, r4 CMP r0, 4 ADD r6, r5, r6 MOVNE r6, r5 VEOR q12, q12, q12 VEOR q13, q13, q13 VEOR q14, q14, q14 VEOR q15, q15, q15 SUBS r2, r2, 8 BLO 1f .p2align 5 0: # Load a0 # - d1 = a0 VLD1.8 {d1}, [r3]! # Load a1 # - d3 = a1 VLD1.8 {d3}, [r4]! # Load b0-b7 (channel 0) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # Load a2 # - d5 = a2 VLD1.8 {d5}, [r5]! # q0 = va0 = a0 SUB_ZERO_POINT q0, d1, d14 # Load a3 # - d7 = a3 VLD1.8 {d7}, [r6]! # q1 = va1 = a1 SUB_ZERO_POINT q1, d3, d14 # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 0) # - d9 = vb4567 (channel 0) VSUBL.U8 q4, d9, d15 # q2 = va2 = a2 SUB_ZERO_POINT q2, d5, d14 # q3 = va3 = a3 SUB_ZERO_POINT q3, d7, d14 ### Channel 0 ### # Load b0-b7 (channel 1) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # vacc0x0123 += vb0123 * va0[0] VMLAL.S16 q8, d8, d0[0] # vacc0x4567 += vb4567 * va0[0] VMLAL.S16 q9, d9, d0[0] # vacc1x0123 += vb0123 * va1[0] VMLAL.S16 q10, d8, d2[0] # vacc1x4567 += vb4567 * va1[0] VMLAL.S16 q11, d9, d2[0] # vacc2x0123 += vb0123 * va2[0] VMLAL.S16 q12, d8, d4[0] # vacc2x4567 += vb4567 * va2[0] VMLAL.S16 q13, d9, d4[0] # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 1) # - d11 = vb4567 (channel 1) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[0] VMLAL.S16 q14, d8, d6[0] # vacc3x4567 += vb4567 * va3[0] VMLAL.S16 q15, d9, d6[0] ### Channel 1 ### # Load b0-b7 (channel 2) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[1] VMLAL.S16 q8, d10, d0[1] # vacc0x4567 += vb4567 * va0[1] VMLAL.S16 q9, d11, d0[1] # vacc1x0123 += vb0123 * va1[1] VMLAL.S16 q10, d10, d2[1] # vacc1x4567 += vb4567 * va1[1] VMLAL.S16 q11, d11, d2[1] # vacc2x0123 += vb0123 * va2[1] VMLAL.S16 q12, d10, d4[1] # vacc2x4567 += vb4567 * va2[1] VMLAL.S16 q13, d11, d4[1] # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 2) # - d9 = vb4567 (channel 2) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[1] VMLAL.S16 q14, d10, d6[1] # vacc3x4567 += vb4567 * va3[1] VMLAL.S16 q15, d11, d6[1] ### Channel 2 ### # Load b0-b7 (channel 3) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # vacc0x0123 += vb0123 * va0[2] VMLAL.S16 q8, d8, d0[2] # vacc0x4567 += vb4567 * va0[2] VMLAL.S16 q9, d9, d0[2] # vacc1x0123 += vb0123 * va1[2] VMLAL.S16 q10, d8, d2[2] # vacc1x4567 += vb4567 * va1[2] VMLAL.S16 q11, d9, d2[2] # vacc2x0123 += vb0123 * va2[2] VMLAL.S16 q12, d8, d4[2] # vacc2x4567 += vb4567 * va2[2] VMLAL.S16 q13, d9, d4[2] # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 3) # - d11 = vb4567 (channel 3) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[2] VMLAL.S16 q14, d8, d6[2] # vacc3x4567 += vb4567 * va3[2] VMLAL.S16 q15, d9, d6[2] ### Channel 3 ### # Load b0-b7 (channel 4) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # vacc0x0123 += vb0123 * va0[3] VMLAL.S16 q8, d10, d0[3] # vacc0x4567 += vb4567 * va0[3] VMLAL.S16 q9, d11, d0[3] # vacc1x0123 += vb0123 * va1[3] VMLAL.S16 q10, d10, d2[3] # vacc1x4567 += vb4567 * va1[3] VMLAL.S16 q11, d11, d2[3] # vacc2x0123 += vb0123 * va2[3] VMLAL.S16 q12, d10, d4[3] # vacc2x4567 += vb4567 * va2[3] VMLAL.S16 q13, d11, d4[3] # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 4) # - d11 = vb4567 (channel 4) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[3] VMLAL.S16 q14, d10, d6[3] # vacc3x4567 += vb4567 * va3[3] VMLAL.S16 q15, d11, d6[3] ### Channel 4 ### # Load b0-b7 (channel 5) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[4] VMLAL.S16 q8, d8, d1[0] # vacc0x4567 += vb4567 * va0[4] VMLAL.S16 q9, d9, d1[0] # vacc1x0123 += vb0123 * va1[4] VMLAL.S16 q10, d8, d3[0] # vacc1x4567 += vb4567 * va1[4] VMLAL.S16 q11, d9, d3[0] # vacc2x0123 += vb0123 * va2[4] VMLAL.S16 q12, d8, d5[0] # vacc2x4567 += vb4567 * va2[4] VMLAL.S16 q13, d9, d5[0] # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 5) # - d9 = vb4567 (channel 5) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[4] VMLAL.S16 q14, d8, d7[0] # vacc3x4567 += vb4567 * va3[4] VMLAL.S16 q15, d9, d7[0] ### Channel 5 ### # Load b0-b7 (channel 6) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # vacc0x0123 += vb0123 * va0[5] VMLAL.S16 q8, d10, d1[1] # vacc0x4567 += vb4567 * va0[5] VMLAL.S16 q9, d11, d1[1] # vacc1x0123 += vb0123 * va1[5] VMLAL.S16 q10, d10, d3[1] # vacc1x4567 += vb4567 * va1[5] VMLAL.S16 q11, d11, d3[1] # vacc2x0123 += vb0123 * va2[5] VMLAL.S16 q12, d10, d5[1] # vacc2x4567 += vb4567 * va2[5] VMLAL.S16 q13, d11, d5[1] # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 6) # - d9 = vb4567 (channel 6) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[5] VMLAL.S16 q14, d10, d7[1] # vacc3x4567 += vb4567 * va3[5] VMLAL.S16 q15, d11, d7[1] ### Channel 6 ### # Load b0-b7 (channel 7) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[6] VMLAL.S16 q8, d8, d1[2] # vacc0x4567 += vb4567 * va0[6] VMLAL.S16 q9, d9, d1[2] # vacc1x0123 += vb0123 * va1[6] VMLAL.S16 q10, d8, d3[2] # vacc1x4567 += vb4567 * va1[6] VMLAL.S16 q11, d9, d3[2] # vacc2x0123 += vb0123 * va2[6] VMLAL.S16 q12, d8, d5[2] # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 7) # - d11 = vb4567 (channel 7) VSUBL.U8 q5, d11, d15 # vacc2x4567 += vb4567 * va2[6] VMLAL.S16 q13, d9, d5[2] # vacc3x0123 += vb0123 * va3[6] VMLAL.S16 q14, d8, d7[2] # vacc3x4567 += vb4567 * va3[6] VMLAL.S16 q15, d9, d7[2] ### Channel 8 ### SUBS r2, r2, 8 # vacc0x0123 += vb0123 * va0[7] VMLAL.S16 q8, d10, d1[3] # vacc0x4567 += vb4567 * va0[7] VMLAL.S16 q9, d11, d1[3] # vacc1x0123 += vb0123 * va1[7] VMLAL.S16 q10, d10, d3[3] # vacc1x4567 += vb4567 * va1[7] VMLAL.S16 q11, d11, d3[3] # vacc2x0123 += vb0123 * va2[7] VMLAL.S16 q12, d10, d5[3] # vacc2x4567 += vb4567 * va2[7] VMLAL.S16 q13, d11, d5[3] # vacc3x0123 += vb0123 * va3[7] VMLAL.S16 q14, d10, d7[3] # vacc3x4567 += vb4567 * va3[7] VMLAL.S16 q15, d11, d7[3] BHS 0b 1: CMP r2, -8 BEQ 2f # Adjust a0, a1, a2, a3 ADD r3, r2 ADD r4, r2 ADD r5, r2 ADD r6, r2 # a_shift = 8 * k - 64 LSL r2, r2, 3 VDUP.32 d13, r2 # Load a0 # - d1 = a0 VLD1.8 {d1}, [r3] # Load a1 # - d3 = a1 VLD1.8 {d3}, [r4] # Load b0-b7 (channel 0) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# Load a2 # - d5 = a2 VLD1.8 {d5}, [r5] # q0 = va0 = a0 VSHL.U64 d1, d1, d13 SUB_ZERO_POINT q0, d1, d14 # Load a3 # - d7 = a3 VLD1.8 {d7}, [r6] # q1 = va1 = a1 VSHL.U64 d3, d3, d13 SUB_ZERO_POINT q1, d3, d14 # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 0) # - d9 = vb4567 (channel 0) VSUBL.U8 q4, d9, d15 # q2 = va2 = a2 VSHL.U64 d5, d5, d13 SUB_ZERO_POINT q2, d5, d14 # q3 = va3 = a3 VSHL.U64 d7, d7, d13 SUB_ZERO_POINT q3, d7, d14 ### Channel 0 ### # vacc0x0123 += vb0123 * va0[0] VMLAL.S16 q8, d8, d0[0] # vacc0x4567 += vb4567 * va0[0] VMLAL.S16 q9, d9, d0[0] # vacc1x0123 += vb0123 * va1[0] VMLAL.S16 q10, d8, d2[0] # vacc1x4567 += vb4567 * va1[0] VMLAL.S16 q11, d9, d2[0] # vacc2x0123 += vb0123 * va2[0] VMLAL.S16 q12, d8, d4[0] # vacc2x4567 += vb4567 * va2[0] VMLAL.S16 q13, d9, d4[0] # vacc3x0123 += vb0123 * va3[0] VMLAL.S16 q14, d8, d6[0] # vacc3x4567 += vb4567 * va3[0] VMLAL.S16 q15, d9, d6[0] CMP r2, -48 BLO 2f ### Channel 1 ### # Load b0-b7 (channel 1) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 1) # - d11 = vb4567 (channel 1) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[1] VMLAL.S16 q8, d10, d0[1] # vacc0x4567 += vb4567 * va0[1] VMLAL.S16 q9, d11, d0[1] # vacc1x0123 += vb0123 * va1[1] VMLAL.S16 q10, d10, d2[1] # vacc1x4567 += vb4567 * va1[1] VMLAL.S16 q11, d11, d2[1] # vacc2x0123 += vb0123 * va2[1] VMLAL.S16 q12, d10, d4[1] # vacc2x4567 += vb4567 * va2[1] VMLAL.S16 q13, d11, d4[1] # vacc3x0123 += vb0123 * va3[1] VMLAL.S16 q14, d10, d6[1] # vacc3x4567 += vb4567 * va3[1] VMLAL.S16 q15, d11, d6[1] ### Channel 2 ### BLS 2f # Load b0-b7 (channel 2) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 2) # - d9 = vb4567 (channel 2) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[2] VMLAL.S16 q8, d8, d0[2] # vacc0x4567 += vb4567 * va0[2] VMLAL.S16 q9, d9, d0[2] # vacc1x0123 += vb0123 * va1[2] VMLAL.S16 q10, d8, d2[2] # vacc1x4567 += vb4567 * va1[2] VMLAL.S16 q11, d9, d2[2] # vacc2x0123 += vb0123 * va2[2] VMLAL.S16 q12, d8, d4[2] # vacc2x4567 += vb4567 * va2[2] VMLAL.S16 q13, d9, d4[2] # vacc3x0123 += vb0123 * va3[2] VMLAL.S16 q14, d8, d6[2] # vacc3x4567 += vb4567 * va3[2] VMLAL.S16 q15, d9, d6[2] ### Channel 3 ### CMP r2, -32 BLO 2f # Load b0-b7 (channel 3) # - d9 = b0-b7 VLD1.8 {d11}, [ip:64]! # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 3) # - d9 = vb4567 (channel 3) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[3] VMLAL.S16 q8, d10, d0[3] # vacc0x4567 += vb4567 * va0[3] VMLAL.S16 q9, d11, d0[3] # vacc1x0123 += vb0123 * va1[3] VMLAL.S16 q10, d10, d2[3] # vacc1x4567 += vb4567 * va1[3] VMLAL.S16 q11, d11, d2[3] # vacc2x0123 += vb0123 * va2[3] VMLAL.S16 q12, d10, d4[3] # vacc2x4567 += vb4567 * va2[3] VMLAL.S16 q13, d11, d4[3] # vacc3x0123 += vb0123 * va3[3] VMLAL.S16 q14, d10, d6[3] # vacc3x4567 += vb4567 * va3[3] VMLAL.S16 q15, d11, d6[3] ### Channel 4 ### BLS 2f # Load b0-b7 (channel 4) # - d11 = b0-b7 VLD1.8 {d9}, [ip:64]! # q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 4) # - d11 = vb4567 (channel 4) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[4] VMLAL.S16 q8, d8, d1[0] # vacc0x4567 += vb4567 * va0[4] VMLAL.S16 q9, d9, d1[0] # vacc1x0123 += vb0123 * va1[4] VMLAL.S16 q10, d8, d3[0] # vacc1x4567 += vb4567 * va1[4] VMLAL.S16 q11, d9, d3[0] # vacc2x0123 += vb0123 * va2[4] VMLAL.S16 q12, d8, d5[0] # vacc2x4567 += vb4567 * va2[4] VMLAL.S16 q13, d9, d5[0] # vacc3x0123 += vb0123 * va3[4] VMLAL.S16 q14, d8, d7[0] # vacc3x4567 += vb4567 * va3[4] VMLAL.S16 q15, d9, d7[0] ### Channel 5 ### CMP r2, -16 BLO 2f # Load b0-b7 (channel 5) # - d13 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# q5 = b0:7 - b_zero_point # - d10 = vb0123 (channel 5) # - d11 = vb4567 (channel 5) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[5] VMLAL.S16 q8, d10, d1[1] # vacc0x4567 += vb4567 * va0[5] VMLAL.S16 q9, d11, d1[1] # vacc1x0123 += vb0123 * va1[5] VMLAL.S16 q10, d10, d3[1] # vacc1x4567 += vb4567 * va1[5] VMLAL.S16 q11, d11, d3[1] # vacc2x0123 += vb0123 * va2[5] VMLAL.S16 q12, d10, d5[1] # vacc2x4567 += vb4567 * va2[5] VMLAL.S16 q13, d11, d5[1] # vacc3x0123 += vb0123 * va3[5] VMLAL.S16 q14, d10, d7[1] # vacc3x4567 += vb4567 * va3[5] VMLAL.S16 q15, d11, d7[1] ### Channel 6 ### BLS 2f # Load b0-b7 (channel 6) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64] # q4 = b0:7 - b_zero_point # - d8 = vb0123 (channel 6) # - d9 = vb4567 (channel 6) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[6] VMLAL.S16 q8, d8, d1[2] # vacc0x4567 += vb4567 * va0[6] VMLAL.S16 q9, d9, d1[2] # vacc1x0123 += vb0123 * va1[6] VMLAL.S16 q10, d8, d3[2] # vacc1x4567 += vb4567 * va1[6] VMLAL.S16 q11, d9, d3[2] # vacc2x0123 += vb0123 * va2[6] VMLAL.S16 q12, d8, d5[2] # vacc2x4567 += vb4567 * va2[6] VMLAL.S16 q13, d9, d5[2] # vacc3x0123 += vb0123 * va3[6] VMLAL.S16 q14, d8, d7[2] # vacc3x4567 += vb4567 * va3[6] VMLAL.S16 q15, d9, d7[2] .p2align 4 2: LDR r6, [sp, 88] # Load q6: vmultiplier_c0123 VLD1.32 {d12, d13}, [r7]! 
VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 # Load q7: vmultiplier_c4567 VLD1.32 {d14, d15}, [r7] VLD1.32 {q0, q1}, [r6] VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q6 VMUL.F32 q9, q9, q7 VMUL.F32 q10, q10, q6 VMUL.F32 q11, q11, q7 VMUL.F32 q12, q12, q6 VMUL.F32 q13, q13, q7 VMUL.F32 q14, q14, q6 VMUL.F32 q15, q15, q7 VADD.F32 q8, q8, q0 VADD.F32 q9, q9, q1 VADD.F32 q10, q10, q0 VADD.F32 q11, q11, q1 VADD.F32 q12, q12, q0 VADD.F32 q13, q13, q1 VADD.F32 q14, q14, q0 VADD.F32 q15, q15, q1 # Load c, c_stride: # - r2 = c # - r3 = c_stride LDRD r2, r3, [sp, 92] LSL r3, r3, 2 ADD r4, r2, r3 CMP r0, 2 MOVLO r4, r2 ADD r5, r4, r3 MOVLS r5, r4 CMP r0, 4 ADD r3, r5, r3 MOVNE r3, r5 CMP r1, 8 BNE 4f VST1.32 {q8}, [r2]! VST1.32 {q10}, [r4]! VST1.32 {q12}, [r5]! VST1.32 {q14}, [r3]! VST1.32 {q9}, [r2] VST1.32 {q11}, [r4] VST1.32 {q13}, [r5] VST1.32 {q15}, [r3] VPOP {d8-d15} POP {r4, r5, r6, r7} BX lr .p2align 3 4: CMP r1, 4 BLO 5f VST1.32 {q8}, [r2]! VST1.32 {q10}, [r4]! VST1.32 {q12}, [r5]! VST1.32 {q14}, [r3]! SUB r1, 4 VMOV.32 q8, q9 VMOV.32 q10, q11 VMOV.32 q12, q13 VMOV.32 q14, q15 5: CMP r1, 2 BLO 6f VST1.32 {d16}, [r2]! VST1.32 {d20}, [r4]! VST1.32 {d24}, [r5]! VST1.32 {d28}, [r3]! SUB r1, 2 VEXT.32 q8, q8, 2 VEXT.32 q10, q10, 2 VEXT.32 q12, q12, 2 VEXT.32 q14, q14, 2 6: TEQ r1, 0 BEQ 7f VST1.32 {d16[0]}, [r2]! VST1.32 {d20[0]}, [r4]! VST1.32 {d24[0]}, [r5]! VST1.32 {d28[0]}, [r3]! 7: VPOP {d8-d15} POP {r4, r5, r6, r7} BX lr END_FUNCTION pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
26,778
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> # params # c_stride # Args passed via stack. # TOS # |-----------| # |c_stride | 0 # |out ch indx| 8 # |params | 16 # |-----------| # void pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon( # size_t mr, # size_t nr, # size_t k, # const uint8_t*restrict a, # size_t a_stride, # const void*restrict w, # const float*restrict b, # uint8_t*restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon STP d15, d14, [sp, -16] STP d13, d12, [sp, -32] STP d11, d10, [sp, -48] STP d9, d8, [sp, -64] # Skip over bias0123, bias4567 ADD x5, x5, 32 # Load c_stride & params LDR x16, [sp] # Load output channel index LDR x10, [sp, 8] # Load params LDR x8, [sp, 16] # Load a_zero_point LD1R {v24.8b}, [x8] ADD x8, x8, 8 # Load pointer to per channel zero points array LDR x17, [x8], 8 # v8 := zero EOR v8.16b, v8.16b, v8.16b # v9 := zero EOR v9.16b, v9.16b, v9.16b # v10 := zero EOR v10.16b, v10.16b, v10.16b # v11 := zero EOR v11.16b, v11.16b, v11.16b # Load pointer to per channel multiplier LDR x13, [x8] # v12 := zero EOR v12.16b, v12.16b, v12.16b # v13 := zero EOR v13.16b, v13.16b, v13.16b # Add offset to the base pointer ADD x17, x17, x10 # Mul by 4 to get byte offset for multiplier LSL x10, x10, 2 # Add offset to the base pointer for multiplier ADD x13, x13, x10 # Load b_zero_point LD1 {v25.8b}, [x17] # Load multiplier c0123 LD1 {v26.4s}, [x13], 16 # Load multiplier c4567 LD1 {v30.4s}, [x13] # v14 := zero EOR v14.16b, v14.16b, v14.16b # v15 := zero EOR v15.16b, v15.16b, v15.16b # v16 := zero EOR v16.16b, v16.16b, v16.16b # v17 := zero EOR v17.16b, v17.16b, 
v17.16b # v18 := zero EOR v18.16b, v18.16b, v18.16b # v19 := zero EOR v19.16b, v19.16b, v19.16b # v20 := zero EOR v20.16b, v20.16b, v20.16b # v21 := zero EOR v21.16b, v21.16b, v21.16b # v22 := zero EOR v22.16b, v22.16b, v22.16b # v23 := zero EOR v23.16b, v23.16b, v23.16b # a1 CMP x0, 2 ADD x9, x3, x4 CSEL x9, x3, x9, LO # a2 ADD x10, x9, x4 CSEL x10, x9, x10, LS # a3 CMP x0, 4 ADD x11, x10, x4 CSEL x11, x10, x11, LO # a4 ADD x12, x11, x4 CSEL x12, x11, x12, LS # a5 CMP x0, 6 ADD x13, x12, x4 CSEL x13, x12, x13, LO # a6 ADD x14, x13, x4 CSEL x14, x13, x14, LS # a7 CMP x0, 8 ADD x15, x14, x4 CSEL x15, x14, x15, NE SUBS x2, x2, 8 B.LO 1f #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 5 #endif 0: // b0-7 (channel 0) LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b # va0 - va7 := va - va_zero_point LD1 {v0.8b}, [x3], 8 SUB_ZERO_POINT v0.8h, v0.8b, v24.8b LD1 {v1.8b}, [x9], 8 SUB_ZERO_POINT v1.8h, v1.8b, v24.8b LD1 {v2.8b}, [x10], 8 SUB_ZERO_POINT v2.8h, v2.8b, v24.8b LD1 {v3.8b}, [x11], 8 SUB_ZERO_POINT v3.8h, v3.8b, v24.8b LD1 {v4.8b}, [x12], 8 SUB_ZERO_POINT v4.8h, v4.8b, v24.8b LD1 {v5.8b}, [x13], 8 SUB_ZERO_POINT v5.8h, v5.8b, v24.8b LD1 {v6.8b}, [x14], 8 SUB_ZERO_POINT v6.8h, v6.8b, v24.8b LD1 {v7.8b}, [x15], 8 SUB_ZERO_POINT v7.8h, v7.8b, v24.8b // b0-7 (channel 1) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0] SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0] SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0] SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0] SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0] SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0] SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0] SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0] SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0] SMLAL v18.4s, v27.4h, 
v5.h[0] // vacc5x0123 += vb0123 * va5[0] SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0] SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0] SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0] SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0] SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0] // b0-7 (channel 2) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1] SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1] SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1] SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1] SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1] SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1] SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1] SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1] SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1] SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1] SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1] SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1] SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1] SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1] SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1] // b0-7 (channel 3) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2] SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2] SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2] SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2] SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2] SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2] SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2] SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2] USUBL 
v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2] SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2] SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2] SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2] SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2] SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2] SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2] SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2] // b0-7 (channel 4) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3] SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3] SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3] SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3] SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3] SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3] SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3] SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3] SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3] SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3] SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3] SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3] SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3] SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3] SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3] // b0-7 (channel 5) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4] SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4] SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4] SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4] SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4] SMLAL2 v13.4s, v27.8h, 
v2.h[4] // vacc2x4567 += vb4567 * va2[4] SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4] SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4] SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4] SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4] SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4] SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4] SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4] SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4] SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4] // b0-7 (channel 6) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5] SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5] SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5] SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5] SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5] SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5] SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5] SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5] SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5] SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5] SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5] SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5] SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5] SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5] SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5] // b0-7 (channel 7) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6] SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6] SMLAL v10.4s, v27.4h, v1.h[6] // 
vacc1x0123 += vb0123 * va1[6] SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6] SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6] SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6] SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6] SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6] SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6] SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6] SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6] SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6] SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6] SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6] SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6] SUBS x2, x2, 8 SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7] SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7] SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7] SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7] SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7] SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7] SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7] SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7] SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7] SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7] SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7] SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7] SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7] SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7] SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7] SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7] B.HS 0b 1: CMP x2, -8 B.EQ 2f // Adjust a0-a7 ADD x3, x3, x2 ADD x9, x9, x2 ADD x10, 
x10, x2 ADD x11, x11, x2 ADD x12, x12, x2 ADD x13, x13, x2 ADD x14, x14, x2 ADD x15, x15, x2 // a_shift = 8 * k - 64 LSL x2, x2, 3 FMOV d29, x2 USHL d24, d24, d29 // Load x0-a7 LD1 {v0.8b}, [x3], 8 USHL d0, d0, d29 SUB_ZERO_POINT v0.8h, v0.8b, v24.8b LD1 {v1.8b}, [x9], 8 USHL d1, d1, d29 SUB_ZERO_POINT v1.8h, v1.8b, v24.8b LD1 {v2.8b}, [x10], 8 USHL d2, d2, d29 SUB_ZERO_POINT v2.8h, v2.8b, v24.8b LD1 {v3.8b}, [x11], 8 USHL d3, d3, d29 SUB_ZERO_POINT v3.8h, v3.8b, v24.8b LD1 {v4.8b}, [x12], 8 USHL d4, d4, d29 SUB_ZERO_POINT v4.8h, v4.8b, v24.8b LD1 {v5.8b}, [x13], 8 USHL d5, d5, d29 SUB_ZERO_POINT v5.8h, v5.8b, v24.8b LD1 {v6.8b}, [x14], 8 USHL d6, d6, d29 SUB_ZERO_POINT v6.8h, v6.8b, v24.8b LD1 {v7.8b}, [x15], 8 USHL d7, d7, d29 SUB_ZERO_POINT v7.8h, v7.8b, v24.8b // Channel 0 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0] SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0] SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0] SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0] SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0] SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0] SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0] SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0] SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0] SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0] SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0] SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0] SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0] SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0] SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0] SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0] CMP x2, -48 B.LO 2f // Channel 1 LD1 {v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1] 
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1] SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1] SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1] SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1] SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1] SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1] SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1] SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1] SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1] SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1] SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1] SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1] SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1] SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1] SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1] B.LS 2f // Channel 2 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2] SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2] SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2] SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2] SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2] SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2] SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2] SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2] SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2] SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2] SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2] SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2] SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2] SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2] SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2] SMLAL2 v23.4s, v27.8h, 
v7.h[2] // vacc7x4567 += vb4567 * va7[2] CMP x2, -32 B.LO 2f // Channel 3 LD1 {v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3] SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3] SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3] SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3] SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3] SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3] SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3] SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3] SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3] SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3] SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3] SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3] SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3] SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3] SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3] SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3] B.LS 2f // Channel 4 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4] SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4] SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4] SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4] SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4] SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4] SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4] SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4] SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4] SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4] SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4] SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4] SMLAL v20.4s, v27.4h, 
v6.h[4] // vacc6x0123 += vb0123 * va6[4] SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4] SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4] SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4] CMP x2, -16 B.LO 2f // Channel 5 LD1 {v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5] SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5] SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5] SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5] SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5] SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5] SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5] SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5] SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5] SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5] SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5] SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5] SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5] SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5] SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5] SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5] B.LS 2f // Channel 6 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6] SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6] SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6] SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6] SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6] SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6] SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6] SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6] SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6] SMLAL2 v17.4s, v27.8h, 
v4.h[6] // vacc4x4567 += vb4567 * va4[6] SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6] SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6] SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6] SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6] SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6] SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6] #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 4 #endif 2: LSL x16, x16, 2 LD1 {v24.4s}, [x6], 16 LD1 {v25.4s}, [x6] SCVTF v8.4s, v8.4s SCVTF v9.4s, v9.4s SCVTF v10.4s, v10.4s SCVTF v11.4s, v11.4s SCVTF v12.4s, v12.4s SCVTF v13.4s, v13.4s SCVTF v14.4s, v14.4s SCVTF v15.4s, v15.4s SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMUL v8.4s, v8.4s, v26.4s FMUL v9.4s, v9.4s, v30.4s FMUL v10.4s, v10.4s, v26.4s FMUL v11.4s, v11.4s, v30.4s FMUL v12.4s, v12.4s, v26.4s FMUL v13.4s, v13.4s, v30.4s FMUL v14.4s, v14.4s, v26.4s FMUL v15.4s, v15.4s, v30.4s FMUL v16.4s, v16.4s, v26.4s FMUL v17.4s, v17.4s, v30.4s FMUL v18.4s, v18.4s, v26.4s FMUL v19.4s, v19.4s, v30.4s FMUL v20.4s, v20.4s, v26.4s FMUL v21.4s, v21.4s, v30.4s FMUL v22.4s, v22.4s, v26.4s FMUL v23.4s, v23.4s, v30.4s FADD v8.4s, v8.4s, v24.4s FADD v9.4s, v9.4s, v25.4s FADD v10.4s, v10.4s, v24.4s FADD v11.4s, v11.4s, v25.4s FADD v12.4s, v12.4s, v24.4s FADD v13.4s, v13.4s, v25.4s FADD v14.4s, v14.4s, v24.4s FADD v15.4s, v15.4s, v25.4s FADD v16.4s, v16.4s, v24.4s FADD v17.4s, v17.4s, v25.4s FADD v18.4s, v18.4s, v24.4s FADD v19.4s, v19.4s, v25.4s FADD v20.4s, v20.4s, v24.4s FADD v21.4s, v21.4s, v25.4s FADD v22.4s, v22.4s, v24.4s FADD v23.4s, v23.4s, v25.4s // Compute c0-c7 ADD x9, x7, x16 CMP x0, 2 CSEL x9, x7, x9, LO ADD x10, x9, x16 CSEL x10, x9, x10, LS ADD x11, x10, x16 CMP x0, 4 CSEL x11, x10, x11, LO ADD x12, x11, x16 CSEL x12, x11, x12, LS ADD x13, x12, x16 CMP x0, 6 CSEL x13, x12, x13, 
LO ADD x14, x13, x16 CSEL x14, x13, x14, LS ADD x15, x14, x16 CMP x0, 8 CSEL x15, x14, x15, NE CMP x1, 8 B.NE 4f ST1 {v8.4s}, [x7], 16 ST1 {v9.4s}, [x7] ST1 {v10.4s}, [x9], 16 ST1 {v11.4s}, [x9] ST1 {v12.4s}, [x10], 16 ST1 {v13.4s}, [x10] ST1 {v14.4s}, [x11], 16 ST1 {v15.4s}, [x11] ST1 {v16.4s}, [x12], 16 ST1 {v17.4s}, [x12] ST1 {v18.4s}, [x13], 16 ST1 {v19.4s}, [x13] ST1 {v20.4s}, [x14], 16 ST1 {v21.4s}, [x14] ST1 {v22.4s}, [x15], 16 ST1 {v23.4s}, [x15] LDP d9, d8, [sp, -64] LDP d11, d10, [sp, -48] LDP d13, d12, [sp, -32] LDP d15, d14, [sp, -16] RET #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 3 #endif 4: CMP x1, 4 B.LO 5f ST1 {v8.4s}, [x7], 16 ST1 {v10.4s}, [x9], 16 ST1 {v12.4s}, [x10], 16 ST1 {v14.4s}, [x11], 16 ST1 {v16.4s}, [x12], 16 ST1 {v18.4s}, [x13], 16 ST1 {v20.4s}, [x14], 16 ST1 {v22.4s}, [x15], 16 SUB x1, x1, 4 MOV V8.16b, V9.16b MOV v10.16b, v11.16b MOV v12.16b, V13.16b MOV V14.16b, V15.16b MOV V16.16b, V17.16b MOV V18.16b, V19.16b MOV V20.16b, V21.16b MOV V22.16b, V23.16b 5: CMP x1, 2 B.LO 6f ST1 {v8.2s}, [x7], 8 ST1 {v10.2s}, [x9], 8 ST1 {v12.2s}, [x10], 8 ST1 {v14.2s}, [x11], 8 ST1 {v16.2s}, [x12], 8 ST1 {v18.2s}, [x13], 8 ST1 {v20.2s}, [x14], 8 ST1 {v22.2s}, [x15], 8 SUB x1, x1, 2 EXT v8.16b, v8.16b, v8.16b, 8 EXT v10.16b, v10.16b, v10.16b, 8 EXT v12.16b, v12.16b, v12.16b, 8 EXT V14.16b, V14.16b, V14.16b, 8 EXT V16.16b, V16.16b, V16.16b, 8 EXT V18.16b, V18.16b, V18.16b, 8 EXT V20.16b, V20.16b, V20.16b, 8 EXT V22.16b, V22.16b, V22.16b, 8 6: CMP x1, 1 B.LO 7f ST1 {v8.s}[0], [x7] ST1 {v10.s}[0], [x9] ST1 {v12.s}[0], [x10] ST1 {v14.s}[0], [x11] ST1 {v16.s}[0], [x12] ST1 {v18.s}[0], [x13] ST1 {v20.s}[0], [x14] ST1 {v22.s}[0], [x15] 7: LDP d9, d8, [sp, -64] LDP d11, d10, [sp, -48] LDP d13, d12, [sp, -32] LDP d15, d14, [sp, -16] RET END_FUNCTION pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
12,985
aten/src/ATen/native/quantized/cpu/qnnpack/src/hgemm/8x8-aarch32-neonfp16arith.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> .syntax unified # void pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith( # size_t mr, # size_t nr, # size_t k, # const __fp16*restrict a, # size_t a_stride, # const __fp16*restrict w, # __fp16*restrict c, # size_t c_stride, # const struct pytorch_qnnp_fp16_clamping_params clamping_params[restrict static 1]) BEGIN_FUNCTION pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Load w # - ip = w LDR ip, [sp, 4] PUSH {r4, r5, r6, r7, r8, r9, r10, r11} VPUSH {d8-d15} # Initialize vacc0x01234567 # - q8 = d16:d17 := vacc0x01234567 = bias01234567 VLD1.16 {d16-d17}, [ip:64]! # Load a_stride # - r10 = a_stride LDR r10, [sp, 96] # Initialize vacc1x01234567 # - q9 := vacc1x01234567 = vacc0x01234567 VMOV.I16 q9, q8 # Initialize vacc2x01234567 # - q10 := vacc2x01234567 = vacc0x01234567 VMOV.I16 q10, q8 # Initialize vacc3x01234567 # - q11 := vacc3x01234567 = vacc0x01234567 VMOV.I16 q11, q8 # Initialize vacc4x01234567 # - q12 := vacc4x01234567 = vacc0x01234567 VMOV.I16 q12, q8 # Initialize vacc5x01234567 # - q13 := vacc5x01234567 = vacc0x01234567 VMOV.I16 q13, q8 # Initialize vacc6x01234567 # - q14 := vacc6x01234567 = vacc0x01234567 VMOV.I16 q14, q8 # Initialize vacc7x01234567 # - q15 := vacc7x01234567 = vacc0x01234567 VMOV.I16 q15, q8 CMP r0, 2 ADD r4, r3, r10 MOVLO r4, r3 ADD r5, r4, r10 MOVLS r5, r4 CMP r0, 4 ADD r6, r5, r10 MOVLO r6, r5 ADD r7, r6, r10 MOVLS r7, r6 CMP r0, 6 ADD r8, r7, r10 MOVLO r8, r7 ADD r9, r8, r10 MOVLS r9, r8 CMP r0, 8 ADD r10, r9, r10 MOVNE r10, r9 SUBS r2, r2, 4 BLO 1f .p2align 5 0: # Load a0 # - d0 = a0 VLD1.16 {d0}, [r3]! # Load a1 # - d1 = a1 VLD1.16 {d1}, [r4]! # Load a2 # - d2 = a2 VLD1.16 {d2}, [r5]! # Load a3 # - d3 = a3 VLD1.16 {d3}, [r6]! 
# Load a4 # - d4 = a4 VLD1.16 {d4}, [r7]! # Load a5 # - d5 = a5 VLD1.16 {d5}, [r8]! # Load a6 # - d6 = a6 VLD1.16 {d6}, [r9]! # Load a7 # - d7 = a7 VLD1.16 {d7}, [r10]! ### Channel 0 ### # Load b0-b15 (channel 0) # - q4 = d8:d9 = b0-b15 VLD1.8 {d8-d9}, [ip:64]! # vacc0x01234567 += vb01234567 * va0[0]; .word 0xF3D80140 @ VMLA.F16 q8, q4, d0[0] # vacc1x01234567 += vb01234567 * va1[0]; .word 0xF3D82141 @ VMLA.F16 q9, q4, d1[0] # vacc2x01234567 += vb01234567 * va2[0]; .word 0xF3D84142 @ VMLA.F16 q10, q4, d2[0] # vacc3x01234567 += vb01234567 * va3[0]; .word 0xF3D86143 @ VMLA.F16 q11, q4, d3[0] # vacc4x01234567 += vb01234567 * va4[0]; .word 0xF3D88144 @ VMLA.F16 q12, q4, d4[0] # vacc5x01234567 += vb01234567 * va5[0]; .word 0xF3D8A145 @ VMLA.F16 q13, q4, d5[0] # vacc6x01234567 += vb01234567 * va6[0]; .word 0xF3D8C146 @ VMLA.F16 q14, q4, d6[0] # vacc7x01234567 += vb01234567 * va7[0]; .word 0xF3D8E147 @ VMLA.F16 q15, q4, d7[0] ### Channel 1 ### # Load b0-b15 (channel 1) # - q5 = d10:d11 = b0-b15 VLD1.8 {d10-d11}, [ip:64]! # vacc0x01234567 += vb01234567 * va0[1]; .word 0xF3DA0148 @ VMLA.F16 q8, q5, d0[1] # vacc1x01234567 += vb01234567 * va1[1]; .word 0xF3DA2149 @ VMLA.F16 q9, q5, d1[1] # vacc2x01234567 += vb01234567 * va2[1]; .word 0xF3DA414A @ VMLA.F16 q10, q5, d2[1] # vacc3x01234567 += vb01234567 * va3[1]; .word 0xF3DA614B @ VMLA.F16 q11, q5, d3[1] # vacc4x01234567 += vb01234567 * va4[1]; .word 0xF3DA814C @ VMLA.F16 q12, q5, d4[1] # vacc5x01234567 += vb01234567 * va5[1]; .word 0xF3DAA14D @ VMLA.F16 q13, q5, d5[1] # vacc6x01234567 += vb01234567 * va6[1]; .word 0xF3DAC14E @ VMLA.F16 q14, q5, d6[1] # vacc7x01234567 += vb01234567 * va7[1]; .word 0xF3DAE14F @ VMLA.F16 q15, q5, d7[1] ### Channel 2 ### # Load b0-b15 (channel 2) # - q6 = d12:d13 = b0-b15 VLD1.8 {d12-d13}, [ip:64]! 
# vacc0x01234567 += vb01234567 * va0[2]; .word 0xF3DC0160 @ VMLA.F16 q8, q6, d0[2] # vacc1x01234567 += vb01234567 * va1[2]; .word 0xF3DC2161 @ VMLA.F16 q9, q6, d1[2] # vacc2x01234567 += vb01234567 * va2[2]; .word 0xF3DC4162 @ VMLA.F16 q10, q6, d2[2] # vacc3x01234567 += vb01234567 * va3[2]; .word 0xF3DC6163 @ VMLA.F16 q11, q6, d3[2] # vacc4x01234567 += vb01234567 * va4[2]; .word 0xF3DC8164 @ VMLA.F16 q12, q6, d4[2] # vacc5x01234567 += vb01234567 * va5[2]; .word 0xF3DCA165 @ VMLA.F16 q13, q6, d5[2] # vacc6x01234567 += vb01234567 * va6[2]; .word 0xF3DCC166 @ VMLA.F16 q14, q6, d6[2] # vacc7x01234567 += vb01234567 * va7[2]; .word 0xF3DCE167 @ VMLA.F16 q15, q6, d7[2] ### Channel 3 ### # Load b0-b15 (channel 3) # - q7 = d14:d15 = b0-b15 VLD1.8 {d14-d15}, [ip:64]! # vacc0x01234567 += vb01234567 * va0[3]; .word 0xF3DE0168 @ VMLA.F16 q8, q7, d0[3] # vacc1x01234567 += vb01234567 * va1[3]; .word 0xF3DE2169 @ VMLA.F16 q9, q7, d1[3] # vacc2x01234567 += vb01234567 * va2[3]; .word 0xF3DE416A @ VMLA.F16 q10, q7, d2[3] # vacc3x01234567 += vb01234567 * va3[3]; .word 0xF3DE616B @ VMLA.F16 q11, q7, d3[3] # vacc4x01234567 += vb01234567 * va4[3]; .word 0xF3DE816C @ VMLA.F16 q12, q7, d4[3] # vacc5x01234567 += vb01234567 * va5[3]; .word 0xF3DEA16D @ VMLA.F16 q13, q7, d5[3] # vacc6x01234567 += vb01234567 * va6[3]; .word 0xF3DEC16E @ VMLA.F16 q14, q7, d6[3] # vacc7x01234567 += vb01234567 * va7[3]; .word 0xF3DEE16F @ VMLA.F16 q15, q7, d7[3] SUBS r2, r2, 4 BHS 0b 1: CMP r2, -4 BEQ 2f ADD r3, r3, r2, LSL #1 ADD r4, r4, r2, LSL #1 ADD r5, r5, r2, LSL #1 ADD r6, r6, r2, LSL #1 ADD r7, r7, r2, LSL #1 ADD r8, r8, r2, LSL #1 ADD r9, r9, r2, LSL #1 ADD r10, r10, r2, LSL #1 LSL r2, r2, 4 VDUP.32 d14, r2 # Load a0 # - d0 = a0 VLD1.16 {d0}, [r3]! VSHL.U64 d0, d0, d14 # Load a1 # - d1 = a1 VLD1.16 {d1}, [r4]! VSHL.U64 d1, d1, d14 # Load a2 # - d2 = a2 VLD1.16 {d2}, [r5]! VSHL.U64 d2, d2, d14 # Load a3 # - d3 = a3 VLD1.16 {d3}, [r6]! VSHL.U64 d3, d3, d14 # Load a4 # - d4 = a4 VLD1.16 {d4}, [r7]! 
VSHL.U64 d4, d4, d14 # Load a5 # - d5 = a5 VLD1.16 {d5}, [r8]! VSHL.U64 d5, d5, d14 # Load a6 # - d6 = a6 VLD1.16 {d6}, [r9]! VSHL.U64 d6, d6, d14 # Load a7 # - d7 = a7 VLD1.16 {d7}, [r10]! VSHL.U64 d7, d7, d14 ### Channel 0 ### # Load b0-b15 (channel 0) # - q4 = d8:d9 = b0-b15 VLD1.8 {d8-d9}, [ip:64]! # vacc0x01234567 += vb01234567 * va0[0]; .word 0xF3D80140 @ VMLA.F16 q8, q4, d0[0] # vacc1x01234567 += vb01234567 * va1[0]; .word 0xF3D82141 @ VMLA.F16 q9, q4, d1[0] # vacc2x01234567 += vb01234567 * va2[0]; .word 0xF3D84142 @ VMLA.F16 q10, q4, d2[0] # vacc3x01234567 += vb01234567 * va3[0]; .word 0xF3D86143 @ VMLA.F16 q11, q4, d3[0] # vacc4x01234567 += vb01234567 * va4[0]; .word 0xF3D88144 @ VMLA.F16 q12, q4, d4[0] # vacc5x01234567 += vb01234567 * va5[0]; .word 0xF3D8A145 @ VMLA.F16 q13, q4, d5[0] # vacc6x01234567 += vb01234567 * va6[0]; .word 0xF3D8C146 @ VMLA.F16 q14, q4, d6[0] # vacc7x01234567 += vb01234567 * va7[0]; .word 0xF3D8E147 @ VMLA.F16 q15, q4, d7[0] CMP r2, -32 BLO 2f ### Channel 1 ### # Load b0-b15 (channel 1) # - q5 = d10:d11 = b0-b15 VLD1.8 {d10-d11}, [ip:64]! # vacc0x01234567 += vb01234567 * va0[1]; .word 0xF3DA0148 @ VMLA.F16 q8, q5, d0[1] # vacc1x01234567 += vb01234567 * va1[1]; .word 0xF3DA2149 @ VMLA.F16 q9, q5, d1[1] # vacc2x01234567 += vb01234567 * va2[1]; .word 0xF3DA414A @ VMLA.F16 q10, q5, d2[1] # vacc3x01234567 += vb01234567 * va3[1]; .word 0xF3DA614B @ VMLA.F16 q11, q5, d3[1] # vacc4x01234567 += vb01234567 * va4[1]; .word 0xF3DA814C @ VMLA.F16 q12, q5, d4[1] # vacc5x01234567 += vb01234567 * va5[1]; .word 0xF3DAA14D @ VMLA.F16 q13, q5, d5[1] # vacc6x01234567 += vb01234567 * va6[1]; .word 0xF3DAC14E @ VMLA.F16 q14, q5, d6[1] # vacc7x01234567 += vb01234567 * va7[1]; .word 0xF3DAE14F @ VMLA.F16 q15, q5, d7[1] BLS 2f ### Channel 2 ### # Load b0-b15 (channel 2) # - q6 = d12:d13 = b0-b15 VLD1.8 {d12-d13}, [ip:64]! 
# vacc0x01234567 += vb01234567 * va0[2]; .word 0xF3DC0160 @ VMLA.F16 q8, q6, d0[2] # vacc1x01234567 += vb01234567 * va1[2]; .word 0xF3DC2161 @ VMLA.F16 q9, q6, d1[2] # vacc2x01234567 += vb01234567 * va2[2]; .word 0xF3DC4162 @ VMLA.F16 q10, q6, d2[2] # vacc3x01234567 += vb01234567 * va3[2]; .word 0xF3DC6163 @ VMLA.F16 q11, q6, d3[2] # vacc4x01234567 += vb01234567 * va4[2]; .word 0xF3DC8164 @ VMLA.F16 q12, q6, d4[2] # vacc5x01234567 += vb01234567 * va5[2]; .word 0xF3DCA165 @ VMLA.F16 q13, q6, d5[2] # vacc6x01234567 += vb01234567 * va6[2]; .word 0xF3DCC166 @ VMLA.F16 q14, q6, d6[2] # vacc7x01234567 += vb01234567 * va7[2]; .word 0xF3DCE167 @ VMLA.F16 q15, q6, d7[2] .p2align 4 2: # Load params: # - ip = params LDR ip, [sp, 112] # Load scale: # - q0 = d0:d1 = vscale VLD1.16 {d0[], d1[]}, [ip]! .word 0xF3500DD0 @ VMUL.F16 q8, q8, q0 .word 0xF3522DD0 @ VMUL.F16 q9, q9, q0 .word 0xF3544DD0 @ VMUL.F16 q10, q10, q0 .word 0xF3566DD0 @ VMUL.F16 q11, q11, q0 .word 0xF3588DD0 @ VMUL.F16 q12, q12, q0 .word 0xF35AADD0 @ VMUL.F16 q13, q13, q0 .word 0xF35CCDD0 @ VMUL.F16 q14, q14, q0 .word 0xF35EEDD0 @ VMUL.F16 q15, q15, q0 # Load max: # - q1 = d2:d3 = vmax VLD1.16 {d2[], d3[]}, [ip]! 
.word 0xF2700FC2 @ VMIN.F16 q8, q8, q1 .word 0xF2722FC2 @ VMIN.F16 q9, q9, q1 .word 0xF2744FC2 @ VMIN.F16 q10, q10, q1 .word 0xF2766FC2 @ VMIN.F16 q11, q11, q1 .word 0xF2788FC2 @ VMIN.F16 q12, q12, q1 .word 0xF27AAFC2 @ VMIN.F16 q13, q13, q1 .word 0xF27CCFC2 @ VMIN.F16 q14, q14, q1 .word 0xF27EEFC2 @ VMIN.F16 q15, q15, q1 # Load min: # - q2 = d4:d5 = vmin VLD1.16 {d4[], d5[]}, [ip] .word 0xF2500FC4 @ VMAX.F16 q8, q8, q2 .word 0xF2522FC4 @ VMAX.F16 q9, q9, q2 .word 0xF2544FC4 @ VMAX.F16 q10, q10, q2 .word 0xF2566FC4 @ VMAX.F16 q11, q11, q2 .word 0xF2588FC4 @ VMAX.F16 q12, q12, q2 .word 0xF25AAFC4 @ VMAX.F16 q13, q13, q2 .word 0xF25CCFC4 @ VMAX.F16 q14, q14, q2 .word 0xF25EEFC4 @ VMAX.F16 q15, q15, q2 # Load c, c_stride: # - r2 = c # - r3 = c_stride LDRD r2, r3, [sp, 104] CMP r0, 2 ADD r4, r2, r3 MOVLO r4, r2 ADD r5, r4, r3 MOVLS r5, r4 CMP r0, 4 ADD r6, r5, r3 MOVLO r6, r5 ADD r7, r6, r3 MOVLS r7, r6 CMP r0, 6 ADD r8, r7, r3 MOVLO r8, r7 ADD r9, r8, r3 MOVLS r9, r8 CMP r0, 8 ADD r3, r9, r3 MOVNE r3, r9 CMP r1, 8 BNE 4f VST1.16 {d16-d17}, [r2] VST1.16 {d18-d19}, [r4] VST1.16 {d20-d21}, [r5] VST1.16 {d22-d23}, [r6] VST1.16 {d24-d25}, [r7] VST1.16 {d26-d27}, [r8] VST1.16 {d28-d29}, [r9] VST1.16 {d30-d31}, [r3] VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr .p2align 3 4: CMP r1, 4 BLO 5f VST1.16 {d16}, [r2]! VST1.16 {d18}, [r4]! VST1.16 {d20}, [r5]! VST1.16 {d22}, [r6]! VST1.16 {d24}, [r7]! VST1.16 {d26}, [r8]! VST1.16 {d28}, [r9]! VST1.16 {d30}, [r3]! SUB r1, 4 VMOV.I16 d16, d17 VMOV.I16 d18, d19 VMOV.I16 d20, d21 VMOV.I16 d22, d23 VMOV.I16 d24, d25 VMOV.I16 d26, d27 VMOV.I16 d28, d29 VMOV.I16 d30, d31 5: CMP r1, 2 BLO 6f VST1.32 {d16[0]}, [r2]! VST1.32 {d18[0]}, [r4]! VST1.32 {d20[0]}, [r5]! VST1.32 {d22[0]}, [r6]! VST1.32 {d24[0]}, [r7]! VST1.32 {d26[0]}, [r8]! VST1.32 {d28[0]}, [r9]! VST1.32 {d30[0]}, [r3]! 
SUB r1, 2 VEXT.8 d16, d16, d16, 4 VEXT.8 d18, d18, d18, 4 VEXT.8 d20, d20, d20, 4 VEXT.8 d22, d22, d22, 4 VEXT.8 d24, d24, d24, 4 VEXT.8 d26, d26, d26, 4 VEXT.8 d28, d28, d28, 4 VEXT.8 d30, d30, d30, 4 6: TEQ r1, 0 BEQ 7f VST1.16 {d16[0]}, [r2] VST1.16 {d18[0]}, [r4] VST1.16 {d20[0]}, [r5] VST1.16 {d22[0]}, [r6] VST1.16 {d24[0]}, [r7] VST1.16 {d26[0]}, [r8] VST1.16 {d28[0]}, [r9] VST1.16 {d30[0]}, [r3] 7: VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
6,228
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/4x4-packA-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> # r0 mr # r1 k # r2 a # r3 a_stride .syntax unified # Args passed via stack. # TOS # |----------------| # |packed_a | 0 # |----------------| # # After loading w pointer in ip reg. # And after pushing r4-r9 and d8-d15 on stack # |----------------| # |r4 - r11 | 0 # |packed_a | 32 # |----------------| # # Packed A format. # 4kx4m blocks for alls blocks given 4 rows (4m) are placed in contiguous memory. # Original A # --------- K ----------- -- (K + 4 - 1) / 4 -- # | | | | # | | (M + 4 - 1)/4 | # | | Packed | | # M | => |-------------------| # | | Thus Packed A has (K + 4 - 1)/4 * (M + 4 -1)/4 blocks # | | # |---------------------| # # Each 4 x 4 blocks is transposed and stored. # Each of the (K + 4 - 1)/4 blocks for a given group of 4 m blocks # are stored adjacent in memory # Thus, each block: # |----4m-----|----4m-----| # 4k | | ..... (K + 4 - 1)/4 blocks # |-----------|-----------| # This locality helps in loading 8kx4m blocks of activations # Note when M is not multiple of 4, the rest can contain arbitrary # data in packed A as we will not be writing those out. # This wil be taken care by just copying the appropriate valid data # Also note that this packing is same as taking for 4x1 pattern. 
# This is because all the adjacent k's are laid next to each other # in both 4x4 as well as 4x1 blocking (mrxkr) # So this packing kernel can be used by compute kernel that assumes # 8x1 sparsity pattern and has register blocking of 4x8 # void pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon( # size_t mr, # size_t K, # const uint8_t* a, # size_t a_stride, # uint8_t* packed_a, BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif PUSH {r4, r5, r6, r7, r8, r9, r10, r11} # r4 = a0 = a pointer MOV r4, r2 # r2 = packed_a pointer LDR r2, [sp, 32] CMP r0, 2 # r5 = a1 ADD r5, r4, r3 MOVLO r5, r4 # r6 = a2 ADD r6, r5, r3 MOVLS r6, r5 CMP r0, 4 # r7 = a3 ADD r7, r6, r3 MOVNE r7, r6 # num_k_blocks = (k + (4 - 1)) / 4 ADD r1, r1, 3 LSR r1, r1, 2 SUBS r1, r1, 2 BLO 1f .p2align 5 k_loop: VLD1.8 {d0}, [r4]! VLD1.8 {d1}, [r5]! VLD1.8 {d2}, [r6]! VLD1.8 {d3}, [r7]! # Now we have 4x8 block of values that we will tranpose # A matrix # -------------------------------- # | | # |a0-----a3 a4-----a7....| # |b0 B00 b3 b4 B01 b7....| # |c0 c3 c4 c7....| # |d0-----d3 d4-----d7....| # | | # | | # ------------------------------- # {va01, va23} = B00 + B01 = 2 uint8x16_t # Sequence: # VTRN.8 d0, d1 // low(va01), high(va01) # VTRN.8 d2, d3 // low(va23), high(va23) # VTRN.16 q0, q1 // va01, va23 # Now we have # d0 = d4, c4, b4, a4 : d0, c0, b0, a0 # d1 = d5, c5, b5, a5 : d1, c1, b1, a1 # d2 = d6, c6, b6, a6 : d2, c2, b2, a2 # d3 = d7, c7, b7, a7 : d3, c3, b3, a3 # Thus 2 4x4 blocks are transposed. # Now we have all 2 B00, B01 transposed. 
VTRN.8 d0, d1 VTRN.8 d2, d3 VTRN.16 q0, q1 # Now VTRN.32 d0, d1 # Now VTRN.32 d2, d3 # Thus we have # d0 = d1, c1, b1, a1 : d0, c0, b0, a0 # d1 = d5, c5, b5, a5 : d4, c4, b4, a4 # d2 = d3, c3, b3, a3 : d2, c2, b2, a2 # d3 = d7, c7, b7, a7 : d6, c6, b6, a6 # Then we can do # VSWP d1, d2 # d0 = d1, c1, b1, a1 : d0, c0, b0, a0 # d1 = d3, c3, b3, a3 : d2, c2, b2, a2 # d2 = d5, c5, b5, a5 : d4, c4, b4, a4 # d3 = d7, c7, b7, a7 : d6, c6, b6, a6 # Now we can store q0 contiguously followed VTRN.32 d0, d1 VTRN.32 d2, d3 VSWP d1, d2 # Now store the tranposed values # d0, d1, d2, d3 VST1.8 {q0}, [r2]! VST1.8 {q1}, [r2]! SUBS r1, r1, 2 BHS k_loop 1: CMP r1, -2 BEQ 2f VLD1.32 {d0[]}, [r4] VLD1.32 {d1[]}, [r5] VLD1.32 {d2[]}, [r6] VLD1.32 {d3[]}, [r7] # Now we have 4x8 block of values that we will tranpose # _d{0-3} are arm neon vector registers # va0 = _d0 = a0 a1 a2 a3 # va1 = _d1 = b0 b1 b2 b3 # va2 = _d2 = c0 c1 c2 c3 # va3 = _d3 = d0 d1 d2 d3 # A matrix # ---------------------------- # | | # | a0-----a3| # | b0 B00 b3| # | last block c0 c3| # | d0-----d3| # | | # | | # --------------------------- # Sequence: # VTRN.8 d0, d1 // va0, va1 # VTRN.8 d2, d3 // va2, va3 # Now we have # d0 = b2, a2, b0, a0 # d1 = b3, a3, b1, a1 # d2 = d2, c2, d0, c0 # d3 = d3, c3, d1, c1 # Sequence: # VTRN.16 d0, d2 # VTRN.16 d1, d3 # Now we have # d0 = d0, c0, b0, a0 # d1 = d1, c1, b1, a1 # d2 = d2, c2, b2, a2 # d3 = d3, c3, b3, a3 VTRN.8 d0, d1 VTRN.8 d2, d3 VTRN.16 d0, d2 VTRN.16 d1, d3 # Since upper half of d0 just contains duplicate values # We dont want to store those # So let's combine upper half of d0 to the lower part of d0 # And lower half of d1 to upper half of d0 # Same for d2, d3 VEXT.8 d0, d0, d1, #4 VEXT.8 d1, d2, d3, #4 # Now store the tranposed values # d0, d1, d2, d3 VST1.8 {q0}, [r2] .p2align 4 2: POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
7,334
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x4-packA-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> # r0 mr # r1 k # r2 a # r3 a_stride .syntax unified # Args passed via stack. # TOS # |----------------| # |packed_a | 0 # |----------------| # # After loading w pointer in ip reg. # And after pushing r4-r9 and d8-d15 on stack # |----------------| # |r4 - r11 | 0 # |packed_a | 32 # |----------------| # # Packed A format. # 8kx4m blocks for alls blocks given 4 rows (4m) are placed in contiguous memory. # Original A # --------- K ----------- -- (K + 4 - 1) / 4 -- # | | | | # | | (M + 8 - 1)/8 | # | | Packed | | # M | => |-------------------| # | | Thus Packed A has (K + 4 - 1)/4 * (M + 8 -1)/8 blocks # | | # |---------------------| # # Each 8 x 4 blocks is transposed and stored. # Each of the (K + 4 - 1)/4 blocks for a given group of 8 m blocks # are stored adjacent in memory # Thus, each block: # |----8m-----|----8m-----| # 4k | | ..... (K + 4 - 1)/4 blocks # |-----------|-----------| # This locality helps in loading 8kx8m blocks of activations # Note when M is not multiple of 8, the rest can contain arbitrary # data in packed A as we will not be writing those out. 
# This wil be taken care by just copying the appropriate valid data # void pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon( # size_t mr, # size_t K, # const uint8_t* a, # size_t a_stride, # uint8_t* packed_a, BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif PUSH {r4, r5, r6, r7, r8, r9, r10, r11} # r4 = a0 = a pointer MOV r4, r2 # r2 = packed_a pointer LDR r2, [sp, 32] CMP r0, 2 # r5 = a1 ADD r5, r4, r3 MOVLO r5, r4 # r6 = a2 ADD r6, r5, r3 MOVLS r6, r5 CMP r0, 4 # r7 = a3 ADD r7, r6, r3 MOVLO r7, r6 # r8 = a4 ADD r8, r7, r3 MOVLS r8, r7 CMP r0, 6 # r9 = a5 ADD r9, r8, r3 MOVLO r9, r8 # r10 = a6 ADD r10, r9, r3 MOVLS r10, r9 CMP r0, 8 # r11 = a7 ADD r11, r10, r3 MOVNE r11, r10 # num_k_blocks = (k + (4 - 1)) / 4 ADD r1, r1, 3 LSR r1, r1, 2 SUBS r1, r1, 2 BLO 1f .p2align 5 k_loop: VLD1.8 {d0}, [r4]! VLD1.8 {d1}, [r5]! VLD1.8 {d2}, [r6]! VLD1.8 {d3}, [r7]! VLD1.8 {d4}, [r8]! VLD1.8 {d5}, [r9]! VLD1.8 {d6}, [r10]! VLD1.8 {d7}, [r11]! # Now we have 8x8 block of values that we will tranpose # A matrix # -------------------------------- # | | # |a0-----a3........a4-----a7....| # |b0 B00 b3........b4 B01 b7....| # |c0 c3........c4 c7....| # |d0-----d3........d4-----d7....| # |e0-----e3........e4-----e7....| # |f0 B10 f3........f4 B11 f7....| # |g0 g3........g4 g7....| # |h0-----h3........h4-----h7....| # | | # | | # ------------------------------- # {va01, va23} = B00 + B01 = 2 uint8x16_t # {va34, va56} = B10 + B11 = 2 uint8x16_t # Sequence: # VTRN.8 d0, d1 // low(va01), high(va01) # VTRN.8 d2, d3 // low(va23), high(va23) # VTRN.16 q0, q1 // va01, va23 # Now we have # d0 = d4, c4, b4, a4 : d0, c0, b0, a0 # d1 = d5, c5, b5, a5 : d1, c1, b1, a1 # d2 = d6, c6, b6, a6 : d2, c2, b2, a2 # d3 = d7, c7, b7, a7 : d3, c3, b3, a3 # Thus 2 4x4 blocks are transposed. 
# Now we will transpose 2 more sets of 4x4 blocks # Sequence: # VTRN.8 d4, d5 // low(va45), high(va45) # VTRN.8 d6, d7 // low(va67), high(va67) # VTRN.16 q2, q3 // va45, va67 # Now we have # d4 = h4, g4, f4, e4 : h0, g0, f0, e0 # d5 = h5, g5, f5, e5 : h1, g1, f1, e1 # d6 = h6, g6, f6, e6 : h2, g2, f2, e2 # d7 = h7, g7, f7, e7 : h3, g3, f3, e3 # Now we have all 4 B00, B01, B10, B11 # transposed. # We can now combine them to create one # 8x8 transposed block. # Sequence: # VTRN.32 q0, q2 # VTRN.32 q1, q3 # d0 = h0, g0, f0, e0 : d0, c0, b0, a0 # d1 = h1, g1, f1, e1 : d1, c1, b1, a1 # d4 = h4, g4, f4, e4 : d4, c4, b4, a4 # d5 = h5, g5, f5, e5 : d5, c5, b5, a5 # d2 = h2, g2, f2, e2 : d2, c2, b2, a2 # d3 = h3, g3, f3, e3 : d3, c3, b3, a3 # d6 = h6, g6, f6, e6 : d6, c6, b6, a6 # d7 = h7, g7, f7, e7 : d7, c7, b7, a7 VTRN.8 d0, d1 VTRN.8 d2, d3 VTRN.16 q0, q1 VTRN.8 d4, d5 VTRN.8 d6, d7 VTRN.16 q2, q3 VTRN.32 q0, q2 VTRN.32 q1, q3 # Now store the tranposed values # d0, d1, d2, d3 # then d4, d5, d6, d7 contiguously VST1.8 {q0}, [r2]! VST1.8 {q1}, [r2]! VST1.8 {q2}, [r2]! VST1.8 {q3}, [r2]! 
SUBS r1, r1, 2 BHS k_loop 1: CMP r1, -2 BEQ 2f VLD1.32 {d0[]}, [r4] VLD1.32 {d1[]}, [r8] VLD1.32 {d2[]}, [r5] VLD1.32 {d3[]}, [r9] VLD1.32 {d4[]}, [r6] VLD1.32 {d5[]}, [r10] VLD1.32 {d6[]}, [r7] VLD1.32 {d7[]}, [r11] # Now we have 4x8 block of values that we will tranpose # _d{0-3} are arm neon vector registers # va04 = _d0 = a0 a1 a2 a3 e0 e1 e2 e3 # va15 = _d1 = b0 b1 b2 b3 f0 f1 f2 f3 # va26 = _d2 = c0 c1 c2 c3 g0 g1 g2 g3 # va37 = _d3 = d0 d1 d2 d3 h0 h1 h2 h3 # A matrix # ---------------------------- # | | # | a0-----a3| # | b0 B00 b3| # | last block c0 c3| # | d0-----d3| # | e0-----e3| # | f0 B01 f3| # | g0 g3| # | h0-----h3| # | | # | | # --------------------------- # Sequence: # VTRN.8 d0, d1 // va04, va15 # VTRN.8 d2, d3 // va26, va37 # Now we have # d0 = f2, e2, f0, e0 : b2, a2, b0, a0 # d1 = f3, e3, f1, e1 : b3, a3, b1, a1 # d2 = h2, g2, h0, g0 : d2, c2, d0, c0 # d3 = h3, g3, h1, g1 : d3, c3, d1, c1 # Sequence: # VTRN.16 d0, d2 # VTRN.16 d1, d3 # Now we have # d0 = h0, g0, f0, e0 : d0, c0, b0, a0 # d1 = h1, g1, f1, e1 : d1, c1, b1, a1 # d2 = h2, g2, f2, e2 : d2, c2, b2, a2 # d3 = h3, g3, f3, e3 : d3, c3, b3, a3 VEXT.8 d0, d0, d1, #4 VEXT.8 d1, d2, d3, #4 VEXT.8 d2, d4, d5, #4 VEXT.8 d3, d6, d7, #4 VTRN.8 d0, d1 VTRN.8 d2, d3 VTRN.16 d0, d2 VTRN.16 d1, d3 # Now store the tranposed values # d0, d1, d2, d3 # then d4, d5, d6, d7 contiguously VST1.8 {q0}, [r2]! VST1.8 {q1}, [r2] .p2align 4 2: POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
34,401
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/4x8c1x4-dq-packedA-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> #ifndef __APPLE__ #define NDEF_APPLE_SYMBOLS .arch armv7-a; .fpu neon #else #define NDEF_APPLE_SYMBOLS #endif # r0 mr # r1 nr # r2 packed_a # r3 packed_w # d14 a_zero_point # d15 b_zero_point ## Stack # 4 a_stride # 4 packed_w # 4 w_row_ptr # 4 w_block_ids_ptr # 4 b # 4 c # 4 c_stride # 4 output channel index # 4 quantization_params # -- .syntax unified # Args passed via stack. # TOS # |----------------| # |packed_w | 0 # |w_row_ptr | 4 # |w_block_ids_ptr | 8 # |b | 12 # |c | 16 # |c_stride | 20 # |out ch indx | 24 # |params | 28 # |----------------| # # After loading w pointer in ip reg. # And after pushing r4-r9 and d8-d15 on stack # |----------------| # |d8 - d15 | 0 # |r4 - r11,lr | 64 # |w_row_ptr | 100 # |w_block_ids_ptr | 104 # |b | 108 # |c | 112 # |c_stride | 116 # |out ch indx | 120 # |params | 124 # |----------------| # # void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr, # const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) #define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) ;\ BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon ;\ .arm ;\ NDEF_APPLE_SYMBOLS ;\ ;\ PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\ VPUSH 
{d8-d15} ;\ ;\ /* Store nr in r11 as well for late user. */ ;\ MOV r11, r1 ;\ /* Load output channel index */ ;\ LDR r5, [sp, 120] ;\ /* Load quantization params */ ;\ /* - r7 = quantization_params */ ;\ LDR r7, [sp, 124] ;\ /* Load input_zero_point */ ;\ VLD1.8 {d16[]}, [r7] ;\ ADD r7, r7, 4 ;\ /* Load pointer to per channel zero points array */ ;\ LDR r4, [r7] ;\ /* Add output_channel_index to the b_zero_point pointer */ ;\ ADD r4, r4, r5 ;\ ;\ /* We enter the loop if r1 is atleast 1. */ ;\ /* r1 = r1 - 1 will happen in the epilogue */ ;\ /* of the loop */ ;\ CMP r1, 1 ;\ BLO _7_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ /* Load w_row_ptr + n */ ;\ LDR r5, [sp, 100] ;\ /* r7 = blocks_id_ptr */ ;\ LDR r7, [sp, 104] ;\ ;\ .p2align 5 ;\ _0_w##W_INDEX_DTYPE_NUM_BITS##: ;\ VEOR q10, q10, q10 ;\ VLD1.8 {d17[]}, [r4]! ;\ /* ip = w_row_ptr[n], lr = w_row_ptr[n+1] */ ;\ /* r5 = r5 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ ;\ LOAD_INDEX_INSTRUCTION ip, [r5], W_INDEX_DTYPE_NUM_BYTES_ARG ;\ LOAD_INDEX_INSTRUCTION lr, [r5] ;\ /* r6 = temp_packed_w = packed_w + w_row_ptr[n] * 4 */ ;\ /* This points to the first block of nonzero value */ ;\ /* for the nth row. */ ;\ ADD r6, r3, ip, LSL #2 ;\ /* r9 = temp_w_block_ids_ptr = w_block_ids_ptr (r7) + w_row_ptr[n] */ ;\ /* LSL for when elements are >1 byte */ ;\ /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ ;\ /* This points to the block id of the first block */ ;\ /* It should contain lr - ip number of block ids */ ;\ ADD r9, r7, ip, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG ;\ /* r8 = num_blocks that needs to be processed */ ;\ SUB r8, lr, ip ;\ SUBS r8, r8, 2 ;\ BLO _1_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ k_loop_w##W_INDEX_DTYPE_NUM_BITS##: ;\ /* Load 2 non zero blocks of weights. Each block = 1x4. */ ;\ VLD1.8 {d0}, [r6]! 
;\ ;\ /* ip = block_id_ptr[0] */ ;\ /* lr = block_id_ptr[1] */ ;\ LOAD_INDEX_INSTRUCTION ip, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\ LOAD_INDEX_INSTRUCTION lr, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\ ;\ /* Add offset to r2 */ ;\ /* Shift by 4 because each packed block is a block of 4x4 */ ;\ /* which 16 bytes */ ;\ ADD r10, r2, ip, LSL #4 ;\ /* q9 = vxb */ ;\ VSUBL.U8 q0, d0, d17 ;\ ;\ /* d2, d3 = 4x4 transposed */ ;\ VLD1.8 {d2}, [r10]! ;\ VLD1.8 {d3}, [r10] ;\ ;\ ADD r10, r2, lr, LSL #4 ;\ ;\ VSUBL.U8 q4, d2, d16 /* vxa0_t */ ;\ ;\ /* d4, d5 = next 4x4 transposed */ ;\ VLD1.8 {d4}, [r10]! ;\ VLD1.8 {d5}, [r10] ;\ ;\ VSUBL.U8 q5, d3, d16 /* vxa1_t */ ;\ VSUBL.U8 q6, d4, d16 /* vxa4_t */ ;\ VSUBL.U8 q7, d5, d16 /* vxa5_t */ ;\ ;\ /* q4, q5 = 4x4 block (16 values each of 16 bits) */ ;\ /* q6, q7 = 4x4 block (16 values each of 16 bits) */ ;\ ;\ VMLAL.S16 q10, d8, d0[0] ;\ VMLAL.S16 q10, d9, d0[1] ;\ VMLAL.S16 q10, d10, d0[2] ;\ VMLAL.S16 q10, d11, d0[3] ;\ VMLAL.S16 q10, d12, d1[0] ;\ VMLAL.S16 q10, d13, d1[1] ;\ VMLAL.S16 q10, d14, d1[2] ;\ VMLAL.S16 q10, d15, d1[3] ;\ ;\ SUBS r8, r8, 2 ;\ ;\ BHS k_loop_w##W_INDEX_DTYPE_NUM_BITS ;\ _1_w##W_INDEX_DTYPE_NUM_BITS##: ;\ CMP r8, -2 ;\ BEQ _2_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ /* Load last nonzero block */ ;\ /* For this we will load 4 8 bit values as one 32 bit value */ ;\ VLD1.32 {d0[]}, [r6]! ;\ /* q9 = vxb */ ;\ VSUBL.U8 q0, d0, d17 ;\ ;\ /* ip = block_id_ptr[0] */ ;\ LOAD_INDEX_INSTRUCTION ip, [r9] ;\ ;\ /* Add offset to r2 */ ;\ /* Shift by 4 because each packed block is a block of 4x4 */ ;\ /* which 16 bytes */ ;\ ADD r10, r2, ip, LSL #4 ;\ ;\ VLD1.8 {d2}, [r10]! 
;\ VLD1.8 {d3}, [r10] ;\ ;\ VSUBL.U8 q4, d2, d16 /* vxa0_t */ ;\ VSUBL.U8 q5, d3, d16 /* vxa1_t */ ;\ ;\ VMLAL.S16 q10, d8, d0[0] ;\ VMLAL.S16 q10, d9, d0[1] ;\ VMLAL.S16 q10, d10, d0[2] ;\ VMLAL.S16 q10, d11, d0[3] ;\ ;\ .p2align 4 ;\ _2_w##W_INDEX_DTYPE_NUM_BITS##: ;\ /* Store result on stack */ ;\ ;\ /* -12 because TOS - 4, TOS - 8, and TOS - 12, store mr, nr and pointer to weight zp */ ;\ /* + 128 bytes of buffer when nr = 1 */ ;\ /* This is needed because after processing all nrs we will */ ;\ /* load 128 bytes from stack. This is for q10, q11 for max nr of 4 */ ;\ /* Thus we will load accumulators back in q0, q1, q2, q3, q4, q5, q6, q7 */ ;\ /* When nr < 4, extra q values will be fetched from stack which may overlap */ ;\ /* with other parts of stack storing local variables. To avoid that we just */ ;\ /* create a buffer of 128 bytes inbetween to make sure pointer increment */ ;\ /* never produces address that is beyond the stack frame of this function. */ ;\ SUB r9, sp, 140 ;\ /* Each iteration produce 4 values each of 4 bytes */ ;\ /* Thus 4 x 4 = 16 bytes 2^4 */ ;\ /* In this implementation, first value will be stored at */ ;\ /* 1st value: sp - 12 - r1 * 16 */ ;\ /* 2nd value: sp - 12 - (r1 - 1) * 16 */ ;\ /* and so on. */ ;\ SUB r9, r9, r1, LSL #4 ;\ VST1.32 {q10}, [r9] ;\ ;\ /* Check if nr >=1 */ ;\ SUBS r1, r1, 1 ;\ BHI _0_w##W_INDEX_DTYPE_NUM_BITS ;\ _3_w##W_INDEX_DTYPE_NUM_BITS##: ;\ /* First load all the accumulators from stack */ ;\ /* Load nr */ ;\ SUB r9, sp, 140 ;\ SUB r9, r9, r11, LSL #4 ;\ /* Now load q8-q15 */ ;\ /* This is 8x4 block (nrxmr) */ ;\ /* We will transpose this to 4x8 (mrxnr) */ ;\ /* q8, q12 : x00, x10, x20, x30; x04, x14, x24, x34 */ ;\ /* q9, q13 : x01, x11, x21, x31; x05, x15, x25, x35 */ ;\ /* q10, q14 : x02, x12, x22, x32; x06, x16, x26, x36 */ ;\ /* q11, q15 : x03, x13, x23, x33; x07, x17, x27, x37 */ ;\ VLD1.32 {q8}, [r9]! ;\ VLD1.32 {q9}, [r9]! ;\ VLD1.32 {q10}, [r9]! ;\ VLD1.32 {q11}, [r9]! ;\ VLD1.32 {q12}, [r9]! 
;\ VLD1.32 {q13}, [r9]! ;\ VLD1.32 {q14}, [r9]! ;\ VLD1.32 {q15}, [r9] ;\ ;\ /*# Now transpose q8-11 */ ;\ /* VTRN.32 q8, q9 */ ;\ /* VTRN.32 q10, q11 */ ;\ /* q8 : X00, x01, x20, x21 */ ;\ /* q9 : X10, x11, x30, x31 */ ;\ /* q10: X02, x03, x22, x23 */ ;\ /* q11: X12, x13, x32, x33 */ ;\ /* VSWP d16, d17 */ ;\ /* q8 : x20, x21, x00, x01 */ ;\ /* VEXT.32 q6, q8, q10, 2 */ ;\ /* q6 : x00, x01, x02, x03 */ ;\ /* VEXT.32 q10, q10, q8, 2 */ ;\ /* q10: x22, x23, x20, x21 */ ;\ /* VSWP d20, d21 */ ;\ /* VMOV q8, q6 */ ;\ /* q8 : X00, x01, x02, x03 */ ;\ /* q10: x20, x21, x22, x23 */ ;\ /* VSWP d18, d19 */ ;\ /* q9 : x30, x31, x10, x11 */ ;\ /* VEXT.32 q6, q9, q11, 2 */ ;\ /* q6 : x10, x11, x12, x13 */ ;\ /* VEXT.32 q11, q11, q9, 2 */ ;\ /* q11: x32, x33, x30, x31 */ ;\ /* VSWP d22, d23 */ ;\ /* VMOV q9, q6 */ ;\ /* q9 : x10, x11, x12, x13 */ ;\ /* q11: x30, x31, x32, x33 */ ;\ /* Thus we have */ ;\ /* q8 : X00, x01, x02, x03 */ ;\ /* q9 : X10, x11, x12, x13 */ ;\ /* q10: X20, x21, x22, x23 */ ;\ /* q11: X30, x31, x32, x33 */ ;\ /* Now we can do the same for q4-q7 */ ;\ /* q12: X04, X05, X06, X07 */ ;\ /* q13: X14, X15, X16, X17 */ ;\ /* q14: X24, X25, X26, X27 */ ;\ /* q15: X34, X35, X36, X37 */ ;\ ;\ VTRN.32 q8, q9 ;\ VTRN.32 q10, q11 ;\ VSWP d16, d17 ;\ VEXT.32 q6, q8, q10, 2 ;\ VEXT.32 q10, q10, q8, 2 ;\ VSWP d20, d21 ;\ VMOV q8, q6 ;\ VSWP d18, d19 ;\ VEXT.32 q6, q9, q11, 2 ;\ VEXT.32 q11, q11, q9, 2 ;\ VSWP d22, d23 ;\ VMOV q9, q6 ;\ ;\ VTRN.32 q12, q13 ;\ VTRN.32 q14, q15 ;\ VSWP d24, d25 ;\ VEXT.32 q6, q12, q14, 2 ;\ VEXT.32 q14, q14, q12, 2 ;\ VSWP d28, d29 ;\ VMOV q12, q6 ;\ VSWP d26, d27 ;\ VEXT.32 q6, q13, q15, 2 ;\ VEXT.32 q15, q15, q13, 2 ;\ VSWP d30, d31 ;\ VMOV q13, q6 ;\ ;\ /* Load output channel index */ ;\ LDR r5, [sp, 120] ;\ /* Load quantization params */ ;\ /* - r7 = quantization_params */ ;\ LDR r7, [sp, 124] ;\ ADD r7, r7, 8 ;\ /* Load pointer to per channel requant scale */ ;\ LDR r7, [r7] ;\ /* Now r7 has the base_addr + offset for multipliers */ 
;\ ADD r7, r7, r5, LSL #2 ;\ ;\ LDR r6, [sp, 108] ;\ /* Load q6: vmultiplier_c0123 */ ;\ VLD1.32 {d12, d13}, [r7]! ;\ /* Load q7: vmultiplier_c4567 */ ;\ VLD1.32 {d14, d15}, [r7] ;\ VCVT.F32.S32 q8, q8 ;\ VCVT.F32.S32 q9, q9 ;\ VCVT.F32.S32 q10, q10 ;\ VLD1.32 {q0}, [r6]! ;\ VLD1.32 {q1}, [r6] ;\ ;\ VCVT.F32.S32 q11, q11 ;\ VCVT.F32.S32 q12, q12 ;\ VCVT.F32.S32 q13, q13 ;\ VCVT.F32.S32 q14, q14 ;\ VCVT.F32.S32 q15, q15 ;\ ;\ VMUL.F32 q8, q8, q6 ;\ VMUL.F32 q9, q9, q6 ;\ VMUL.F32 q10, q10, q6 ;\ VMUL.F32 q11, q11, q6 ;\ VMUL.F32 q12, q12, q7 ;\ VMUL.F32 q13, q13, q7 ;\ VMUL.F32 q14, q14, q7 ;\ VMUL.F32 q15, q15, q7 ;\ ;\ VADD.F32 q8, q8, q0 ;\ VADD.F32 q9, q9, q0 ;\ VADD.F32 q10, q10, q0 ;\ VADD.F32 q11, q11, q0 ;\ VADD.F32 q12, q12, q1 ;\ VADD.F32 q13, q13, q1 ;\ VADD.F32 q14, q14, q1 ;\ VADD.F32 q15, q15, q1 ;\ ;\ /* Load c, c_stride: */ ;\ /* - r1 = c */ ;\ /* - r9 = c_stride */ ;\ LDR r1, [sp, 112] ;\ LDR r9, [sp, 116] ;\ LSL r9, r9, 2 ;\ ;\ /* r1 = c0 = c pointer */ ;\ ;\ CMP r0, 2 ;\ /* r2 = c1 */ ;\ ADD r2, r1, r9 ;\ MOVLO r2, r1 ;\ ;\ /* r3 = c2 */ ;\ ADD r3, r2, r9 ;\ MOVLS r3, r2 ;\ ;\ CMP r0, 4 ;\ /* r4 = c3 */ ;\ ADD r4, r3, r9 ;\ MOVNE r4, r3 ;\ ;\ CMP r11, 8 ;\ BNE _4_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ VST1.32 {q8}, [r1]! ;\ VST1.32 {q9}, [r2]! ;\ VST1.32 {q10}, [r3]! ;\ VST1.32 {q11}, [r4]! ;\ VST1.32 {q12}, [r1] ;\ VST1.32 {q13}, [r2] ;\ VST1.32 {q14}, [r3] ;\ VST1.32 {q15}, [r4] ;\ ;\ VPOP {d8-d15} ;\ POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\ BX lr ;\ ;\ .p2align 3 ;\ _4_w##W_INDEX_DTYPE_NUM_BITS##: ;\ CMP r11, 4 ;\ BLO _5_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ VST1.32 {q8}, [r1]! ;\ VST1.32 {q9}, [r2]! ;\ VST1.32 {q10}, [r3]! ;\ VST1.32 {q11}, [r4]! ;\ ;\ SUB r11, 4 ;\ ;\ VMOV.32 q8, q12 ;\ VMOV.32 q9, q13 ;\ VMOV.32 q10, q14 ;\ VMOV.32 q11, q15 ;\ ;\ _5_w##W_INDEX_DTYPE_NUM_BITS##: ;\ CMP r11, 2 ;\ BLO _6_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ VST1.32 {d16}, [r1]! ;\ VST1.32 {d18}, [r2]! ;\ VST1.32 {d20}, [r3]! ;\ VST1.32 {d22}, [r4]! 
;\ ;\ SUB r11, 2 ;\ ;\ VEXT.32 q8, q8, 2 ;\ VEXT.32 q9, q9, 2 ;\ VEXT.32 q10, q10, 2 ;\ VEXT.32 q11, q11, 2 ;\ ;\ _6_w##W_INDEX_DTYPE_NUM_BITS##: ;\ TEQ r11, 0 ;\ BEQ _7_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ VST1.32 {d16[0]}, [r1] ;\ VST1.32 {d18[0]}, [r2] ;\ VST1.32 {d20[0]}, [r3] ;\ VST1.32 {d22[0]}, [r4] ;\ ;\ _7_w##W_INDEX_DTYPE_NUM_BITS##: ;\ VPOP {d8-d15} ;\ POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\ BX lr ;\ ;\ END_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon # void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w32__aarch32_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint32_t* w_row_ptr, # const uint32_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(32, #4, #2, LDR) # void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w16__aarch32_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint16_t* w_row_ptr, # const uint16_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(16, #2, #1, LDRH) # void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w8__aarch32_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint8_t* w_row_ptr, # const uint8_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(8, #1, #0, 
LDRB) #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif #undef NDEF_APPLE_SYMBOLS #undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON
ShaoxunZeng/PyTorch-Medusa
7,211
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x4-packA-aarch64-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> # Packed A format. # 8kx4m blocks for alls blocks given 4 rows (4m) are placed in contiguous memory. # Original A # --------- K ----------- -- (K + 4 - 1) / 4 -- # | | | | # | | (M + 8 - 1)/8 | # | | Packed | | # M | => |-------------------| # | | Thus Packed A has (K + 4 - 1)/4 * (M + 8 -1)/8 blocks # | | # |---------------------| # # Each 8 x 4 blocks is transposed and stored. # Each of the (K + 4 - 1)/4 blocks for a given group of 8 m blocks # are stored adjacent in memory # Thus, each block: # |----8m-----|----8m-----| # 4k | | ..... (K + 4 - 1)/4 blocks # |-----------|-----------| # This locality helps in loading 8kx8m blocks of activations # Note when M is not multiple of 8, the rest can contain arbitrary # data in packed A as we will not be writing those out. 
# This wil be taken care by just copying the appropriate valid data # void pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon( # size_t mr, # size_t K, # const uint8_t* a, # size_t a_stride, # uint8_t* packed_a, BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon # x2 = a0 = a pointer # x4 = packed_a pointer CMP x0, 2 # x5 = a1 ADD x5, x2, x3 CSEL x5, x2, x5, LO # x6 = a2 ADD x6, x5, x3 CSEL x6, x5, x6, LS CMP x0, 4 # x7 = a3 ADD x7, x6, x3 CSEL x7, x6, x7, LO # x8 = a4 ADD x8, x7, x3 CSEL x8, x7, x8, LS CMP x0, 6 # x9 = a5 ADD x9, x8, x3 CSEL x9, x8, x9, LO # x10 = a6 ADD x10, x9, x3 CSEL x10, x9, x10, LS CMP x0, 8 # x11 = a7 ADD x11, x10, x3 CSEL x11, x10, x11, NE # num_k_blocks = (k + (4 - 1)) / 4 ADD x1, x1, 3 LSR x1, x1, 2 SUBS x1, x1, 2 B.LO 1f .p2align 5 k_loop: LD1 {v0.d}[0], [x2], 8 LD1 {v0.d}[1], [x8], 8 LD1 {v1.d}[0], [x5], 8 LD1 {v1.d}[1], [x9], 8 LD1 {v2.d}[0], [x6], 8 LD1 {v2.d}[1], [x10], 8 LD1 {v3.d}[0], [x7], 8 LD1 {v3.d}[1], [x11], 8 # Now we have 8x8 block of values that we will tranpose # A matrix # ------------------------ # | | # |a0-----a3a4-----a7....| # |b0 B00 b3b4 B01 b7....| # |c0 c3c4 c7....| # |d0-----d3d4-----d7....| # |e0-----e3e4-----e7....| # |f0 B10 f3f4 B11 f7....| # |g0 g3g4 g7....| # |h0-----h3h4-----h7....| # | | # | | # ------------------------ # {v0.2d[1], v0.2d[0]} = B00[0]+ B01[0] + B10[0] + B11[0] # {v1.2d[1], v1.2d[0]} = B00[1]+ B01[1] + B10[1] + B11[1] # {v2.2d[1], v2.2d[0]} = B00[2]+ B01[2] + B10[2] + B11[2] # {v3.2d[1], v3.2d[0]} = B00[3]+ B01[3] + B10[3] + B11[3] # v0 = e7 e6 e5 e4 e3 e2 e1 e0; a7 a6 a5 a4 a3 a2 a1 a0 # v1 = f7 f6 f5 f4 f3 f2 f1 f0; b7 b6 b5 b4 b3 b2 b1 b0 # v2 = g7 g6 g5 g4 g3 g2 g1 g0; c7 c6 c5 c4 c3 c2 c1 c0 # v3 = h7 h6 h5 h4 h3 h2 h1 h0; d7 d6 d5 d4 d3 d2 d1 d0 # Sequence: # TRN1 v4.16b, v0.16b, v1.16b # TRN2 v5.16b, v0.16b, v1.16b # TRN1 v6.16b, v2.16b, v3.16b # TRN2 v7.16b, v2.16b, v3.16b # Now we have # v4 = f6 e6 f4 e4 f2 e2 f0 e0; b6 a6 b4 a4 b2 a2 b0 a0 # v5 = f7 e7 f5 
e5 f3 e3 f1 e1; b7 a7 b5 a5 b3 a3 b1 a1 # v6 = h6 g6 h4 g4 h2 g2 h0 g0; d6 c6 d4 c4 d2 c2 d0 c0 # v7 = h7 g7 h5 g5 h3 g3 h1 g1; d7 c7 d5 c5 d3 c3 d1 c1 # TRN1 v0.8h, v4.8h, v6.8h # TRN2 v2.8h, v4.8h, v6.8h # TRN1 v1.8h, v5.8h, v7.8h # TRN2 v3.8h, v5.8h, v7.8h # v0 = h4 g4 f4 e4 h0 g0 f0 e0; d4 c4 b4 a4 d0 c0 b0 a0 # v1 = h5 g5 f5 e5 h1 g1 f1 e1; d5 c5 b5 a5 d1 c1 b1 a1 # v2 = h6 g6 f6 e6 h2 g2 f2 e2; d6 c6 b6 a6 d2 c2 b2 a2 # v3 = h7 g7 f7 e7 h3 g3 f3 e3; d7 c7 b7 a7 d3 c3 b3 a3 # UZP1 v4.4s, v0.4s, v1.4s # UZP2 v6.4s, v0.4s, v1.4s # UZP1 v5.4s, v2.4s, v3.4s # UZP2 v7.4s, v2.4s, v3.4s # v4 = h1 g1 f1 e1 d1 c1 b1 a1; h0 g0 f0 e0 d0 c0 b0 a0 # v5 = h3 g3 f3 e3 d3 c3 b3 a3; h2 g2 f2 e2 d2 c2 b2 a2 # v6 = h5 g5 f5 e5 d5 c5 b5 a5; h4 g4 f4 e4 d4 c4 b4 a4 # v7 = h7 g7 f7 e7 d7 c7 b7 a7; h6 g6 f6 e6 d6 c6 b6 a6 # Thus 2 8x4 blocks are transposed. TRN1 v4.16b, v0.16b, v1.16b TRN2 v5.16b, v0.16b, v1.16b TRN1 v6.16b, v2.16b, v3.16b TRN2 v7.16b, v2.16b, v3.16b TRN1 v0.8h, v4.8h, v6.8h TRN2 v2.8h, v4.8h, v6.8h TRN1 v1.8h, v5.8h, v7.8h TRN2 v3.8h, v5.8h, v7.8h UZP1 v4.4s, v0.4s, v1.4s UZP2 v6.4s, v0.4s, v1.4s UZP1 v5.4s, v2.4s, v3.4s UZP2 v7.4s, v2.4s, v3.4s ST1 {v4.16b}, [x4], 16 ST1 {v5.16b}, [x4], 16 ST1 {v6.16b}, [x4], 16 ST1 {v7.16b}, [x4], 16 SUBS x1, x1, 2 B.HS k_loop 1: CMP x1, -2 B.EQ 2f LD1 {v0.s}[0], [x2] LD1 {v0.s}[1], [x8] LD1 {v1.s}[0], [x5] LD1 {v1.s}[1], [x9] LD1 {v2.s}[0], [x6] LD1 {v2.s}[1], [x10] LD1 {v3.s}[0], [x7] LD1 {v3.s}[1], [x11] # Now we have 8x4 block of values that we will tranpose # A matrix # ---------------------------- # | | # | a0-----a3| # | b0 B00 b3| # | last block c0 c3| # | d0-----d3| # | e0-----e3| # | f0 B01 f3| # | g0 g3| # | h0-----h3| # | | # | | # --------------------------- # v0 = -; e3 e2 e1 e0 a3 a2 a1 a0 # v1 = -; f3 f2 f1 f0 b3 b2 b1 b0 # v2 = -; g3 g2 g1 g0 c3 c2 c1 c0 # v3 = -; h3 h2 h1 h0 d3 d2 d1 d0 # Sequence: # TRN1 v4.16b, v0.16b, v1.16b # TRN2 v5.16b, v0.16b, v1.16b # TRN1 v6.16b, v2.16b, v3.16b # TRN2 v7.16b, v2.16b, 
v3.16b # Now we have # v4 = -;f2 e2 f0 e0 b2 a2 b0 a0 # v5 = -;f3 e3 f1 e1 b3 a3 b1 a1 # v6 = -;h2 g2 h0 g0 d2 c2 d0 c0 # v7 = -;h3 g3 h1 g1 d3 c3 d1 c1 # TRN1 v0.8h, v4.8h, v6.8h # TRN2 v2.8h, v4.8h, v6.8h # TRN1 v1.8h, v5.8h, v7.8h # TRN2 v3.8h, v5.8h, v7.8h # v0 = -;h0 g0 f0 e0 d0 c0 b0 a0 # v1 = -;h1 g1 f1 e1 d1 c1 b1 a1 # v2 = -;h2 g2 f2 e2 d2 c2 b2 a2 # v3 = -;h3 g3 f3 e3 d3 c3 b3 a3 # Thus 1 8x4 blocks are transposed. TRN1 v4.16b, v0.16b, v1.16b TRN2 v5.16b, v0.16b, v1.16b TRN1 v6.16b, v2.16b, v3.16b TRN2 v7.16b, v2.16b, v3.16b TRN1 v0.8h, v4.8h, v6.8h TRN2 v2.8h, v4.8h, v6.8h TRN1 v1.8h, v5.8h, v7.8h TRN2 v3.8h, v5.8h, v7.8h ST1 {v0.8b}, [x4], 8 ST1 {v1.8b}, [x4], 8 ST1 {v2.8b}, [x4], 8 ST1 {v3.8b}, [x4] .p2align 4 2: RET END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
33,514
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x8c8x1-dq-packedA-aarch64-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #ifndef IGNORE_CODE_ALIGN_DIRECTIVES #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 .p2align 5 #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 .p2align 4 #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 .p2align 3 #else #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 #endif # Macro for separating instructions. For most builds, ; can be used, but for # ARM64 + Mach, ; begins a comment, and %% is used to separate instructions #if defined(__MACH__) #define XX %% #else #define XX ; #endif # params # c_stride # Args passed via stack. # TOS # |-----------| # |c_stride | 0 # |out ch indx| 8 # |params | 16 # |-----------| # void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr, # const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) #define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) XX\ BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon XX\ XX\ STP d15, d14, [sp, -16] XX\ STP d13, d12, [sp, -32] XX\ STP d11, d10, [sp, -48] XX\ STP d9, d8, [sp, -64] XX\ XX\ MOV x11, x1 XX\ /* Load output channel index */ XX\ LDR x10, [sp, 8] XX\ /* Load params */ XX\ LDR x8, [sp, 16] XX\ 
XX\ /* Load a_zero_point */ XX\ LD1R {v24.8b}, [x8] XX\ ADD x8, x8, 8 XX\ XX\ /* Load pointer to per channel zero points array */ XX\ LDR x17, [x8], 8 XX\ XX\ /* Load pointer to per channel multiplier */ XX\ LDR x13, [x8] XX\ XX\ /* Add offset to the base pointer */ XX\ ADD x17, x17, x10 XX\ /* Mul by 4 to get byte offset for multiplier */ XX\ LSL x10, x10, 2 XX\ /* Add offset to the base pointer for multiplier */ XX\ ADD x13, x13, x10 XX\ XX\ /* Load b_zero_point */ XX\ LD1 {v25.8b}, [x17] XX\ /* Load multiplier c0123 */ XX\ LD1 {v26.4s}, [x13], 16 XX\ /* Load multiplier c4567 */ XX\ LD1 {v30.4s}, [x13] XX\ XX\ EOR x12, x12, x12 XX\ EOR x13, x13, x13 XX\ XX\ EOR v8.16b, v8.16b, v8.16b XX\ EOR v9.16b, v9.16b, v9.16b XX\ EOR v10.16b, v10.16b, v10.16b XX\ EOR v11.16b, v11.16b, v11.16b XX\ EOR v12.16b, v12.16b, v12.16b XX\ EOR v13.16b, v13.16b, v13.16b XX\ EOR v14.16b, v14.16b, v14.16b XX\ EOR v15.16b, v15.16b, v15.16b XX\ EOR v16.16b, v16.16b, v16.16b XX\ EOR v17.16b, v17.16b, v17.16b XX\ EOR v18.16b, v18.16b, v18.16b XX\ EOR v19.16b, v19.16b, v19.16b XX\ EOR v20.16b, v20.16b, v20.16b XX\ EOR v21.16b, v21.16b, v21.16b XX\ EOR v22.16b, v22.16b, v22.16b XX\ EOR v23.16b, v23.16b, v23.16b XX\ XX\ /* w12 = w_row_ptr[n], x13 = w_row_ptr[n+1] */ XX\ /* x4 = x4 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ XX\ LOAD_INDEX_INSTRUCTION w12, [x4], W_INDEX_DTYPE_NUM_BYTES_ARG XX\ LOAD_INDEX_INSTRUCTION w13, [x4] XX\ /* x10 = temp_packed_w = packed_w + w_row_ptr[n] * 8 */ XX\ /* This points to the first block of nonzero value */ XX\ /* for the nth row. 
*/ XX\ ADD x10, x3, x12, LSL #3 XX\ /* x9 = temp_w_block_ids_ptr = w_block_ids_ptr (x5) + w_row_ptr[n] */ XX\ /* LSL for when elements are >1 byte */ XX\ /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ XX\ /* This points to the block id of the first block */ XX\ /* It should contain x13 - x12 number of block ids */ XX\ ADD x9, x5, x12, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG XX\ /* x8 = num_blocks that needs to be processed */ XX\ SUB x8, x13, x12 XX\ SUBS x8, x8, 2 XX\ B.LO _1_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 XX\ k_loop_w##W_INDEX_DTYPE_NUM_BITS##: XX\ /* k_loop processes two k values */ XX\ /* Load two 8x1 blocks */ XX\ LD1 {v0.8b}, [x10], 8 XX\ LD1 {v1.8b}, [x10], 8 XX\ USUBL v0.8h, v0.8b, v25.8b XX\ USUBL v1.8h, v1.8b, v25.8b XX\ XX\ /* x12 = block_id_ptr[0] */ XX\ /* x13 = block_id_ptr[1] */ XX\ LOAD_INDEX_INSTRUCTION w12, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\ LOAD_INDEX_INSTRUCTION w13, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\ /* Add offset to x2 */ XX\ /* Shift by 3 because each packed block is a block of 8x1 */ XX\ /* which 8 bytes */ XX\ ADD x16, x2, x12, LSL #3 XX\ ADD x17, x2, x13, LSL #3 XX\ XX\ /* Load two 8x1 blocks of activation */ XX\ /* First 8x1 for first channel */ XX\ /* second 8x1 for next channel */ XX\ LD1 {v2.8b}, [x16] XX\ LD1 {v3.8b}, [x17] XX\ XX\ USUBL v2.8h, v2.8b, v24.8b XX\ USUBL v3.8h, v3.8b, v24.8b XX\ XX\ /* First channel */ XX\ SMLAL v8.4s, v0.4h, v2.h[0] XX\ SMLAL2 v9.4s, v0.8h, v2.h[0] XX\ SMLAL v10.4s, v0.4h, v2.h[1] XX\ SMLAL2 v11.4s, v0.8h, v2.h[1] XX\ SMLAL v12.4s, v0.4h, v2.h[2] XX\ SMLAL2 v13.4s, v0.8h, v2.h[2] XX\ SMLAL v14.4s, v0.4h, v2.h[3] XX\ SMLAL2 v15.4s, v0.8h, v2.h[3] XX\ SMLAL v16.4s, v0.4h, v2.h[4] XX\ SMLAL2 v17.4s, v0.8h, v2.h[4] XX\ SMLAL v18.4s, v0.4h, v2.h[5] XX\ SMLAL2 v19.4s, v0.8h, v2.h[5] XX\ SMLAL v20.4s, v0.4h, v2.h[6] XX\ SMLAL2 v21.4s, v0.8h, v2.h[6] XX\ SMLAL v22.4s, v0.4h, v2.h[7] XX\ SMLAL2 v23.4s, v0.8h, v2.h[7] XX\ XX\ SUBS x8, x8, 2 XX\ /* Second 
channel */ XX\ SMLAL v8.4s, v1.4h, v3.h[0] XX\ SMLAL2 v9.4s, v1.8h, v3.h[0] XX\ SMLAL v10.4s, v1.4h, v3.h[1] XX\ SMLAL2 v11.4s, v1.8h, v3.h[1] XX\ SMLAL v12.4s, v1.4h, v3.h[2] XX\ SMLAL2 v13.4s, v1.8h, v3.h[2] XX\ SMLAL v14.4s, v1.4h, v3.h[3] XX\ SMLAL2 v15.4s, v1.8h, v3.h[3] XX\ SMLAL v16.4s, v1.4h, v3.h[4] XX\ SMLAL2 v17.4s, v1.8h, v3.h[4] XX\ SMLAL v18.4s, v1.4h, v3.h[5] XX\ SMLAL2 v19.4s, v1.8h, v3.h[5] XX\ SMLAL v20.4s, v1.4h, v3.h[6] XX\ SMLAL2 v21.4s, v1.8h, v3.h[6] XX\ SMLAL v22.4s, v1.4h, v3.h[7] XX\ SMLAL2 v23.4s, v1.8h, v3.h[7] XX\ XX\ B.HS k_loop_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ _1_w##W_INDEX_DTYPE_NUM_BITS##: XX\ CMP x8, -2 XX\ B.EQ _3_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ LD1 {v0.8b}, [x10] XX\ USUBL v0.8h, v0.8b, v25.8b XX\ XX\ /* x12 = block_id_ptr[0] */ XX\ LOAD_INDEX_INSTRUCTION w12, [x9] XX\ /* Add offset to x2 */ XX\ ADD x16, x2, x12, LSL #3 XX\ XX\ LD1 {v2.8b}, [x16] XX\ USUBL v2.8h, v2.8b, v24.8b XX\ XX\ SMLAL v8.4s, v0.4h, v2.h[0] XX\ SMLAL2 v9.4s, v0.8h, v2.h[0] XX\ SMLAL v10.4s, v0.4h, v2.h[1] XX\ SMLAL2 v11.4s, v0.8h, v2.h[1] XX\ SMLAL v12.4s, v0.4h, v2.h[2] XX\ SMLAL2 v13.4s, v0.8h, v2.h[2] XX\ SMLAL v14.4s, v0.4h, v2.h[3] XX\ SMLAL2 v15.4s, v0.8h, v2.h[3] XX\ SMLAL v16.4s, v0.4h, v2.h[4] XX\ SMLAL2 v17.4s, v0.8h, v2.h[4] XX\ SMLAL v18.4s, v0.4h, v2.h[5] XX\ SMLAL2 v19.4s, v0.8h, v2.h[5] XX\ SMLAL v20.4s, v0.4h, v2.h[6] XX\ SMLAL2 v21.4s, v0.8h, v2.h[6] XX\ SMLAL v22.4s, v0.4h, v2.h[7] XX\ SMLAL2 v23.4s, v0.8h, v2.h[7] XX\ XX\ NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 XX\ _3_w##W_INDEX_DTYPE_NUM_BITS##: XX\ /* row 0: v8, v9 */ XX\ /* row 1: v10, v11 */ XX\ /* row 2: v12, v13 */ XX\ /* row 3: v14, v15 */ XX\ /* row 4: v16, v17 */ XX\ /* row 5: v18, v19 */ XX\ /* row 6: v20, v21 */ XX\ /* row 7: v22, v23 */ XX\ XX\ /* Load c_stride & params */ XX\ LDR x16, [sp] XX\ LSL x16, x16, 2 XX\ LD1 {v24.4s}, [x6], 16 XX\ LD1 {v25.4s}, [x6] XX\ XX\ SCVTF v8.4s, v8.4s XX\ SCVTF v9.4s, v9.4s XX\ SCVTF v10.4s, v10.4s XX\ SCVTF v11.4s, v11.4s XX\ SCVTF 
v12.4s, v12.4s XX\ SCVTF v13.4s, v13.4s XX\ SCVTF v14.4s, v14.4s XX\ SCVTF v15.4s, v15.4s XX\ SCVTF v16.4s, v16.4s XX\ SCVTF v17.4s, v17.4s XX\ SCVTF v18.4s, v18.4s XX\ SCVTF v19.4s, v19.4s XX\ SCVTF v20.4s, v20.4s XX\ SCVTF v21.4s, v21.4s XX\ SCVTF v22.4s, v22.4s XX\ SCVTF v23.4s, v23.4s XX\ XX\ FMUL v8.4s, v8.4s, v26.4s XX\ FMUL v9.4s, v9.4s, v30.4s XX\ FMUL v10.4s, v10.4s, v26.4s XX\ FMUL v11.4s, v11.4s, v30.4s XX\ FMUL v12.4s, v12.4s, v26.4s XX\ FMUL v13.4s, v13.4s, v30.4s XX\ FMUL v14.4s, v14.4s, v26.4s XX\ FMUL v15.4s, v15.4s, v30.4s XX\ FMUL v16.4s, v16.4s, v26.4s XX\ FMUL v17.4s, v17.4s, v30.4s XX\ FMUL v18.4s, v18.4s, v26.4s XX\ FMUL v19.4s, v19.4s, v30.4s XX\ FMUL v20.4s, v20.4s, v26.4s XX\ FMUL v21.4s, v21.4s, v30.4s XX\ FMUL v22.4s, v22.4s, v26.4s XX\ FMUL v23.4s, v23.4s, v30.4s XX\ XX\ FADD v8.4s, v8.4s, v24.4s XX\ FADD v9.4s, v9.4s, v25.4s XX\ FADD v10.4s, v10.4s, v24.4s XX\ FADD v11.4s, v11.4s, v25.4s XX\ FADD v12.4s, v12.4s, v24.4s XX\ FADD v13.4s, v13.4s, v25.4s XX\ FADD v14.4s, v14.4s, v24.4s XX\ FADD v15.4s, v15.4s, v25.4s XX\ FADD v16.4s, v16.4s, v24.4s XX\ FADD v17.4s, v17.4s, v25.4s XX\ FADD v18.4s, v18.4s, v24.4s XX\ FADD v19.4s, v19.4s, v25.4s XX\ FADD v20.4s, v20.4s, v24.4s XX\ FADD v21.4s, v21.4s, v25.4s XX\ FADD v22.4s, v22.4s, v24.4s XX\ FADD v23.4s, v23.4s, v25.4s XX\ XX\ /* Compute c0-c7 */ XX\ XX\ ADD x9, x7, x16 XX\ CMP x0, 2 XX\ CSEL x9, x7, x9, LO XX\ XX\ ADD x10, x9, x16 XX\ CSEL x10, x9, x10, LS XX\ XX\ ADD x8, x10, x16 XX\ CMP x0, 4 XX\ CSEL x8, x10, x8, LO XX\ XX\ ADD x12, x8, x16 XX\ CSEL x12, x8, x12, LS XX\ XX\ ADD x13, x12, x16 XX\ CMP x0, 6 XX\ CSEL x13, x12, x13, LO XX\ XX\ ADD x14, x13, x16 XX\ CSEL x14, x13, x14, LS XX\ XX\ ADD x15, x14, x16 XX\ CMP x0, 8 XX\ CSEL x15, x14, x15, NE XX\ XX\ CMP x11, 8 XX\ B.NE _4_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ ST1 {v8.4s}, [x7], 16 XX\ ST1 {v9.4s}, [x7] XX\ ST1 {v10.4s}, [x9], 16 XX\ ST1 {v11.4s}, [x9] XX\ ST1 {v12.4s}, [x10], 16 XX\ ST1 {v13.4s}, [x10] XX\ ST1 {v14.4s}, [x8], 16 XX\ 
ST1 {v15.4s}, [x8] XX\ ST1 {v16.4s}, [x12], 16 XX\ ST1 {v17.4s}, [x12] XX\ ST1 {v18.4s}, [x13], 16 XX\ ST1 {v19.4s}, [x13] XX\ ST1 {v20.4s}, [x14], 16 XX\ ST1 {v21.4s}, [x14] XX\ ST1 {v22.4s}, [x15], 16 XX\ ST1 {v23.4s}, [x15] XX\ XX\ LDP d9, d8, [sp, -64] XX\ LDP d11, d10, [sp, -48] XX\ LDP d13, d12, [sp, -32] XX\ LDP d15, d14, [sp, -16] XX\ XX\ RET XX\ XX\ NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 XX\ _4_w##W_INDEX_DTYPE_NUM_BITS##: XX\ CMP x11, 4 XX\ B.LO _5_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ ST1 {v8.4s}, [x7], 16 XX\ ST1 {v10.4s}, [x9], 16 XX\ ST1 {v12.4s}, [x10], 16 XX\ ST1 {v14.4s}, [x8], 16 XX\ ST1 {v16.4s}, [x12], 16 XX\ ST1 {v18.4s}, [x13], 16 XX\ ST1 {v20.4s}, [x14], 16 XX\ ST1 {v22.4s}, [x15], 16 XX\ XX\ SUB x11, x11, 4 XX\ XX\ MOV v8.16b, v9.16b XX\ MOV v10.16b, v11.16b XX\ MOV v12.16b, v13.16b XX\ MOV v14.16b, v15.16b XX\ MOV v16.16b, v17.16b XX\ MOV v18.16b, v19.16b XX\ MOV v20.16b, v21.16b XX\ MOV v22.16b, v23.16b XX\ XX\ _5_w##W_INDEX_DTYPE_NUM_BITS##: XX\ CMP x11, 2 XX\ B.LO _6_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ ST1 {v8.2s}, [x7], 8 XX\ ST1 {v10.2s}, [x9], 8 XX\ ST1 {v12.2s}, [x10], 8 XX\ ST1 {v14.2s}, [x8], 8 XX\ ST1 {v16.2s}, [x12], 8 XX\ ST1 {v18.2s}, [x13], 8 XX\ ST1 {v20.2s}, [x14], 8 XX\ ST1 {v22.2s}, [x15], 8 XX\ XX\ SUB x11, x11, 2 XX\ XX\ EXT v8.16b, v8.16b, v8.16b, 8 XX\ EXT v10.16b, v10.16b, v10.16b, 8 XX\ EXT v12.16b, v12.16b, v12.16b, 8 XX\ EXT v14.16b, v14.16b, v14.16b, 8 XX\ EXT v16.16b, v16.16b, v16.16b, 8 XX\ EXT v18.16b, v18.16b, v18.16b, 8 XX\ EXT v20.16b, v20.16b, v20.16b, 8 XX\ EXT v22.16b, v22.16b, v22.16b, 8 XX\ XX\ _6_w##W_INDEX_DTYPE_NUM_BITS##: XX\ CMP x11, 1 XX\ B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ ST1 {v8.s}[0], [x7] XX\ ST1 {v10.s}[0], [x9] XX\ ST1 {v12.s}[0], [x10] XX\ ST1 {v14.s}[0], [x8] XX\ ST1 {v16.s}[0], [x12] XX\ ST1 {v18.s}[0], [x13] XX\ ST1 {v20.s}[0], [x14] XX\ ST1 {v22.s}[0], [x15] XX\ XX\ _7_w##W_INDEX_DTYPE_NUM_BITS##: XX\ LDP d9, d8, [sp, -64] XX\ LDP d11, d10, [sp, -48] XX\ LDP d13, d12, [sp, -32] 
XX\ LDP d15, d14, [sp, -16] XX\ XX\ RET XX\ XX\ END_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon # void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w32__aarch64_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint32_t* w_row_ptr, # const uint32_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(32, #4, #2, LDR) # void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w16__aarch64_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint16_t* w_row_ptr, # const uint16_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(16, #2, #1, LDRH) # void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w8__aarch64_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint8_t* w_row_ptr, # const uint8_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(8, #1, #0, LDRB) #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif #undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 #undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 #undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 #undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON #undef XX
ShaoxunZeng/PyTorch-Medusa
39,799
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x8c1x4-dq-packedA-aarch64-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #ifndef IGNORE_CODE_ALIGN_DIRECTIVES #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 .p2align 5 #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 .p2align 4 #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 .p2align 3 #else #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 #define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 #endif # Macro for separating instructions. For most builds, ; can be used, but for # ARM64 + Mach, ; begins a comment, and %% is used to separate instructions #if defined(__MACH__) #define XX %% #else #define XX ; #endif .macro TRANSPOSE_4X4_S32 vin0, vin1, vin2, vin3, temp0, temp1, temp2, temp3 TRN1 \temp0\().4s, \vin0\().4s, \vin1\().4s TRN2 \temp1\().4s, \vin0\().4s, \vin1\().4s TRN1 \temp2\().4s, \vin2\().4s, \vin3\().4s TRN2 \temp3\().4s, \vin2\().4s, \vin3\().4s TRN1 \vin0\().2d, \temp0\().2d, \temp2\().2d TRN1 \vin1\().2d, \temp1\().2d, \temp3\().2d TRN2 \vin2\().2d, \temp0\().2d, \temp2\().2d TRN2 \vin3\().2d, \temp1\().2d, \temp3\().2d .endm # params # c_stride # Args passed via stack. 
# TOS # |-----------| # |c_stride | 0 # |out ch indx| 8 # |params | 16 # |-----------| # void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr, # const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) #define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) XX\ BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon XX\ XX\ STP d15, d14, [sp, -16] XX\ STP d13, d12, [sp, -32] XX\ STP d11, d10, [sp, -48] XX\ STP d9, d8, [sp, -64] XX\ XX\ MOV x11, x1 XX\ /* Load output channel index */ XX\ LDR x10, [sp, 8] XX\ /* Load params */ XX\ LDR x8, [sp, 16] XX\ XX\ /* Load a_zero_point */ XX\ LD1R {v24.8b}, [x8] XX\ ADD x8, x8, 8 XX\ XX\ /* Load pointer to per channel zero points array */ XX\ LDR x17, [x8], 8 XX\ XX\ /* Load pointer to per channel multiplier */ XX\ LDR x13, [x8] XX\ XX\ /* Add offset to the base pointer */ XX\ ADD x17, x17, x10 XX\ /* Mul by 4 to get byte offset for multiplier */ XX\ LSL x10, x10, 2 XX\ /* Add offset to the base pointer for multiplier */ XX\ ADD x13, x13, x10 XX\ XX\ /* Load b_zero_point */ XX\ LD1 {v25.8b}, [x17] XX\ /* Load multiplier c0123 */ XX\ LD1 {v26.4s}, [x13], 16 XX\ /* Load multiplier c4567 */ XX\ LD1 {v30.4s}, [x13] XX\ XX\ EOR x12, x12, x12 XX\ EOR x13, x13, x13 XX\ XX\ CMP x1, 1 XX\ B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 XX\ _0_w##W_INDEX_DTYPE_NUM_BITS##: XX\ /* v8 := zero */ XX\ EOR v8.16b, v8.16b, v8.16b XX\ /* v9 := zero */ XX\ EOR v9.16b, 
v9.16b, v9.16b XX\ XX\ DUP v29.8b, v25.b[0] XX\ /* w12 = w_row_ptr[n], x13 = w_row_ptr[n+1] */ XX\ /* x4 = x4 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ XX\ LOAD_INDEX_INSTRUCTION w12, [x4], W_INDEX_DTYPE_NUM_BYTES_ARG XX\ LOAD_INDEX_INSTRUCTION w13, [x4] XX\ /* x10 = temp_packed_w = packed_w + w_row_ptr[n] * 4 */ XX\ /* This points to the first block of nonzero value */ XX\ /* for the nth row. */ XX\ ADD x10, x3, x12, LSL #2 XX\ /* x9 = temp_w_block_ids_ptr = w_block_ids_ptr (x5) + w_row_ptr[n] */ XX\ /* LSL for when elements are >1 byte */ XX\ /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ XX\ /* This points to the block id of the first block */ XX\ /* It should contain x13 - x12 number of block ids */ XX\ ADD x9, x5, x12, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG XX\ /* x8 = num_blocks that needs to be processed */ XX\ SUB x8, x13, x12 XX\ SUBS x8, x8, 2 XX\ B.LO _1_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ k_loop_w##W_INDEX_DTYPE_NUM_BITS##: XX\ /* b0-7 (channel 0) */ XX\ LD1 {v10.8b}, [x10], 8 XX\ USUBL v10.8h, v10.8b, v29.8b XX\ XX\ /* x12 = block_id_ptr[0] */ XX\ /* x13 = block_id_ptr[1] */ XX\ LOAD_INDEX_INSTRUCTION w12, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\ LOAD_INDEX_INSTRUCTION w13, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\ /* Add offset to x2 */ XX\ /* Shift by 5 because each packed block is a block of 8x4 */ XX\ /* which 32 bytes */ XX\ ADD x16, x2, x12, LSL #5 XX\ ADD x17, x2, x13, LSL #5 XX\ XX\ LD1 {v0.8b}, [x16], 8 XX\ LD1 {v1.8b}, [x16], 8 XX\ LD1 {v2.8b}, [x16], 8 XX\ LD1 {v3.8b}, [x16] XX\ LD1 {v4.8b}, [x17], 8 XX\ LD1 {v5.8b}, [x17], 8 XX\ LD1 {v6.8b}, [x17], 8 XX\ LD1 {v7.8b}, [x17] XX\ XX\ USUBL v0.8h, v0.8b, v24.8b XX\ USUBL v1.8h, v1.8b, v24.8b XX\ USUBL v2.8h, v2.8b, v24.8b XX\ USUBL v3.8h, v3.8b, v24.8b XX\ USUBL v4.8h, v4.8b, v24.8b XX\ USUBL v5.8h, v5.8b, v24.8b XX\ USUBL v6.8h, v6.8b, v24.8b XX\ USUBL v7.8h, v7.8b, v24.8b XX\ XX\ SMLAL v8.4s, v0.4h, v10.h[0] XX\ SMLAL2 v9.4s, v0.8h, v10.h[0] XX\ SMLAL v8.4s, v1.4h, v10.h[1] XX\ SMLAL2 
v9.4s, v1.8h, v10.h[1] XX\ SMLAL v8.4s, v2.4h, v10.h[2] XX\ SMLAL2 v9.4s, v2.8h, v10.h[2] XX\ SMLAL v8.4s, v3.4h, v10.h[3] XX\ SMLAL2 v9.4s, v3.8h, v10.h[3] XX\ SMLAL v8.4s, v4.4h, v10.h[4] XX\ SMLAL2 v9.4s, v4.8h, v10.h[4] XX\ SMLAL v8.4s, v5.4h, v10.h[5] XX\ SMLAL2 v9.4s, v5.8h, v10.h[5] XX\ SMLAL v8.4s, v6.4h, v10.h[6] XX\ SMLAL2 v9.4s, v6.8h, v10.h[6] XX\ SUBS x8, x8, 2 XX\ SMLAL v8.4s, v7.4h, v10.h[7] XX\ SMLAL2 v9.4s, v7.8h, v10.h[7] XX\ XX\ XX\ B.HS k_loop_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ _1_w##W_INDEX_DTYPE_NUM_BITS##: XX\ CMP x8, -2 XX\ B.EQ _2_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ /* b0-7 (channel 0) */ XX\ LD1R {v10.4s}, [x10] XX\ USUBL v10.8h, v10.8b, v29.8b XX\ XX\ /* x12 = block_id_ptr[0] */ XX\ LOAD_INDEX_INSTRUCTION w12, [x9] XX\ /* Add offset to x2 */ XX\ /* Shift by 5 because each packed block is a block of 8x4 */ XX\ /* which 32 bytes */ XX\ ADD x16, x2, x12, LSL #5 XX\ XX\ LD1 {v0.8b}, [x16], 8 XX\ LD1 {v1.8b}, [x16], 8 XX\ LD1 {v2.8b}, [x16], 8 XX\ LD1 {v3.8b}, [x16] XX\ XX\ USUBL v0.8h, v0.8b, v24.8b XX\ USUBL v1.8h, v1.8b, v24.8b XX\ USUBL v2.8h, v2.8b, v24.8b XX\ USUBL v3.8h, v3.8b, v24.8b XX\ XX\ SMLAL v8.4s, v0.4h, v10.h[0] XX\ SMLAL2 v9.4s, v0.8h, v10.h[0] XX\ SMLAL v8.4s, v1.4h, v10.h[1] XX\ SMLAL2 v9.4s, v1.8h, v10.h[1] XX\ SMLAL v8.4s, v2.4h, v10.h[2] XX\ SMLAL2 v9.4s, v2.8h, v10.h[2] XX\ SMLAL v8.4s, v3.4h, v10.h[3] XX\ SMLAL2 v9.4s, v3.8h, v10.h[3] XX\ XX\ NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 XX\ _2_w##W_INDEX_DTYPE_NUM_BITS##: XX\ /* Store result on stack */ XX\ XX\ /* -64 because all d8-d15 are on stack */ XX\ /* + 256 bytes of buffer when nr = 1 */ XX\ /* 256 because we are doing 8x8 block with each value being 4 bytes */ XX\ /* Thus 64 * 4 = 256 */ XX\ /* 256 + 64 = 320 */ XX\ /* This is needed because after processing all nrs we will */ XX\ /* load 256 bytes from stack. 
*/ XX\ /* Thus we will load accumulators back in v8, v9, v10, v11, v12, v13, v14, v15 */ XX\ /* v16, v17, v18, v19, v20, v21, v22, v23 */ XX\ /* When nr < 8, say nr = 1, extra v values will be fetched from stack which may overlap */ XX\ /* with other parts of stack storing local variables. To avoid that we just */ XX\ /* create a buffer of 256 bytes inbetween to make sure pointer increment */ XX\ /* never produces address that is beyond the stack frame of this function. */ XX\ SUB x9, sp, 320 XX\ /* Each iteration produce 8 values each of 4 bytes */ XX\ /* Thus 8 x 4 = 32 bytes 2^5 */ XX\ /* In this implementation, first value will be stored at */ XX\ /* 1st value: sp - 64 - r1 * 32 */ XX\ /* 2nd value: sp - 12 - (r1 - 1) * 32 */ XX\ /* and so on. */ XX\ SUB x9, x9, x1, LSL #5 XX\ ST1 {v8.4s}, [x9], 16 XX\ ST1 {v9.4s}, [x9] XX\ XX\ /* Shift zero point vector by 8 to load */ XX\ /* zero point of the next channel */ XX\ SRI v25.2d, v25.2d, #8 XX\ /* Check if nr >=1 */ XX\ SUBS x1, x1, 1 XX\ BHI _0_w##W_INDEX_DTYPE_NUM_BITS XX\ _3_w##W_INDEX_DTYPE_NUM_BITS##: XX\ /* First load all the accumulators from stack */ XX\ /* Load nr */ XX\ SUB x9, sp, 320 XX\ SUB x9, x9, x11, LSL #5 XX\ /* Now load v8-v15 */ XX\ /* This is 8x4 block (nrxmr) */ XX\ /* We will transpose this to 4x8 (mrxnr) */ XX\ /* v8, v9 : x00, x10, x20, x30; x40, x50, x60, x70 */ XX\ /* v10, v11 : x01, x11, x21, x31; x41, x51, x61, x71 */ XX\ /* v12, v13 : x02, x12, x22, x32; x42, x52, x62, x72 */ XX\ /* v14, v15 : x03, x13, x23, x33; x43, x53, x63, x73 */ XX\ /* */ XX\ /* v16, v17 : x04, x14, x24, x34; x44, x54, x64, x74 */ XX\ /* v18, v19 : x05, x15, x25, x35; x45, x55, x65, x75 */ XX\ /* v20, v21 : x06, x16, x26, x36; x46, x56, x66, x76 */ XX\ /* v22, v23 : x07, x17, x27, x37; x47, x57, x67, x77 */ XX\ LD1 {v8.4s}, [x9], 16 XX\ LD1 {v9.4s}, [x9], 16 XX\ LD1 {v10.4s}, [x9], 16 XX\ LD1 {v11.4s}, [x9], 16 XX\ LD1 {v12.4s}, [x9], 16 XX\ LD1 {v13.4s}, [x9], 16 XX\ LD1 {v14.4s}, [x9], 16 XX\ LD1 {v15.4s}, 
[x9], 16 XX\ LD1 {v16.4s}, [x9], 16 XX\ LD1 {v17.4s}, [x9], 16 XX\ LD1 {v18.4s}, [x9], 16 XX\ LD1 {v19.4s}, [x9], 16 XX\ LD1 {v20.4s}, [x9], 16 XX\ LD1 {v21.4s}, [x9], 16 XX\ LD1 {v22.4s}, [x9], 16 XX\ LD1 {v23.4s}, [x9] XX\ XX\ /* We can tranpose one 4x4 block using macro */ XX\ /* TRANSPOSE_4X4_S32 v8, v10, v12, v14, v0, v1, v2, v3 */ XX\ /* After this we have */ XX\ /* v8 : x00, x01, x02, x03 */ XX\ /* v10 : x10, x11, x12, x13 */ XX\ /* v12 : x20, x21, x22, x23 */ XX\ /* v14 : x30, x31, x32, x33 */ XX\ /* Then using */ XX\ /* TRANSPOSE_4X4_S32 v16, v18, v20, v22, v4, v5, v6, v7 */ XX\ /* We get */ XX\ /* v16 : x04, x05, x06, x07 */ XX\ /* v18 : x14, x15, x16, x17 */ XX\ /* v20 : x24, x25, x26, x27 */ XX\ /* v22 : x34, x35, x36, x37 */ XX\ /* Similarly we can transpose other two 4x4 blocks and we get */ XX\ /* tranposed 8x8 */ XX\ XX\ TRANSPOSE_4X4_S32 v8, v10, v12, v14, v0, v1, v2, v3 XX\ TRANSPOSE_4X4_S32 v16, v18, v20, v22, v4, v5, v6, v7 XX\ TRANSPOSE_4X4_S32 v9, v11, v13, v15, v0, v1, v2, v3 XX\ TRANSPOSE_4X4_S32 v17, v19, v21, v23, v4, v5, v6, v7 XX\ XX\ /* row 0: v8, v16 */ XX\ /* row 1: v10, v18 */ XX\ /* row 2: v12, v20 */ XX\ /* row 3: v14, v22 */ XX\ /* row 4: v9, v17 */ XX\ /* row 5: v11, v19 */ XX\ /* row 6: v13, v21 */ XX\ /* row 7: v15, v23 */ XX\ XX\ /* Load c_stride & params */ XX\ LDR x16, [sp] XX\ LSL x16, x16, 2 XX\ LD1 {v24.4s}, [x6], 16 XX\ LD1 {v25.4s}, [x6] XX\ XX\ SCVTF v8.4s, v8.4s XX\ SCVTF v9.4s, v9.4s XX\ SCVTF v10.4s, v10.4s XX\ SCVTF v11.4s, v11.4s XX\ SCVTF v12.4s, v12.4s XX\ SCVTF v13.4s, v13.4s XX\ SCVTF v14.4s, v14.4s XX\ SCVTF v15.4s, v15.4s XX\ SCVTF v16.4s, v16.4s XX\ SCVTF v17.4s, v17.4s XX\ SCVTF v18.4s, v18.4s XX\ SCVTF v19.4s, v19.4s XX\ SCVTF v20.4s, v20.4s XX\ SCVTF v21.4s, v21.4s XX\ SCVTF v22.4s, v22.4s XX\ SCVTF v23.4s, v23.4s XX\ XX\ FMUL v8.4s, v8.4s, v26.4s XX\ FMUL v16.4s, v16.4s, v30.4s XX\ FMUL v10.4s, v10.4s, v26.4s XX\ FMUL v18.4s, v18.4s, v30.4s XX\ FMUL v12.4s, v12.4s, v26.4s XX\ FMUL v20.4s, v20.4s, v30.4s 
XX\ FMUL v14.4s, v14.4s, v26.4s XX\ FMUL v22.4s, v22.4s, v30.4s XX\ FMUL v9.4s, v9.4s, v26.4s XX\ FMUL v17.4s, v17.4s, v30.4s XX\ FMUL v11.4s, v11.4s, v26.4s XX\ FMUL v19.4s, v19.4s, v30.4s XX\ FMUL v13.4s, v13.4s, v26.4s XX\ FMUL v21.4s, v21.4s, v30.4s XX\ FMUL v15.4s, v15.4s, v26.4s XX\ FMUL v23.4s, v23.4s, v30.4s XX\ XX\ FADD v8.4s, v8.4s, v24.4s XX\ FADD v16.4s, v16.4s, v25.4s XX\ FADD v10.4s, v10.4s, v24.4s XX\ FADD v18.4s, v18.4s, v25.4s XX\ FADD v12.4s, v12.4s, v24.4s XX\ FADD v20.4s, v20.4s, v25.4s XX\ FADD v14.4s, v14.4s, v24.4s XX\ FADD v22.4s, v22.4s, v25.4s XX\ FADD v9.4s, v9.4s, v24.4s XX\ FADD v17.4s, v17.4s, v25.4s XX\ FADD v11.4s, v11.4s, v24.4s XX\ FADD v19.4s, v19.4s, v25.4s XX\ FADD v13.4s, v13.4s, v24.4s XX\ FADD v21.4s, v21.4s, v25.4s XX\ FADD v15.4s, v15.4s, v24.4s XX\ FADD v23.4s, v23.4s, v25.4s XX\ XX\ /* Compute c0-c7 */ XX\ XX\ ADD x9, x7, x16 XX\ CMP x0, 2 XX\ CSEL x9, x7, x9, LO XX\ XX\ ADD x10, x9, x16 XX\ CSEL x10, x9, x10, LS XX\ XX\ ADD x8, x10, x16 XX\ CMP x0, 4 XX\ CSEL x8, x10, x8, LO XX\ XX\ ADD x12, x8, x16 XX\ CSEL x12, x8, x12, LS XX\ XX\ ADD x13, x12, x16 XX\ CMP x0, 6 XX\ CSEL x13, x12, x13, LO XX\ XX\ ADD x14, x13, x16 XX\ CSEL x14, x13, x14, LS XX\ XX\ ADD x15, x14, x16 XX\ CMP x0, 8 XX\ CSEL x15, x14, x15, NE XX\ XX\ CMP x11, 8 XX\ B.NE _4_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ ST1 {v8.4s}, [x7], 16 XX\ ST1 {v16.4s}, [x7] XX\ ST1 {v10.4s}, [x9], 16 XX\ ST1 {v18.4s}, [x9] XX\ ST1 {v12.4s}, [x10], 16 XX\ ST1 {v20.4s}, [x10] XX\ ST1 {v14.4s}, [x8], 16 XX\ ST1 {v22.4s}, [x8] XX\ ST1 {v9.4s}, [x12], 16 XX\ ST1 {v17.4s}, [x12] XX\ ST1 {v11.4s}, [x13], 16 XX\ ST1 {v19.4s}, [x13] XX\ ST1 {v13.4s}, [x14], 16 XX\ ST1 {v21.4s}, [x14] XX\ ST1 {v15.4s}, [x15], 16 XX\ ST1 {v23.4s}, [x15] XX\ XX\ LDP d9, d8, [sp, -64] XX\ LDP d11, d10, [sp, -48] XX\ LDP d13, d12, [sp, -32] XX\ LDP d15, d14, [sp, -16] XX\ XX\ RET XX\ XX\ NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 XX\ _4_w##W_INDEX_DTYPE_NUM_BITS##: XX\ CMP x11, 4 XX\ B.LO 
_5_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ ST1 {v8.4s}, [x7], 16 XX\ ST1 {v10.4s}, [x9], 16 XX\ ST1 {v12.4s}, [x10], 16 XX\ ST1 {v14.4s}, [x8], 16 XX\ ST1 {v9.4s}, [x12], 16 XX\ ST1 {v11.4s}, [x13], 16 XX\ ST1 {v13.4s}, [x14], 16 XX\ ST1 {v15.4s}, [x15], 16 XX\ XX\ SUB x11, x11, 4 XX\ XX\ MOV v8.16b, v16.16b XX\ MOV v10.16b, v18.16b XX\ MOV v12.16b, v20.16b XX\ MOV v14.16b, v22.16b XX\ MOV v9.16b, v17.16b XX\ MOV v11.16b, v19.16b XX\ MOV v13.16b, v21.16b XX\ MOV v15.16b, v23.16b XX\ XX\ _5_w##W_INDEX_DTYPE_NUM_BITS##: XX\ CMP x11, 2 XX\ B.LO _6_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ ST1 {v8.2s}, [x7], 8 XX\ ST1 {v10.2s}, [x9], 8 XX\ ST1 {v12.2s}, [x10], 8 XX\ ST1 {v14.2s}, [x8], 8 XX\ ST1 {v9.2s}, [x12], 8 XX\ ST1 {v11.2s}, [x13], 8 XX\ ST1 {v13.2s}, [x14], 8 XX\ ST1 {v15.2s}, [x15], 8 XX\ XX\ SUB x11, x11, 2 XX\ XX\ EXT v8.16b, v8.16b, v8.16b, 8 XX\ EXT v10.16b, v10.16b, v10.16b, 8 XX\ EXT v12.16b, v12.16b, v12.16b, 8 XX\ EXT v14.16b, v14.16b, v14.16b, 8 XX\ EXT v9.16b, v9.16b, v9.16b, 8 XX\ EXT v11.16b, v11.16b, v11.16b, 8 XX\ EXT v13.16b, v13.16b, v13.16b, 8 XX\ EXT v15.16b, v15.16b, v15.16b, 8 XX\ XX\ _6_w##W_INDEX_DTYPE_NUM_BITS##: XX\ CMP x11, 1 XX\ B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\ XX\ ST1 {v8.s}[0], [x7] XX\ ST1 {v10.s}[0], [x9] XX\ ST1 {v12.s}[0], [x10] XX\ ST1 {v14.s}[0], [x8] XX\ ST1 {v9.s}[0], [x12] XX\ ST1 {v11.s}[0], [x13] XX\ ST1 {v13.s}[0], [x14] XX\ ST1 {v15.s}[0], [x15] XX\ XX\ _7_w##W_INDEX_DTYPE_NUM_BITS##: XX\ LDP d9, d8, [sp, -64] XX\ LDP d11, d10, [sp, -48] XX\ LDP d13, d12, [sp, -32] XX\ LDP d15, d14, [sp, -16] XX\ XX\ RET XX\ XX\ END_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon # void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w32__aarch64_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint32_t* w_row_ptr, # const uint32_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const 
union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(32, #4, #2, LDR) # void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w16__aarch64_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint16_t* w_row_ptr, # const uint16_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(16, #2, #1, LDRH) # void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w8__aarch64_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint8_t* w_row_ptr, # const uint8_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(8, #1, #0, LDRB) #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif #undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 #undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 #undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 #undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON #undef XX
ShaoxunZeng/PyTorch-Medusa
26,974
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/4x8c8x1-dq-packedA-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> #ifndef __APPLE__ #define NDEF_APPLE_SYMBOLS .arch armv7-a; .fpu neon #else #define NDEF_APPLE_SYMBOLS #endif # r0 mr # r1 nr # r2 packed_a # r3 packed_w # d14 a_zero_point # d15 b_zero_point ## Stack # 4 a_stride # 4 packed_w # 4 w_row_ptr # 4 w_block_ids_ptr # 4 b # 4 c # 4 c_stride # 4 output channel index # 4 quantization_params # -- .syntax unified # Args passed via stack. # TOS # |----------------| # |packed_w | 0 # |w_row_ptr | 4 # |w_block_ids_ptr | 8 # |b | 12 # |c | 16 # |c_stride | 20 # |out ch indx | 24 # |params | 28 # |----------------| # # After loading w pointer in ip reg. # And after pushing r4-r9 and d8-d15 on stack # |----------------| # |d8 - d15 | 0 # |r4 - r11,lr | 64 # |w_row_ptr | 100 # |w_block_ids_ptr | 104 # |b | 108 # |c | 112 # |c_stride | 116 # |out ch indx | 120 # |params | 124 # |----------------| # # void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr, # const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) #define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) ;\ BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon ;\ .arm ;\ NDEF_APPLE_SYMBOLS ;\ ;\ PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\ VPUSH 
{d8-d15} ;\ ;\ /* Store nr in r11 as well for late user. */ ;\ MOV r11, r1 ;\ /* Load output channel index */ ;\ LDR r5, [sp, 120] ;\ /* Load quantization params */ ;\ /* - r7 = quantization_params */ ;\ LDR r7, [sp, 124] ;\ /* Load input_zero_point */ ;\ VLD1.8 {d14[]}, [r7] ;\ ADD r7, r7, 4 ;\ /* Load pointer to per channel zero points array */ ;\ LDR r4, [r7] ;\ /* Add output_channel_index to the b_zero_point pointer */ ;\ ADD r4, r4, r5 ;\ ;\ /* Load w_row_ptr + n */ ;\ LDR r5, [sp, 100] ;\ /* r7 = blocks_id_ptr */ ;\ LDR r7, [sp, 104] ;\ ;\ VEOR q8, q8, q8 ;\ VEOR q9, q9, q9 ;\ VEOR q10, q10, q10 ;\ VEOR q11, q11, q11 ;\ VEOR q12, q12, q12 ;\ VEOR q13, q13, q13 ;\ VEOR q14, q14, q14 ;\ VEOR q15, q15, q15 ;\ VLD1.8 {d15}, [r4] ;\ /* ip = w_row_ptr[n], lr = w_row_ptr[n+1] */ ;\ /* r5 = r5 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ ;\ LOAD_INDEX_INSTRUCTION ip, [r5], W_INDEX_DTYPE_NUM_BYTES_ARG ;\ LOAD_INDEX_INSTRUCTION lr, [r5] ;\ /* r6 = temp_packed_w = packed_w + w_row_ptr[n] * 8 */ ;\ /* * 8 because each block contains 8 values */ ;\ /* This points to the first block of nonzero value */ ;\ /* for the nth row. */ ;\ ADD r6, r3, ip, LSL #3 ;\ /* r9 = temp_w_block_ids_ptr = w_block_ids_ptr (r7) + w_row_ptr[n] */ ;\ /* LSL for when elements are >1 byte */ ;\ /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ ;\ /* This points to the col block id of the first block */ ;\ /* It should contain lr - ip number of block ids */ ;\ /* Note that in this kernel sparsity pattern is 8x1. */ ;\ /* Thus each block contains only 1 k as opposed to */ ;\ /* 1x4 where each block contains 4 k. */ ;\ ADD r9, r7, ip, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG ;\ /* r8 = num_blocks that needs to be processed */ ;\ SUB r8, lr, ip ;\ SUBS r8, r8, 2 ;\ BLO _1_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ .p2align 5 ;\ k_loop_w##W_INDEX_DTYPE_NUM_BITS##: ;\ /* Load 2 non zero blocks of weights. Each block = 8x1. */ ;\ VLD1.8 {d0}, [r6]! ;\ VLD1.8 {d2}, [r6]! 
;\ ;\ /* ip = block_id_ptr[0] */ ;\ /* lr = block_id_ptr[1] */ ;\ LOAD_INDEX_INSTRUCTION ip, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\ LOAD_INDEX_INSTRUCTION lr, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\ ;\ /* Add offset to r2 */ ;\ /* Shift by 4 because each packed block is a block of 4x1 */ ;\ /* which 4 bytes */ ;\ ADD r10, r2, ip, LSL #2 ;\ /* q9 = vxb */ ;\ VSUBL.U8 q0, d0, d15 ;\ VSUBL.U8 q1, d2, d15 ;\ ;\ /* d4 = 4x1 transposed */ ;\ VLD1.32 {d4[]}, [r10] ;\ ;\ ADD r10, r2, lr, LSL #2 ;\ ;\ VSUBL.U8 q2, d4, d14 /* vxa0_t */ ;\ ;\ /* d5 = next 4x1 transposed */ ;\ VLD1.32 {d6[]}, [r10] ;\ ;\ VSUBL.U8 q3, d6, d14 /* vxa1_t */ ;\ ;\ /* q0 = d0, d1 = 8x1 block of weight for k */ ;\ /* q1 = d2, d3 = 8x1 block of weight for k + 1 */ ;\ /* q2's d4 = 4x1 block of activation for k */ ;\ /* q3's d6 = 4x1 block of activation for k + 1 */ ;\ ;\ /* Generate 4x8 block as two 4x4 blocks */ ;\ ;\ VMLAL.S16 q8, d0, d4[0] ;\ VMLAL.S16 q9, d1, d4[0] ;\ VMLAL.S16 q10, d0, d4[1] ;\ VMLAL.S16 q11, d1, d4[1] ;\ VMLAL.S16 q12, d0, d4[2] ;\ VMLAL.S16 q13, d1, d4[2] ;\ VMLAL.S16 q14, d0, d4[3] ;\ VMLAL.S16 q15, d1, d4[3] ;\ ;\ VMLAL.S16 q8, d2, d6[0] ;\ VMLAL.S16 q9, d3, d6[0] ;\ VMLAL.S16 q10, d2, d6[1] ;\ VMLAL.S16 q11, d3, d6[1] ;\ VMLAL.S16 q12, d2, d6[2] ;\ VMLAL.S16 q13, d3, d6[2] ;\ VMLAL.S16 q14, d2, d6[3] ;\ VMLAL.S16 q15, d3, d6[3] ;\ ;\ SUBS r8, r8, 2 ;\ ;\ BHS k_loop_w##W_INDEX_DTYPE_NUM_BITS ;\ _1_w##W_INDEX_DTYPE_NUM_BITS##: ;\ CMP r8, -2 ;\ BEQ _3_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ /* Load last nonzero block */ ;\ /* For this we will load 4 8 bit values as one 32 bit value */ ;\ VLD1.8 {d0}, [r6] ;\ /* q9 = vxb */ ;\ VSUBL.U8 q0, d0, d15 ;\ ;\ /* ip = block_id_ptr[0] */ ;\ LOAD_INDEX_INSTRUCTION ip, [r9] ;\ ;\ /* Add offset to r2 */ ;\ /* Shift by 4 because each packed block is a block of 4x1 */ ;\ /* which 4 bytes */ ;\ ADD r10, r2, ip, LSL #2 ;\ ;\ VLD1.32 {d4[]}, [r10]! 
;\ ;\ VSUBL.U8 q2, d4, d14 /* vxa0_t */ ;\ ;\ VMLAL.S16 q8, d0, d4[0] ;\ VMLAL.S16 q9, d1, d4[0] ;\ VMLAL.S16 q10, d0, d4[1] ;\ VMLAL.S16 q11, d1, d4[1] ;\ VMLAL.S16 q12, d0, d4[2] ;\ VMLAL.S16 q13, d1, d4[2] ;\ VMLAL.S16 q14, d0, d4[3] ;\ VMLAL.S16 q15, d1, d4[3] ;\ ;\ ;\ .p2align 4 ;\ _3_w##W_INDEX_DTYPE_NUM_BITS##: ;\ /* Load output channel index */ ;\ LDR r5, [sp, 120] ;\ /* Load quantization params */ ;\ /* - r7 = quantization_params */ ;\ LDR r7, [sp, 124] ;\ ADD r7, r7, 8 ;\ /* Load pointer to per channel requant scale */ ;\ LDR r7, [r7] ;\ /* Now r7 has the base_addr + offset for multipliers */ ;\ ADD r7, r7, r5, LSL #2 ;\ ;\ LDR r6, [sp, 108] ;\ /* Load q6: vmultiplier_c0123 */ ;\ VLD1.32 {d12, d13}, [r7]! ;\ /* Load q7: vmultiplier_c4567 */ ;\ VLD1.32 {d14, d15}, [r7] ;\ VCVT.F32.S32 q8, q8 ;\ VCVT.F32.S32 q9, q9 ;\ VCVT.F32.S32 q10, q10 ;\ VLD1.32 {q0}, [r6]! ;\ VLD1.32 {q1}, [r6] ;\ ;\ VCVT.F32.S32 q11, q11 ;\ VCVT.F32.S32 q12, q12 ;\ VCVT.F32.S32 q13, q13 ;\ VCVT.F32.S32 q14, q14 ;\ VCVT.F32.S32 q15, q15 ;\ ;\ VMUL.F32 q8, q8, q6 ;\ VMUL.F32 q9, q9, q7 ;\ VMUL.F32 q10, q10, q6 ;\ VMUL.F32 q11, q11, q7 ;\ VMUL.F32 q12, q12, q6 ;\ VMUL.F32 q13, q13, q7 ;\ VMUL.F32 q14, q14, q6 ;\ VMUL.F32 q15, q15, q7 ;\ ;\ VADD.F32 q8, q8, q0 ;\ VADD.F32 q9, q9, q1 ;\ VADD.F32 q10, q10, q0 ;\ VADD.F32 q11, q11, q1 ;\ VADD.F32 q12, q12, q0 ;\ VADD.F32 q13, q13, q1 ;\ VADD.F32 q14, q14, q0 ;\ VADD.F32 q15, q15, q1 ;\ ;\ /* Load c, c_stride: */ ;\ /* - r1 = c */ ;\ /* - r9 = c_stride */ ;\ LDR r1, [sp, 112] ;\ LDR r9, [sp, 116] ;\ LSL r9, r9, 2 ;\ ;\ /* r1 = c0 = c pointer */ ;\ ;\ CMP r0, 2 ;\ /* r2 = c1 */ ;\ ADD r2, r1, r9 ;\ MOVLO r2, r1 ;\ ;\ /* r3 = c2 */ ;\ ADD r3, r2, r9 ;\ MOVLS r3, r2 ;\ ;\ CMP r0, 4 ;\ /* r4 = c3 */ ;\ ADD r4, r3, r9 ;\ MOVNE r4, r3 ;\ ;\ CMP r11, 8 ;\ BNE _4_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ VST1.32 {q8}, [r1]! ;\ VST1.32 {q10}, [r2]! ;\ VST1.32 {q12}, [r3]! ;\ VST1.32 {q14}, [r4]! 
;\ VST1.32 {q9}, [r1] ;\ VST1.32 {q11}, [r2] ;\ VST1.32 {q13}, [r3] ;\ VST1.32 {q15}, [r4] ;\ ;\ VPOP {d8-d15} ;\ POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\ BX lr ;\ ;\ .p2align 3 ;\ _4_w##W_INDEX_DTYPE_NUM_BITS##: ;\ CMP r11, 4 ;\ BLO _5_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ VST1.32 {q8}, [r1]! ;\ VST1.32 {q10}, [r2]! ;\ VST1.32 {q12}, [r3]! ;\ VST1.32 {q14}, [r4]! ;\ ;\ SUB r11, 4 ;\ ;\ VMOV.32 q8, q9 ;\ VMOV.32 q10, q11 ;\ VMOV.32 q12, q13 ;\ VMOV.32 q14, q15 ;\ ;\ _5_w##W_INDEX_DTYPE_NUM_BITS##: ;\ CMP r11, 2 ;\ BLO _6_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ VST1.32 {d16}, [r1]! ;\ VST1.32 {d20}, [r2]! ;\ VST1.32 {d24}, [r3]! ;\ VST1.32 {d28}, [r4]! ;\ ;\ SUB r11, 2 ;\ ;\ VEXT.32 q8, q8, 2 ;\ VEXT.32 q10, q10, 2 ;\ VEXT.32 q12, q12, 2 ;\ VEXT.32 q14, q14, 2 ;\ ;\ _6_w##W_INDEX_DTYPE_NUM_BITS##: ;\ TEQ r11, 0 ;\ BEQ _7_w##W_INDEX_DTYPE_NUM_BITS ;\ ;\ VST1.32 {d16[0]}, [r1] ;\ VST1.32 {d20[0]}, [r2] ;\ VST1.32 {d24[0]}, [r3] ;\ VST1.32 {d28[0]}, [r4] ;\ ;\ _7_w##W_INDEX_DTYPE_NUM_BITS##: ;\ VPOP {d8-d15} ;\ POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\ BX lr ;\ ;\ END_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon # void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w32__aarch32_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint32_t* w_row_ptr, # const uint32_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(32, #4, #2, LDR) # void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w16__aarch32_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint16_t* w_row_ptr, # const uint16_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union 
pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(16, #2, #1, LDRH) # void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w8__aarch32_neon( # size_t mr, # size_t nr, # const uint8_t* a_packed, # const uint8_t* packed_w, # const uint8_t* w_row_ptr, # const uint8_t* w_block_ids_ptr, # const float* b, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1]) MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(8, #1, #0, LDRB) #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif #undef NDEF_APPLE_SYMBOLS #undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON
ShaoxunZeng/PyTorch-Medusa
27,616
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/8x8-aarch64-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> # Args passed via 8 registers (64 bytes) # x0: mr # x1: nr # x2: kc # x3: ks # x4: a # x5: w # x6: c # x7: c_stride # # Args passed via stack. # TOS # |-----------| # |out ch indx| 0 # |params | 8 # |-----------| # void pytorch_q8conv_ukernel_8x8__aarch64_neon( # size_t mr, # size_t nr, # size_t kc, # size_t ks, # const uint8_t** restrict a, # const void* restrict w, # uint8_t* restrict c, # size_t c_stride, # size_t output_channel_index, # const union pytorch_qnnp_q31_requantization_params quantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8conv_ukernel_8x8__aarch64_neon # Load params: x8 # Load output channel index: x9 # Note since this is an offset into a byte pointer # We do not need to multiply with size of pointer type LDP x9, x8, [sp] STP d15, d14, [sp, -16] STP d13, d12, [sp, -32] STP d11, d10, [sp, -48] STP d9, d8, [sp, -64] # Load bias0123, bias4567 LD1 {v8.4s, v9.4s}, [x5], 32 # Load pointer to per channel zero points array # And go to a_zero_point with post-index LDR x10, [x8], 8 # Add offset to the base pointer ADD x10, x10, x9 # v10 := vacc1x0123 MOV v10.16b, v8.16b # v11 := vacc1x4567 MOV v11.16b, v9.16b # Load b_zero_point LD1 {v25.8b}, [x10] # Load a_zero_point LD1R {v24.8b}, [x8] # Load pointer to per channel requant scale LDR x10, [x8, 8]! 
ADD x8, x8, 8 # v12 := vacc2x0123 MOV v12.16b, v8.16b # v13 := vacc2x4567 MOV v13.16b, v9.16b # v14 := vacc3x0123 MOV v14.16b, v8.16b # v15 := vacc3x4567 MOV v15.16b, v9.16b # v16 := vacc4x0123 MOV v16.16b, v8.16b # v17 := vacc4x4567 MOV v17.16b, v9.16b # v18 := vacc5x0123 MOV v18.16b, v8.16b # v19 := vacc5x4567 MOV v19.16b, v9.16b # v20 := vacc6x0123 MOV v20.16b, v8.16b # v21 := vacc6x4567 MOV v21.16b, v9.16b # v22 := vacc7x0123 MOV v22.16b, v8.16b # v23 := vacc7x4567 MOV v23.16b, v9.16b # Fold mul by 4 to get byte offset for requant scale. # Add offset to the base pointer ADD x10, x10, x9, lsl#2 // Load requantization_scale // - v26 = requantization_scale channels 0-3 // - v31 = requantization_scale channels 4-7 LD1 {v26.4s}, [x10], 16 LD1 {v30.4s}, [x10] #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 4 #endif 3: MOV x17, x2 LDR x16, [x4], 8 // a0 LDR x9, [x4], 8 // a1 LDR x10, [x4], 8 // a2 LDR x11, [x4], 8 // a3 LDR x12, [x4], 8 // a4 LDR x13, [x4], 8 // a5 LDR x14, [x4], 8 // a6 LDR x15, [x4], 8 // a7 SUBS x17, x17, 8 B.LO 1f #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 5 #endif 0: # b0-7 (channel 0) LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b # va0 - va7 := va - va_offset LD1 {v0.8b}, [x16], 8 SUB_ZERO_POINT v0.8h, v0.8b, v24.8b LD1 {v1.8b}, [x9], 8 SUB_ZERO_POINT v1.8h, v1.8b, v24.8b LD1 {v2.8b}, [x10], 8 SUB_ZERO_POINT v2.8h, v2.8b, v24.8b LD1 {v3.8b}, [x11], 8 SUB_ZERO_POINT v3.8h, v3.8b, v24.8b LD1 {v4.8b}, [x12], 8 SUB_ZERO_POINT v4.8h, v4.8b, v24.8b LD1 {v5.8b}, [x13], 8 SUB_ZERO_POINT v5.8h, v5.8b, v24.8b LD1 {v6.8b}, [x14], 8 SUB_ZERO_POINT v6.8h, v6.8b, v24.8b LD1 {v7.8b}, [x15], 8 SUB_ZERO_POINT v7.8h, v7.8b, v24.8b // b0-7 (channel 1) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0] SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0] SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0] SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0] SMLAL v12.4s, v27.4h, v2.h[0] // 
vacc2x0123 += vb0123 * va2[0] SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0] SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0] SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0] SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0] SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0] SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0] SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0] SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0] SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0] SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0] // b0-7 (channel 2) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1] SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1] SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1] SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1] SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1] SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1] SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1] SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1] SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1] SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1] SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1] SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1] SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1] SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1] SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1] // b0-7 (channel 3) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2] SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += 
vb4567 * va0[2] SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2] SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2] SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2] SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2] SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2] SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2] SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2] SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2] SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2] SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2] SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2] SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2] SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2] // b0-7 (channel 4) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3] SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3] SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3] SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3] SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3] SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3] SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3] SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3] SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3] SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3] SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3] SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3] SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3] SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3] SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 
+= vb4567 * va7[3] // b0-7 (channel 5) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4] SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4] SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4] SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4] SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4] SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4] SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4] SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4] SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4] SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4] SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4] SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4] SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4] SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4] SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4] // b0-7 (channel 6) LD1 {v27.8b}, [x5], 8 SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5] SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5] SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5] SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5] SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5] SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5] SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5] SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5] USUBL v27.8h, v27.8b, v25.8b SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5] SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5] SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5] SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5] SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5] 
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5] SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5] SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5] // b0-7 (channel 7) LD1 {v28.8b}, [x5], 8 SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6] SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6] SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6] SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6] SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6] SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6] SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6] SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6] USUBL v28.8h, v28.8b, v25.8b SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6] SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6] SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6] SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6] SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6] SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6] SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6] SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6] SUBS x17, x17, 8 SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7] SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7] SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7] SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7] SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7] SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7] SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7] SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7] SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7] SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7] SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7] SMLAL2 
v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7] SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7] SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7] SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7] SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7] B.HS 0b 1: CMP x17, -8 B.EQ 2f // Adjust a0-a7 ADD x16, x16, x17 ADD x9, x9, x17 ADD x10, x10, x17 ADD x11, x11, x17 ADD x12, x12, x17 ADD x13, x13, x17 ADD x14, x14, x17 ADD x15, x15, x17 // a_shift = 8 * k - 64 LSL x17, x17, 3 FMOV d29, x17 USHL d31, d24, d29 // Load x0-a7 LD1 {v0.8b}, [x16], 8 USHL d0, d0, d29 SUB_ZERO_POINT v0.8h, v0.8b, v24.8b LD1 {v1.8b}, [x9], 8 USHL d1, d1, d29 SUB_ZERO_POINT v1.8h, v1.8b, v24.8b LD1 {v2.8b}, [x10], 8 USHL d2, d2, d29 SUB_ZERO_POINT v2.8h, v2.8b, v24.8b LD1 {v3.8b}, [x11], 8 USHL d3, d3, d29 SUB_ZERO_POINT v3.8h, v3.8b, v24.8b LD1 {v4.8b}, [x12], 8 USHL d4, d4, d29 SUB_ZERO_POINT v4.8h, v4.8b, v24.8b LD1 {v5.8b}, [x13], 8 USHL d5, d5, d29 SUB_ZERO_POINT v5.8h, v5.8b, v24.8b LD1 {v6.8b}, [x14], 8 USHL d6, d6, d29 SUB_ZERO_POINT v6.8h, v6.8b, v24.8b LD1 {v7.8b}, [x15], 8 USHL d7, d7, d29 SUB_ZERO_POINT v7.8h, v7.8b, v24.8b // Channel 0 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0] SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0] SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0] SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0] SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0] SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0] SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0] SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0] SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0] SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0] SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0] SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += 
vb4567 * va5[0] SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0] SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0] SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0] SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0] CMP x17, -48 B.LO 2f // Channel 1 LD1 {v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1] SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1] SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1] SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1] SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1] SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1] SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1] SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1] SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1] SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1] SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1] SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1] SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1] SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1] SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1] SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1] B.LS 2f // Channel 2 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2] SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2] SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2] SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2] SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2] SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2] SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2] SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2] SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += 
vb0123 * va4[2] SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2] SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2] SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2] SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2] SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2] SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2] SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2] CMP x17, -32 B.LO 2f // Channel 3 LD1 {v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3] SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3] SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3] SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3] SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3] SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3] SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3] SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3] SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3] SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3] SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3] SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3] SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3] SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3] SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3] SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3] B.LS 2f // Channel 4 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4] SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4] SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4] SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4] SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4] SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += 
vb4567 * va2[4] SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4] SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4] SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4] SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4] SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4] SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4] SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4] SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4] SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4] SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4] CMP x17, -16 B.LO 2f // Channel 5 LD1 {v28.8b}, [x5], 8 USUBL v28.8h, v28.8b, v25.8b SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5] SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5] SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5] SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5] SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5] SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5] SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5] SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5] SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5] SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5] SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5] SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5] SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5] SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5] SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5] SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5] B.LS 2f // Channel 6 LD1 {v27.8b}, [x5], 8 USUBL v27.8h, v27.8b, v25.8b SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6] SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6] SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += 
vb0123 * va1[6] SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6] SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6] SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6] SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6] SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6] SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6] SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6] SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6] SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6] SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6] SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6] SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6] SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6] #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 4 #endif 2: SUB x3, x3, 1 CBNZ x3, 3b // Load zero_point: // - v29 = vzero_point LD1R {v29.8h}, [x8], 2 SCVTF v8.4s, v8.4s SCVTF v9.4s, v9.4s SCVTF v10.4s, v10.4s SCVTF v11.4s, v11.4s SCVTF v12.4s, v12.4s SCVTF v13.4s, v13.4s SCVTF v14.4s, v14.4s SCVTF v15.4s, v15.4s SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMUL v8.4s, v8.4s, v26.4s FMUL v9.4s, v9.4s, v30.4s FMUL v10.4s, v10.4s, v26.4s FMUL v11.4s, v11.4s, v30.4s FMUL v12.4s, v12.4s, v26.4s FMUL v13.4s, v13.4s, v30.4s FMUL v14.4s, v14.4s, v26.4s FMUL v15.4s, v15.4s, v30.4s FMUL v16.4s, v16.4s, v26.4s FMUL v17.4s, v17.4s, v30.4s FMUL v18.4s, v18.4s, v26.4s FMUL v19.4s, v19.4s, v30.4s FMUL v20.4s, v20.4s, v26.4s FMUL v21.4s, v21.4s, v30.4s FMUL v22.4s, v22.4s, v26.4s FMUL v23.4s, v23.4s, v30.4s // Load max: // - v30 = vmax LD1R {v30.16b}, [x8], 1 // Load min: // - v31 = vmin LD1R {v31.16b}, [x8] FCVTNS v8.4s, v8.4s FCVTNS v9.4s, v9.4s FCVTNS v10.4s, v10.4s FCVTNS v11.4s, v11.4s FCVTNS v12.4s, v12.4s FCVTNS v13.4s, v13.4s 
FCVTNS v14.4s, v14.4s FCVTNS v15.4s, v15.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s SQXTN v8.4h, v8.4s SQXTN v10.4h, v10.4s SQXTN v12.4h, v12.4s SQXTN v14.4h, v14.4s SQXTN v16.4h, v16.4s SQXTN v18.4h, v18.4s SQXTN v20.4h, v20.4s SQXTN v22.4h, v22.4s SQXTN2 v8.8h, v9.4s SQXTN2 v10.8h, v11.4s SQXTN2 v12.8h, v13.4s SQXTN2 v14.8h, v15.4s SQXTN2 v16.8h, v17.4s SQXTN2 v18.8h, v19.4s SQXTN2 v20.8h, v21.4s SQXTN2 v22.8h, v23.4s SQADD v8.8h, v8.8h, v29.8h SQADD v10.8h, v10.8h, v29.8h SQADD v12.8h, v12.8h, v29.8h SQADD v14.8h, v14.8h, v29.8h SQADD v16.8h, v16.8h, v29.8h SQADD v18.8h, v18.8h, v29.8h SQADD v20.8h, v20.8h, v29.8h SQADD v22.8h, v22.8h, v29.8h SQXTUN v8.8b, v8.8h SQXTUN v12.8b, v12.8h SQXTUN v16.8b, v16.8h SQXTUN v20.8b, v20.8h SQXTUN2 v8.16b, v10.8h SQXTUN2 v12.16b, v14.8h SQXTUN2 v16.16b, v18.8h SQXTUN2 v20.16b, v22.8h UMIN v8.16b, v8.16b, v30.16b UMIN v12.16b, v12.16b, v30.16b UMIN v16.16b, v16.16b, v30.16b UMIN v20.16b, v20.16b, v30.16b UMAX v8.16b, v8.16b, v31.16b UMAX v12.16b, v12.16b, v31.16b UMAX v16.16b, v16.16b, v31.16b UMAX v20.16b, v20.16b, v31.16b // Compute c0-c7 ADD x9, x6, x7 CMP x0, 2 CSEL x9, x6, x9, LO ADD x10, x9, x7 CSEL x10, x9, x10, LS ADD x11, x10, x7 CMP x0, 4 CSEL x11, x10, x11, LO ADD x12, x11, x7 CSEL x12, x11, x12, LS ADD x13, x12, x7 CMP x0, 6 CSEL x13, x12, x13, LO ADD x14, x13, x7 CSEL x14, x13, x14, LS ADD x15, x14, x7 CMP x0, 8 CSEL x15, x14, x15, NE CMP x1, 8 B.NE 4f // Store results ST1 {v8.d}[0], [x6] ST1 {v8.d}[1], [x9] ST1 {v12.d}[0], [x10] ST1 {v12.d}[1], [x11] ST1 {v16.d}[0], [x12] ST1 {v16.d}[1], [x13] ST1 {v20.d}[0], [x14] ST1 {v20.d}[1], [x15] LDP d9, d8, [sp, -64] LDP d11, d10, [sp, -48] LDP d13, d12, [sp, -32] LDP d15, d14, [sp, -16] RET #ifndef IGNORE_CODE_ALIGN_DIRECTIVES .p2align 3 #endif 4: CMP x1, 4 B.LO 5f ST1 {v8.s}[0], [x6], 4 ST1 {v8.s}[2], [x9], 4 ST1 {v12.s}[0], [x10], 4 
ST1 {v12.s}[2], [x11], 4 ST1 {v16.s}[0], [x12], 4 ST1 {v16.s}[2], [x13], 4 ST1 {v20.s}[0], [x14], 4 ST1 {v20.s}[2], [x15], 4 SUB x1, x1, 4 EXT v8.16b, v8.16b, v8.16b, 4 EXT v12.16b, v12.16b, v12.16b, 4 EXT v16.16b, v16.16b, v16.16b, 4 EXT v20.16b, v20.16b, v20.16b, 4 5: CMP x1, 2 B.LO 6f ST1 {v8.h}[0], [x6], 2 ST1 {v8.h}[4], [x9], 2 ST1 {v12.h}[0], [x10], 2 ST1 {v12.h}[4], [x11], 2 ST1 {v16.h}[0], [x12], 2 ST1 {v16.h}[4], [x13], 2 ST1 {v20.h}[0], [x14], 2 ST1 {v20.h}[4], [x15], 2 SUB x1, x1, 2 EXT v8.16b, v8.16b, v8.16b, 2 EXT v12.16b, v12.16b, v12.16b, 2 EXT v16.16b, v16.16b, v16.16b, 2 EXT v20.16b, v20.16b, v20.16b, 2 6: CMP x1, 1 B.LO 7f ST1 {v8.b}[0], [x6] ST1 {v8.b}[8], [x9] ST1 {v12.b}[0], [x10] ST1 {v12.b}[8], [x11] ST1 {v16.b}[0], [x12] ST1 {v16.b}[8], [x13] ST1 {v20.b}[0], [x14] ST1 {v20.b}[8], [x15] 7: LDP d9, d8, [sp, -64] LDP d11, d10, [sp, -48] LDP d13, d12, [sp, -32] LDP d15, d14, [sp, -16] RET END_FUNCTION pytorch_q8conv_ukernel_8x8__aarch64_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
18,255
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/4x8-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> .syntax unified # Args passed via 4 registers (16 bytes) # r0: mr # r1: nr # r2: kc # r3: ks # # Args passed via stack. # TOS # |-----------| # |a | 0 # |w | 4 # |c | 8 # |c_stride | 12 # |out ch indx| 16 # |params | 20 # |-----------| # # After loading w pointer in ip reg. # And after pushing r4-r8 and d8-d15 on stack # |-----------| # |d8 - d15 | 0 # |r4 - r11 | 64 # |a | 96 # |w | 100 # |c | 104 # |c_stride | 108 # |out ch indx| 112 # |params | 116 # |-----------| # # void pytorch_q8conv_ukernel_4x8__aarch32_neon( # size_t mr, # size_t nr, # size_t kc, # size_t ks, # const uint8_t**restrict a, # const void*restrict w, # uint8_t*restrict c, # size_t c_stride, # const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8conv_ukernel_4x8__aarch32_neon .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Load w # - ip = w LDR ip, [sp, 4] PUSH {r4, r5, r6, r7, r8, r9, r10, r11} # Load params: # - r9 = params LDR r9, [sp, 52] VPUSH {d8-d15} # Load bias0123, bias4567 VLDM ip!, {d16-d19} # Load a # - r8 = a LDR r8, [sp, 96] # Load output channel index LDR r5, [sp, 112] ADD r7, r9, 4 # Load pointer to per channel zero points array LDR r4, [r9], 8 # Load pointer to per channel requant scale # add 8 bytes to get to vfmax LDR r11, [r9], 8 # Load a_zero_point: # - d14 = a_zero_point VLD1.8 {d14[]}, [r7] # Byte offset of output channel index for requant scale. LSL r6, r5, 2 # Add offset to the base pointer ADD r5, r4, r5 # Store in r11 pointer from where to load requant scale. 
ADD r11, r11, r6 # q10 := vacc1x0123 VMOV.I32 q10, q8 # q11 := vacc1x4567 VMOV.I32 q11, q9 # q12 := vacc2x0123 VMOV.I32 q12, q8 # q13 := vacc2x4567 VMOV.I32 q13, q9 # q14 := vacc3x0123 VMOV.I32 q14, q8 # Load b_zero_point: # - d15 = b_zero_point VLD1.8 {d15}, [r5] # q15 := vacc3x4567 VMOV.I32 q15, q9 .p2align 5 0: SUBS r10, r2, 8 # Load a0, a1, a2, a3 # - r4 = a0 # - r5 = a1 # - r6 = a2 # - r7 = a3 LDM r8!, {r4-r7} BLO 2f 1: # Load va0 # - d1 = va0 VLD1.8 {d1}, [r4]! # Load va1 # - d3 = va1 VLD1.8 {d3}, [r5]! # Load vb0-vb7 (channel 0) # - d9 = vb0-vb7 VLD1.8 {d9}, [ip:64]! # Load va2 # - d5 = va2 VLD1.8 {d5}, [r6]! # q0 = va0 = a0 SUB_ZERO_POINT q0, d1, d14 # Load va3 # - d7 = va3 VLD1.8 {d7}, [r7]! # q1 = va1 = a1 SUB_ZERO_POINT q1, d3, d14 # q4 = b0:7 - vb_zero_point # - d8 = vb0123 (channel 0) # - d9 = vb4567 (channel 0) VSUBL.U8 q4, d9, d15 # q2 = va2 = a2 SUB_ZERO_POINT q2, d5, d14 # q3 = va3 = a3 SUB_ZERO_POINT q3, d7, d14 ### Channel 0 ### # Load b0-b7 (channel 1) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # vacc0x0123 += vb0123 * va0[0] VMLAL.S16 q8, d8, d0[0] # vacc0x4567 += vb4567 * va0[0] VMLAL.S16 q9, d9, d0[0] # vacc1x0123 += vb0123 * va1[0] VMLAL.S16 q10, d8, d2[0] # vacc1x4567 += vb4567 * va1[0] VMLAL.S16 q11, d9, d2[0] # vacc2x0123 += vb0123 * va2[0] VMLAL.S16 q12, d8, d4[0] # vacc2x4567 += vb4567 * va2[0] VMLAL.S16 q13, d9, d4[0] # q5 = b0:7 - vb_zero_point # - d10 = vb0123 (channel 1) # - d11 = vb4567 (channel 1) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[0] VMLAL.S16 q14, d8, d6[0] # vacc3x4567 += vb4567 * va3[0] VMLAL.S16 q15, d9, d6[0] ### Channel 1 ### # Load b0-b7 (channel 2) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[1] VMLAL.S16 q8, d10, d0[1] # vacc0x4567 += vb4567 * va0[1] VMLAL.S16 q9, d11, d0[1] # vacc1x0123 += vb0123 * va1[1] VMLAL.S16 q10, d10, d2[1] # vacc1x4567 += vb4567 * va1[1] VMLAL.S16 q11, d11, d2[1] # vacc2x0123 += vb0123 * va2[1] VMLAL.S16 q12, d10, d4[1] # vacc2x4567 += vb4567 * va2[1] VMLAL.S16 q13, d11, d4[1] # q4 = b0:7 - vb_zero_point # - d8 = vb0123 (channel 2) # - d9 = vb4567 (channel 2) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[1] VMLAL.S16 q14, d10, d6[1] # vacc3x4567 += vb4567 * va3[1] VMLAL.S16 q15, d11, d6[1] ### Channel 2 ### # Load b0-b7 (channel 3) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # vacc0x0123 += vb0123 * va0[2] VMLAL.S16 q8, d8, d0[2] # vacc0x4567 += vb4567 * va0[2] VMLAL.S16 q9, d9, d0[2] # vacc1x0123 += vb0123 * va1[2] VMLAL.S16 q10, d8, d2[2] # vacc1x4567 += vb4567 * va1[2] VMLAL.S16 q11, d9, d2[2] # vacc2x0123 += vb0123 * va2[2] VMLAL.S16 q12, d8, d4[2] # vacc2x4567 += vb4567 * va2[2] VMLAL.S16 q13, d9, d4[2] # q5 = b0:7 - vb_zero_point # - d10 = vb0123 (channel 3) # - d11 = vb4567 (channel 3) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[2] VMLAL.S16 q14, d8, d6[2] # vacc3x4567 += vb4567 * va3[2] VMLAL.S16 q15, d9, d6[2] ### Channel 3 ### # Load b0-b7 (channel 4) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # vacc0x0123 += vb0123 * va0[3] VMLAL.S16 q8, d10, d0[3] # vacc0x4567 += vb4567 * va0[3] VMLAL.S16 q9, d11, d0[3] # vacc1x0123 += vb0123 * va1[3] VMLAL.S16 q10, d10, d2[3] # vacc1x4567 += vb4567 * va1[3] VMLAL.S16 q11, d11, d2[3] # vacc2x0123 += vb0123 * va2[3] VMLAL.S16 q12, d10, d4[3] # vacc2x4567 += vb4567 * va2[3] VMLAL.S16 q13, d11, d4[3] # q5 = b0:7 - vb_zero_point # - d10 = vb0123 (channel 4) # - d11 = vb4567 (channel 4) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[3] VMLAL.S16 q14, d10, d6[3] # vacc3x4567 += vb4567 * va3[3] VMLAL.S16 q15, d11, d6[3] ### Channel 4 ### # Load b0-b7 (channel 5) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[4] VMLAL.S16 q8, d8, d1[0] # vacc0x4567 += vb4567 * va0[4] VMLAL.S16 q9, d9, d1[0] # vacc1x0123 += vb0123 * va1[4] VMLAL.S16 q10, d8, d3[0] # vacc1x4567 += vb4567 * va1[4] VMLAL.S16 q11, d9, d3[0] # vacc2x0123 += vb0123 * va2[4] VMLAL.S16 q12, d8, d5[0] # vacc2x4567 += vb4567 * va2[4] VMLAL.S16 q13, d9, d5[0] # q4 = b0:7 - vb_zero_point # - d8 = vb0123 (channel 5) # - d9 = vb4567 (channel 5) VSUBL.U8 q5, d11, d15 # vacc3x0123 += vb0123 * va3[4] VMLAL.S16 q14, d8, d7[0] # vacc3x4567 += vb4567 * va3[4] VMLAL.S16 q15, d9, d7[0] ### Channel 5 ### # Load b0-b7 (channel 6) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # vacc0x0123 += vb0123 * va0[5] VMLAL.S16 q8, d10, d1[1] # vacc0x4567 += vb4567 * va0[5] VMLAL.S16 q9, d11, d1[1] # vacc1x0123 += vb0123 * va1[5] VMLAL.S16 q10, d10, d3[1] # vacc1x4567 += vb4567 * va1[5] VMLAL.S16 q11, d11, d3[1] # vacc2x0123 += vb0123 * va2[5] VMLAL.S16 q12, d10, d5[1] # vacc2x4567 += vb4567 * va2[5] VMLAL.S16 q13, d11, d5[1] # q4 = b0:7 - vb_zero_point # - d8 = vb0123 (channel 6) # - d9 = vb4567 (channel 6) VSUBL.U8 q4, d9, d15 # vacc3x0123 += vb0123 * va3[5] VMLAL.S16 q14, d10, d7[1] # vacc3x4567 += vb4567 * va3[5] VMLAL.S16 q15, d11, d7[1] ### Channel 6 ### # Load b0-b7 (channel 7) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# vacc0x0123 += vb0123 * va0[6] VMLAL.S16 q8, d8, d1[2] # vacc0x4567 += vb4567 * va0[6] VMLAL.S16 q9, d9, d1[2] # vacc1x0123 += vb0123 * va1[6] VMLAL.S16 q10, d8, d3[2] # vacc1x4567 += vb4567 * va1[6] VMLAL.S16 q11, d9, d3[2] # vacc2x0123 += vb0123 * va2[6] VMLAL.S16 q12, d8, d5[2] # q5 = b0:7 - vb_zero_point # - d10 = vb0123 (channel 7) # - d11 = vb4567 (channel 7) VSUBL.U8 q5, d11, d15 # vacc2x4567 += vb4567 * va2[6] VMLAL.S16 q13, d9, d5[2] # vacc3x0123 += vb0123 * va3[6] VMLAL.S16 q14, d8, d7[2] # vacc3x4567 += vb4567 * va3[6] VMLAL.S16 q15, d9, d7[2] ### Channel 8 ### SUBS r10, r10, 8 # vacc0x0123 += vb0123 * va0[7] VMLAL.S16 q8, d10, d1[3] # vacc0x4567 += vb4567 * va0[7] VMLAL.S16 q9, d11, d1[3] # vacc1x0123 += vb0123 * va1[7] VMLAL.S16 q10, d10, d3[3] # vacc1x4567 += vb4567 * va1[7] VMLAL.S16 q11, d11, d3[3] # vacc2x0123 += vb0123 * va2[7] VMLAL.S16 q12, d10, d5[3] # vacc2x4567 += vb4567 * va2[7] VMLAL.S16 q13, d11, d5[3] # vacc3x0123 += vb0123 * va3[7] VMLAL.S16 q14, d10, d7[3] # vacc3x4567 += vb4567 * va3[7] VMLAL.S16 q15, d11, d7[3] BHS 1b 2: CMP r10, -8 BEQ 3f # Adjust a0, a1, a2, a3 ADD r4, r10 ADD r5, r10 ADD r6, r10 ADD r7, r10 # a_shift = 8 * k - 64 LSL r10, r10, 3 VDUP.32 d13, r10 # Load va0 # - d1 = va0 VLD1.8 {d1}, [r4] # Load va1 # - d3 = va1 VLD1.8 {d3}, [r5] # Load b0-b7 (channel 0) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# Load a2 # - d5 = a2 VLD1.8 {d5}, [r6] # q0 = va0 = a0 VSHL.U64 d1, d1, d13 SUB_ZERO_POINT q0, d1, d14 # Load a3 # - d7 = a3 VLD1.8 {d7}, [r7] # q1 = va1 = a1 VSHL.U64 d3, d3, d13 SUB_ZERO_POINT q1, d3, d14 # q4 = b0:7 - vb_zero_point # - d8 = vb0123 (channel 0) # - d9 = vb4567 (channel 0) VSUBL.U8 q4, d9, d15 # q2 = va2 = a2 VSHL.U64 d5, d5, d13 SUB_ZERO_POINT q2, d5, d14 # q3 = va3 = a3 VSHL.U64 d7, d7, d13 SUB_ZERO_POINT q3, d7, d14 ### Channel 0 ### # vacc0x0123 += vb0123 * va0[0] VMLAL.S16 q8, d8, d0[0] # vacc0x4567 += vb4567 * va0[0] VMLAL.S16 q9, d9, d0[0] # vacc1x0123 += vb0123 * va1[0] VMLAL.S16 q10, d8, d2[0] # vacc1x4567 += vb4567 * va1[0] VMLAL.S16 q11, d9, d2[0] # vacc2x0123 += vb0123 * va2[0] VMLAL.S16 q12, d8, d4[0] # vacc2x4567 += vb4567 * va2[0] VMLAL.S16 q13, d9, d4[0] # vacc3x0123 += vb0123 * va3[0] VMLAL.S16 q14, d8, d6[0] # vacc3x4567 += vb4567 * va3[0] VMLAL.S16 q15, d9, d6[0] CMP r10, -48 BLO 3f ### Channel 1 ### # Load b0-b7 (channel 1) # - d11 = b0-b7 VLD1.8 {d11}, [ip:64]! # q5 = b0:7 - vb_zero_point # - d10 = vb0123 (channel 1) # - d11 = vb4567 (channel 1) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[1] VMLAL.S16 q8, d10, d0[1] # vacc0x4567 += vb4567 * va0[1] VMLAL.S16 q9, d11, d0[1] # vacc1x0123 += vb0123 * va1[1] VMLAL.S16 q10, d10, d2[1] # vacc1x4567 += vb4567 * va1[1] VMLAL.S16 q11, d11, d2[1] # vacc2x0123 += vb0123 * va2[1] VMLAL.S16 q12, d10, d4[1] # vacc2x4567 += vb4567 * va2[1] VMLAL.S16 q13, d11, d4[1] # vacc3x0123 += vb0123 * va3[1] VMLAL.S16 q14, d10, d6[1] # vacc3x4567 += vb4567 * va3[1] VMLAL.S16 q15, d11, d6[1] ### Channel 2 ### BLS 3f # Load b0-b7 (channel 2) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! 
# q4 = b0:7 - vb_zero_point # - d8 = vb0123 (channel 2) # - d9 = vb4567 (channel 2) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[2] VMLAL.S16 q8, d8, d0[2] # vacc0x4567 += vb4567 * va0[2] VMLAL.S16 q9, d9, d0[2] # vacc1x0123 += vb0123 * va1[2] VMLAL.S16 q10, d8, d2[2] # vacc1x4567 += vb4567 * va1[2] VMLAL.S16 q11, d9, d2[2] # vacc2x0123 += vb0123 * va2[2] VMLAL.S16 q12, d8, d4[2] # vacc2x4567 += vb4567 * va2[2] VMLAL.S16 q13, d9, d4[2] # vacc3x0123 += vb0123 * va3[2] VMLAL.S16 q14, d8, d6[2] # vacc3x4567 += vb4567 * va3[2] VMLAL.S16 q15, d9, d6[2] ### Channel 3 ### CMP r10, -32 BLO 3f # Load b0-b7 (channel 3) # - d9 = b0-b7 VLD1.8 {d11}, [ip:64]! # q4 = b0:7 - vb_zero_point # - d8 = vb0123 (channel 3) # - d9 = vb4567 (channel 3) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[3] VMLAL.S16 q8, d10, d0[3] # vacc0x4567 += vb4567 * va0[3] VMLAL.S16 q9, d11, d0[3] # vacc1x0123 += vb0123 * va1[3] VMLAL.S16 q10, d10, d2[3] # vacc1x4567 += vb4567 * va1[3] VMLAL.S16 q11, d11, d2[3] # vacc2x0123 += vb0123 * va2[3] VMLAL.S16 q12, d10, d4[3] # vacc2x4567 += vb4567 * va2[3] VMLAL.S16 q13, d11, d4[3] # vacc3x0123 += vb0123 * va3[3] VMLAL.S16 q14, d10, d6[3] # vacc3x4567 += vb4567 * va3[3] VMLAL.S16 q15, d11, d6[3] ### Channel 4 ### BLS 3f # Load b0-b7 (channel 4) # - d11 = b0-b7 VLD1.8 {d9}, [ip:64]! # q5 = b0:7 - vb_zero_point # - d10 = vb0123 (channel 4) # - d11 = vb4567 (channel 4) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[4] VMLAL.S16 q8, d8, d1[0] # vacc0x4567 += vb4567 * va0[4] VMLAL.S16 q9, d9, d1[0] # vacc1x0123 += vb0123 * va1[4] VMLAL.S16 q10, d8, d3[0] # vacc1x4567 += vb4567 * va1[4] VMLAL.S16 q11, d9, d3[0] # vacc2x0123 += vb0123 * va2[4] VMLAL.S16 q12, d8, d5[0] # vacc2x4567 += vb4567 * va2[4] VMLAL.S16 q13, d9, d5[0] # vacc3x0123 += vb0123 * va3[4] VMLAL.S16 q14, d8, d7[0] # vacc3x4567 += vb4567 * va3[4] VMLAL.S16 q15, d9, d7[0] ### Channel 5 ### CMP r10, -16 BLO 3f # Load b0-b7 (channel 5) # - d13 = b0-b7 VLD1.8 {d11}, [ip:64]! 
# q5 = b0:7 - vb_zero_point # - d10 = vb0123 (channel 5) # - d11 = vb4567 (channel 5) VSUBL.U8 q5, d11, d15 # vacc0x0123 += vb0123 * va0[5] VMLAL.S16 q8, d10, d1[1] # vacc0x4567 += vb4567 * va0[5] VMLAL.S16 q9, d11, d1[1] # vacc1x0123 += vb0123 * va1[5] VMLAL.S16 q10, d10, d3[1] # vacc1x4567 += vb4567 * va1[5] VMLAL.S16 q11, d11, d3[1] # vacc2x0123 += vb0123 * va2[5] VMLAL.S16 q12, d10, d5[1] # vacc2x4567 += vb4567 * va2[5] VMLAL.S16 q13, d11, d5[1] # vacc3x0123 += vb0123 * va3[5] VMLAL.S16 q14, d10, d7[1] # vacc3x4567 += vb4567 * va3[5] VMLAL.S16 q15, d11, d7[1] ### Channel 6 ### BLS 3f # Load b0-b7 (channel 6) # - d9 = b0-b7 VLD1.8 {d9}, [ip:64]! # q4 = b0:7 - vb_zero_point # - d8 = vb0123 (channel 6) # - d9 = vb4567 (channel 6) VSUBL.U8 q4, d9, d15 # vacc0x0123 += vb0123 * va0[6] VMLAL.S16 q8, d8, d1[2] # vacc0x4567 += vb4567 * va0[6] VMLAL.S16 q9, d9, d1[2] # vacc1x0123 += vb0123 * va1[6] VMLAL.S16 q10, d8, d3[2] # vacc1x4567 += vb4567 * va1[6] VMLAL.S16 q11, d9, d3[2] # vacc2x0123 += vb0123 * va2[6] VMLAL.S16 q12, d8, d5[2] # vacc2x4567 += vb4567 * va2[6] VMLAL.S16 q13, d9, d5[2] # vacc3x0123 += vb0123 * va3[6] VMLAL.S16 q14, d8, d7[2] # vacc3x4567 += vb4567 * va3[6] VMLAL.S16 q15, d9, d7[2] .p2align 4 3: SUBS r3, r3, 1 BNE 0b # Load requantization_scale: # - d12 = requantization_scale VLD1.32 {d12, d13}, [r11]! # Load vfmax: VLD1.32 {d10[], d11[]}, [r9]! VLD1.32 {d4, d5}, [r11] # Load vfmin: VLD1.32 {d8[], d9[]}, [r9]! # Load vfmagic: VLD1.32 {d0[], d1[]}, [r9]! # Load vimagic: VLD1.32 {d2[], d3[]}, [r9]! 
VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q6 VMUL.F32 q9, q9, q2 VMUL.F32 q10, q10, q6 VMUL.F32 q11, q11, q2 VMUL.F32 q12, q12, q6 VMUL.F32 q13, q13, q2 VMUL.F32 q14, q14, q6 VMUL.F32 q15, q15, q2 VMIN.F32 q8, q8, q5 VMIN.F32 q9, q9, q5 VMIN.F32 q10, q10, q5 VMIN.F32 q11, q11, q5 VMIN.F32 q12, q12, q5 VMIN.F32 q13, q13, q5 VMIN.F32 q14, q14, q5 VMIN.F32 q15, q15, q5 VMAX.F32 q8, q8, q4 VMAX.F32 q9, q9, q4 VMAX.F32 q10, q10, q4 VMAX.F32 q11, q11, q4 VMAX.F32 q12, q12, q4 VMAX.F32 q13, q13, q4 VMAX.F32 q14, q14, q4 VMAX.F32 q15, q15, q4 VADD.F32 q8, q8, q0 VADD.F32 q9, q9, q0 VADD.F32 q10, q10, q0 VADD.F32 q11, q11, q0 VADD.F32 q12, q12, q0 VADD.F32 q13, q13, q0 VADD.F32 q14, q14, q0 VADD.F32 q15, q15, q0 # Load c, c_stride: # - r2 = c # - r2 = c_stride LDRD r2, r3, [sp, 104] VSUB.S32 q8, q8, q1 VSUB.S32 q9, q9, q1 VSUB.S32 q10, q10, q1 VSUB.S32 q11, q11, q1 VSUB.S32 q12, q12, q1 VSUB.S32 q13, q13, q1 VSUB.S32 q14, q14, q1 VSUB.S32 q15, q15, q1 ADD r4, r2, r3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 CMP r0, 2 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 MOVLO r4, r2 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 ADD r5, r4, r3 VQMOVUN.S16 d16, q8 MOVLS r5, r4 VQMOVUN.S16 d17, q9 VQMOVUN.S16 d18, q10 CMP r0, 4 ADD r3, r5, r3 MOVNE r3, r5 CMP r1, 8 VQMOVUN.S16 d19, q11 BNE 5f VST1.8 {d16}, [r2] VST1.8 {d17}, [r4] VST1.8 {d18}, [r5] VST1.8 {d19}, [r3] VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr .p2align 3 5: CMP r1, 4 BLO 6f VST1.32 {d16[0]}, [r2]! VST1.32 {d17[0]}, [r4]! VST1.32 {d18[0]}, [r5]! VST1.32 {d19[0]}, [r3]! SUB r1, 4 VEXT.8 q8, q8, q8, 4 VEXT.8 q9, q9, q9, 4 6: CMP r1, 2 BLO 7f VST1.16 {d16[0]}, [r2]! VST1.16 {d17[0]}, [r4]! VST1.16 {d18[0]}, [r5]! VST1.16 {d19[0]}, [r3]! 
SUB r1, 2 VEXT.8 q8, q8, q8, 2 VEXT.8 q9, q9, q9, 2 7: TEQ r1, 0 BEQ 8f VST1.8 {d16[0]}, [r2] VST1.8 {d17[0]}, [r4] VST1.8 {d18[0]}, [r5] VST1.8 {d19[0]}, [r3] 8: VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION pytorch_q8conv_ukernel_4x8__aarch32_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
7,829
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-aarch32-neon.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> .syntax unified # void pytorch_q8dwconv_ukernel_up8x9__aarch32_neon( # size_t channels, # size_t output_width, # const uint8_t** input, # const void* weights, # uint8_t* output, # size_t input_stride, # size_t output_increment, # const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8dwconv_ukernel_up8x9__aarch32_neon .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Load params # - r12 = quantization_params LDR r12, [sp, 12] PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} VPUSH {d8-d15} STR r0, [sp, #-8] STR r3, [sp, #-4] # Load the address zero_point array. # For depth wise kernels the array is of single element. LDR r5, [r12], 4 # Load o: # - lr = o = output LDR lr, [sp, 100] # Load kernel zero point: # - d31 = vkernel_zero_point VLD1.8 {d31[]}, [r5] # Load input zero point: # - d30 = vinput_zero_point VLD1.8 {d30[]}, [r12] # Load the address requantization_scale array. # For depth wise kernels the array is of single element. # pre-index r12 = r12 + 4 LDR r5, [r12, 4]! # add 8 bytes to get to vfmax ADD r12, r12, 8 # Load requantization_scale: # - q14 = d28:d29 = requantization_scale VLD1.32 {d28[], d29[]}, [r5] # Load vfmax: # - q13 = d26:d27 = vfmax VLD1.32 {d26[], d27[]}, [r12]! # Load vfmin: # - q12 = d24:d25 = vfmin VLD1.32 {d24[], d25[]}, [r12]! # Load vfmagic: # - q10 = d20:d21 = vfmagic VLD1.32 {d20[], d21[]}, [r12]! # Load vimagic: # - q11 = d22:d23 = vimagic # Since q11/d22 gets used in the remainder channels section # This load will have to occur in that section again. # But since r12 is overwritten below, we will have to push it # on the stack and pop it back. 
VLD1.32 {d22[], d23[]}, [r12] VSTR d22, [sp, #-16] VSTR d23, [sp, #-24] .p2align 3 0: # Load input stride # - r3 = input_stride LDR r3, [sp, 104] # Load c: # - r0 = c = channels LDR r0, [sp, #-8] # Load i0, i1, i2, i3, i4, i5, i6, i7, i8 # - r4 = i0 # - r5 = i1 # - r6 = i2 # - r7 = i3 # - r8 = i4 # - r9 = i5 # - r10 = i6 # - r11 = i7 # - r12 = i8 LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11, r12} # Pre-decrement c SUBS r0, r0, 8 # Increment input by input stride # - input = r2 := input + input_stride ADD r2, r2, r3 # Load w: # - r3 = w = weights LDR r3, [sp, #-4] BLO 2f .p2align 4 1: VLDM r3!, {d0-d3} VLD1.8 {d4}, [r4]! VLD1.8 {d6}, [r3]! VLD1.8 {d8}, [r5]! VLD1.8 {d10}, [r3]! SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VLD1.8 {d12}, [r6]! VLD1.8 {d14}, [r3]! SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLD1.8 {d4}, [r7]! VLD1.8 {d6}, [r3]! SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VLD1.8 {d8}, [r8]! VLD1.8 {d10}, [r3]! SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 VLD1.8 {d12}, [r9]! VLD1.8 {d14}, [r3]! SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLD1.8 {d4}, [r10]! VLD1.8 {d6}, [r3]! SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VLD1.8 {d8}, [r11]! VLD1.8 {d10}, [r3]! SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 VLD1.8 {d12}, [r12]! VLD1.8 {d14}, [r3]! 
SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 VCVT.F32.S32 q0, q0 VCVT.F32.S32 q1, q1 VMUL.F32 q0, q0, q14 VMUL.F32 q1, q1, q14 VMIN.F32 q0, q0, q13 VMIN.F32 q1, q1, q13 VMAX.F32 q0, q0, q12 VMAX.F32 q1, q1, q12 VADD.F32 q0, q0, q10 VADD.F32 q1, q1, q10 VSUB.S32 q0, q0, q11 VSUB.S32 q1, q1, q11 VQMOVN.S32 d0, q0 VQMOVN.S32 d1, q1 VQMOVUN.S16 d0, q0 VST1.8 {d0}, [lr]! SUBS r0, r0, 8 BHS 1b 2: CMP r0, -8 BEQ 5f ADD r4, r4, r0 ADD r5, r5, r0 ADD r6, r6, r0 ADD r7, r7, r0 ADD r8, r8, r0 ADD r9, r9, r0 ADD r10, r10, r0 ADD r11, r11, r0 ADD r12, r12, r0 LSL r0, r0, 3 VDUP.32 d22, r0 VLDM r3!, {d0-d3} VLD1.8 {d4}, [r4]! VLD1.8 {d6}, [r3]! VLD1.8 {d8}, [r5]! VLD1.8 {d10}, [r3]! VSHL.U64 d4, d4, d22 VLD1.8 {d12}, [r6]! VLD1.8 {d14}, [r3]! SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VSHL.U64 d8, d8, d22 VLD1.8 {d16}, [r7]! VLD1.8 {d18}, [r3]! VSHL.U64 d12, d12, d22 SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLD1.8 {d4}, [r8]! VLD1.8 {d6}, [r3]! VSHL.U64 d16, d16, d22 SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VLD1.8 {d8}, [r9]! VLD1.8 {d10}, [r3]! VSHL.U64 d4, d4, d22 SUB_ZERO_POINT q8, d16, d30 VSUBL.U8 q9, d18, d31 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 VLD1.8 {d12}, [r10]! VLD1.8 {d14}, [r3]! VSHL.U64 d8, d8, d22 SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VMLAL.S16 q0, d16, d18 VMLAL.S16 q1, d17, d19 VLD1.8 {d16}, [r11]! VLD1.8 {d18}, [r3]! VSHL.U64 d12, d12, d22 SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLD1.8 {d4}, [r12]! VLD1.8 {d6}, [r3]! 
VSHL.U64 d16, d16, d22 SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VSHL.U64 d4, d4, d22 SUB_ZERO_POINT q8, d16, d30 VSUBL.U8 q9, d18, d31 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VMLAL.S16 q0, d16, d18 VMLAL.S16 q1, d17, d19 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLDR.64 d22, [sp, #-16] VLDR.64 d23, [sp, #-24] VCVT.F32.S32 q0, q0 VCVT.F32.S32 q1, q1 VMUL.F32 q0, q0, q14 VMUL.F32 q1, q1, q14 VMIN.F32 q0, q0, q13 VMIN.F32 q1, q1, q13 VMAX.F32 q0, q0, q12 VMAX.F32 q1, q1, q12 VADD.F32 q0, q0, q10 VADD.F32 q1, q1, q10 VSUB.S32 q0, q0, q11 VSUB.S32 q1, q1, q11 VQMOVN.S32 d0, q0 VQMOVN.S32 d1, q1 VQMOVUN.S16 d0, q0 TST r0, 32 BEQ 3f VST1.32 {d0[0]}, [lr]! VEXT.8 d0, d0, 4 3: TST r0, 16 BEQ 4f VST1.16 {d0[0]}, [lr]! VEXT.8 d0, d0, 2 4: TST r0, 8 BEQ 5f VST1.8 {d0[0]}, [lr]! 5: # Load output increment # - r3 = output_increment LDR r3, [sp, 108] # Decrement output width SUBS r1, r1, 1 # Increment output by output_increment ADD lr, lr, r3 # If output width is non-zero, process another pixel BNE 0b VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION pytorch_q8dwconv_ukernel_up8x9__aarch32_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
ShaoxunZeng/PyTorch-Medusa
9,433
aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-aarch32-neon-per-channel.S
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <qnnpack/assembly.h> #include <requantization/runtime-assembly.h> .syntax unified # void pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon( # size_t channels, # size_t output_width, # const uint8_t** input, # const void* weights, # uint8_t* output, # size_t input_stride, # size_t output_increment, # const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1]) BEGIN_FUNCTION pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Load params # - r12 = quantization_params LDR r12, [sp, 12] PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} VPUSH {d8-d15} STR r0, [sp, #-8] STR r3, [sp, #-4] STR r1, [sp, #-12] STR r2, [sp, #-16] # Load the address zero_point array. LDR r5, [r12], 4 # Push the zero_point_array base pointer on stack # We dont have enough registers to maintain # base pointers. Thus we will have to do some pushes # and pops. # At sp #-20 we store updated/working copy pointers # At sp #-28 we store orig pointers that can be reloaded # for more output pixels STR r5, [sp, #-28] # Load o: # - lr = o = output LDR lr, [sp, 100] # Load input zero point: # - d30 = vinput_zero_point VLD1.8 {d30[]}, [r12] # Load the address requantization_scale array. # For depth wise kernels the array is of single element. # pre-index r12 = r12 + 4 LDR r5, [r12, 4]! # Push the requantization_scales base pointer on stack # At sp #-24 we store updated/working copy pointers # At sp #-32 we store orig pointers that can be reloaded # for more output pixels STR r5, [sp, #-32] # add 8 bytes to get to vfmax ADD r12, r12, 8 # Load vfmax: # - q13 = d26:d27 = vfmax VLD1.32 {d26[], d27[]}, [r12]! # Load vfmin: # - q12 = d24:d25 = vfmin VLD1.32 {d24[], d25[]}, [r12]! 
# Load vfmagic: # - q10 = d20:d21 = vfmagic VLD1.32 {d20[], d21[]}, [r12]! # Load vimagic: # - q11 = d22:d23 = vimagic # Since q11/d22 gets used in the remainder channels section # This load will have to occur in that section again. # But since r12 is overwritten below, we will have to push it # on the stack and pop it back. VLD1.32 {d22[], d23[]}, [r12] VSTR d22, [sp, #-40] VSTR d23, [sp, #-48] .p2align 3 0: # Load original zero point base pointer LDR r4, [sp, #-28] # Load original requant scale base pointer LDR r5, [sp, #-32] # Load indirection pointer from stack LDR r2, [sp, #-16] # Load input stride # - r3 = input_stride LDR r3, [sp, 104] # Store original zero point to working copy STR r4, [sp, #-20] # Store original requant scale to working copy STR r5, [sp, #-24] # Load c: # - r0 = c = channels LDR r0, [sp, #-8] # Load i0, i1, i2, i3, i4, i5, i6, i7, i8 # - r4 = i0 # - r5 = i1 # - r6 = i2 # - r7 = i3 # - r8 = i4 # - r9 = i5 # - r10 = i6 # - r11 = i7 # - r12 = i8 LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11, r12} # Pre-decrement c SUBS r0, r0, 8 # Increment input by input stride # - input = r2 := input + input_stride ADD r2, r2, r3 STR r2, [sp, #-16] # Load w: # - r3 = w = weights LDR r3, [sp, #-4] BLO 2f .p2align 4 1: VLDM r3!, {d0-d3} VLD1.8 {d4}, [r4]! VLD1.8 {d6}, [r3]! # zero point array base address LDR r1, [sp, #-20] # requantization scale array base address LDR r2, [sp, #-24] VLD1.8 {d8}, [r5]! VLD1.8 {d10}, [r3]! # - d31 = vkernel_zero_point VLD1.8 {d31}, [r1]! # - q8 = d16:d17= requantization_scale_lo VLD1.32 {d16, d17}, [r2]! # - q14 = d28:d29 = requantization_scale_hi VLD1.32 {d28, d29}, [r2]! STR r1, [sp, #-20] STR r2, [sp, #-24] SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VLD1.8 {d12}, [r6]! VLD1.8 {d14}, [r3]! SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLD1.8 {d4}, [r7]! VLD1.8 {d6}, [r3]! 
SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VLD1.8 {d8}, [r8]! VLD1.8 {d10}, [r3]! SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 VLD1.8 {d12}, [r9]! VLD1.8 {d14}, [r3]! SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLD1.8 {d4}, [r10]! VLD1.8 {d6}, [r3]! SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VLD1.8 {d8}, [r11]! VLD1.8 {d10}, [r3]! SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 VLD1.8 {d12}, [r12]! VLD1.8 {d14}, [r3]! SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 VCVT.F32.S32 q0, q0 VCVT.F32.S32 q1, q1 VMUL.F32 q0, q0, q8 VMUL.F32 q1, q1, q14 VMIN.F32 q0, q0, q13 VMIN.F32 q1, q1, q13 VMAX.F32 q0, q0, q12 VMAX.F32 q1, q1, q12 VADD.F32 q0, q0, q10 VADD.F32 q1, q1, q10 VSUB.S32 q0, q0, q11 VSUB.S32 q1, q1, q11 VQMOVN.S32 d0, q0 VQMOVN.S32 d1, q1 VQMOVUN.S16 d0, q0 VST1.8 {d0}, [lr]! SUBS r0, r0, 8 BHS 1b 2: CMP r0, -8 BEQ 5f # zero point array base address LDR r1, [sp, #-20] # requantization scale array base address LDR r2, [sp, #-24] ADD r4, r4, r0 ADD r5, r5, r0 ADD r6, r6, r0 ADD r7, r7, r0 ADD r8, r8, r0 ADD r9, r9, r0 ADD r10, r10, r0 ADD r11, r11, r0 ADD r12, r12, r0 # - d31 = vkernel_zero_point VLD1.8 {d31}, [r1] LSL r0, r0, 3 VDUP.32 d22, r0 VLDM r3!, {d0-d3} VLD1.8 {d4}, [r4]! VLD1.8 {d6}, [r3]! VLD1.8 {d8}, [r5]! VLD1.8 {d10}, [r3]! VSHL.U64 d4, d4, d22 VLD1.8 {d12}, [r6]! VLD1.8 {d14}, [r3]! SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VSHL.U64 d8, d8, d22 VLD1.8 {d16}, [r7]! VLD1.8 {d18}, [r3]! VSHL.U64 d12, d12, d22 SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLD1.8 {d4}, [r8]! 
VLD1.8 {d6}, [r3]! VSHL.U64 d16, d16, d22 SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VLD1.8 {d8}, [r9]! VLD1.8 {d10}, [r3]! VSHL.U64 d4, d4, d22 SUB_ZERO_POINT q8, d16, d30 VSUBL.U8 q9, d18, d31 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 VLD1.8 {d12}, [r10]! VLD1.8 {d14}, [r3]! VSHL.U64 d8, d8, d22 SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VMLAL.S16 q0, d16, d18 VMLAL.S16 q1, d17, d19 VLD1.8 {d16}, [r11]! VLD1.8 {d18}, [r3]! VSHL.U64 d12, d12, d22 SUB_ZERO_POINT q4, d8, d30 VSUBL.U8 q5, d10, d31 VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLD1.8 {d4}, [r12]! VLD1.8 {d6}, [r3]! VSHL.U64 d16, d16, d22 SUB_ZERO_POINT q6, d12, d30 VSUBL.U8 q7, d14, d31 VMLAL.S16 q0, d8, d10 VMLAL.S16 q1, d9, d11 VSHL.U64 d4, d4, d22 SUB_ZERO_POINT q8, d16, d30 VSUBL.U8 q9, d18, d31 VMLAL.S16 q0, d12, d14 VMLAL.S16 q1, d13, d15 SUB_ZERO_POINT q2, d4, d30 VSUBL.U8 q3, d6, d31 VMLAL.S16 q0, d16, d18 VMLAL.S16 q1, d17, d19 # - q8 = d16:d17= requantization_scale_lo VLD1.32 {d16, d17}, [r2]! # - q14 = d28:d29 = requantization_scale_hi VLD1.32 {d28, d29}, [r2] VMLAL.S16 q0, d4, d6 VMLAL.S16 q1, d5, d7 VLDR.64 d22, [sp, #-40] VLDR.64 d23, [sp, #-48] VCVT.F32.S32 q0, q0 VCVT.F32.S32 q1, q1 VMUL.F32 q0, q0, q8 VMUL.F32 q1, q1, q14 VMIN.F32 q0, q0, q13 VMIN.F32 q1, q1, q13 VMAX.F32 q0, q0, q12 VMAX.F32 q1, q1, q12 VADD.F32 q0, q0, q10 VADD.F32 q1, q1, q10 VSUB.S32 q0, q0, q11 VSUB.S32 q1, q1, q11 VQMOVN.S32 d0, q0 VQMOVN.S32 d1, q1 VQMOVUN.S16 d0, q0 TST r0, 32 BEQ 3f VST1.32 {d0[0]}, [lr]! VEXT.8 d0, d0, 4 3: TST r0, 16 BEQ 4f VST1.16 {d0[0]}, [lr]! VEXT.8 d0, d0, 2 4: TST r0, 8 BEQ 5f VST1.8 {d0[0]}, [lr]! 
5: # Load output_width from stack LDR r1, [sp, #-12] # Load output increment # - r3 = output_increment LDR r3, [sp, 108] # Decrement output width SUBS r1, r1, 1 # store output_width on stack STR r1, [sp, #-12] # Increment output by output_increment ADD lr, lr, r3 # If output width is non-zero, process another pixel BNE 0b VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
2,603
test/apps/fork/fork.S
# SPDX-License-Identifier: MPL-2.0 # FIXME: WNOHANG option currently does not work properly without preemption, so we have temporarily # removed it. Once preemption is supported, the following macro can be uncommented to add the WNOHANG # option back. # #define PREEMPTION_ENABLE .global _start .section .text _start: call print_hello_world mov $57, %rax # syscall number of fork syscall cmp $0, %rax je _child # child process jmp _parent # parent process _parent: call wait_child call get_pid call print_parent_message call exit _child: call get_pid call print_child_message call exit wait_child: mov %rax, %rdi # child process id #ifdef PREEMPTION_ENABLE _loop: mov $61, %rax # syscall number of wait4 mov $0, %rsi # exit status address mov $1, %rdx # wait option: WNOHANG syscall cmp %rdi, %rax # The return value is the pid of child jne _loop ret #else mov $61, %rax # syscall number of wait4 mov $0, %rsi # exit status address mov $0, %rdx # wait option syscall ret #endif exit: mov $60, %rax # syscall number of exit mov $0, %rdi # exit code syscall get_pid: mov $39, %rax syscall ret print_hello_world: mov $message, %rsi # address of message mov $message_end, %rdx sub %rsi, %rdx # calculate message len jmp _print_message print_parent_message: mov $message_parent, %rsi # address of message mov $message_parent_end, %rdx sub %rsi, %rdx # calculate message len jmp _print_message print_child_message: mov $message_child, %rsi # address of message mov $message_child_end, %rdx sub %rsi, %rdx # calculate message len jmp _print_message # never directly call _print_message _print_message: mov $1, %rax # syscall number of write mov $1, %rdi # stdout syscall ret .section .rodata message: .ascii "Hello, world in fork\n" message_end: message_parent: .ascii "Hello world from parent\n" message_parent_end: message_child: .ascii "Hello world from child\n" message_child_end:
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
1,106
ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/setup.S
/* SPDX-License-Identifier: MPL-2.0 */ .section ".setup", "ax" .code64 // start_of_setup32 should be loaded at CODE32_START, which is our base. .global start_of_setup32 start_of_setup32: // `efi_handover_setup_entry64` should be at efi_handover_setup_entry32 + 0x200, but // we could provide the 32 bit dummy entry point as the 64 bit entry point - 0x200 // since we do not provide 32-bit entry point in the x86_64 specific implementation. .org 0x210 .global efi_handover_setup_entry efi_handover_setup_entry: // The 3 parameters of is stored in rdi, rsi and rdx (sysv64). // Do not use them. // Setup the stack. lea rsp, [rip + setup_stack_top] lea rax, [rip + halt] push rax # the return address mov rbp, rsp add rbp, -4 push rbp mov rbp, rsp .extern efi_handover_entry lea rax, [rip + efi_handover_entry] call rax // Unreachable here. halt: hlt jmp halt // A small stack for the setup code. .section .data .align 0x1000 / 8 .global setup_stack setup_stack: .skip 0x1000 .global setup_stack_top setup_stack_top:
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
2,407
ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/header.S
/* SPDX-License-Identifier: MPL-2.0 */ // The compatibility file for the Linux x86 Boot Protocol. // See https://www.kernel.org/doc/html/v5.6/x86/boot.html for // more information on the Linux x86 Boot Protocol. // Some of the fields filled with a 0xab* values should be filled // by the torjan builder. // Asterinas will use only a few of these fields, and some of them // are filled by the loader and will be read by Asterinas. .section ".header", "a" CODE32_START = 0x100000 SETUP_SECTS = 7 # so that the legacy setup could occupy a page SETUP_SECTS_SIZE = 0x200 * (SETUP_SECTS + 1) .code16 .org 0x01f1 hdr_start: setup_sects: .byte SETUP_SECTS root_flags: .word 1 syssize: .long 0 ram_size: .word 0 vid_mode: .word 0xfffd root_dev: .word 0 boot_flag: .word 0xAA55 jump: .byte 0xeb jump_addr: .byte hdr_end-jump_addr magic: .ascii "HdrS" .word 0x020f realmode_swtch: .word 0, 0 start_sys_seg: .word 0 .word 0 type_of_loader: .byte 0 loadflags: .byte (1 << 0) setup_move_size: .word 0 code32_start: .long CODE32_START ramdisk_image: .long 0 ramdisk_size: .long 0 bootsect_kludge: .long 0 heap_end_ptr: .word 65535 ext_loader_ver: .byte 0 ext_loader_type: .byte 0 cmd_line_ptr: .long 0 initrd_addr_max: .long 0x7fffffff kernel_alignment: .long 0x1000000 relocatable_kernel: .byte 0 min_alignment: .byte 0x10 xloadflags: .word 0b01111 # all handover protocols except kexec cmdline_size: .long 4096-1 hardware_subarch: .long 0 hardware_subarch_data: .quad 0 payload_offset: .long 0xabababab # at 0x248/4, to be filled by the builder payload_length: .long 0xabababab # at 0x24c/4, to be filled by the builder setup_data: .quad 0 pref_address: .quad CODE32_START - SETUP_SECTS_SIZE init_size: .long 0xabababab # at 0x260/4, to be filled by the builder # The handover_offset should be efi_handover_setup_entry - CODE32_START - 0x200 # But we use ABI workaround to avoid the relocation of efi_handover_setup_entry handover_offset: .long 0x10 kernel_info_offset: .long 0 hdr_end:
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
2,165
ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/header.S
/* SPDX-License-Identifier: MPL-2.0 */ // The compatibility file for the Linux x86 Boot Protocol. // See https://www.kernel.org/doc/html/v5.6/x86/boot.html for // more information on the Linux x86 Boot Protocol. // Some of the fields filled with a 0xab* values should be filled // by the torjan builder. // Asterinas will use only a few of these fields, and some of them // are filled by the loader and will be read by Asterinas. .section ".header", "a" CODE32_START = 0x100000 SETUP_SECTS = 7 # so that the legacy setup could occupy a page .code16 .org 0x01f1 hdr_start: setup_sects: .byte SETUP_SECTS root_flags: .word 1 syssize: .long 0 ram_size: .word 0 vid_mode: .word 0xfffd root_dev: .word 0 boot_flag: .word 0xAA55 jump: .byte 0xeb jump_addr: .byte hdr_end-jump_addr magic: .ascii "HdrS" .word 0x020f realmode_swtch: .word 0, 0 start_sys_seg: .word 0 .word 0 type_of_loader: .byte 0 loadflags: .byte (1 << 0) setup_move_size: .word 0 code32_start: .long CODE32_START ramdisk_image: .long 0 ramdisk_size: .long 0 bootsect_kludge: .long 0 heap_end_ptr: .word 65535 ext_loader_ver: .byte 0 ext_loader_type: .byte 0 cmd_line_ptr: .long 0 initrd_addr_max: .long 0x7fffffff kernel_alignment: .long 0x1000000 relocatable_kernel: .byte 0 min_alignment: .byte 0x10 xloadflags: .word 0 cmdline_size: .long 4096-1 hardware_subarch: .long 0 hardware_subarch_data: .quad 0 payload_offset: .long 0xabababab # at 0x248/4, to be filled by the builder payload_length: .long 0xabababab # at 0x24c/4, to be filled by the builder setup_data: .quad 0 pref_address: .quad CODE32_START - 0x200 * (SETUP_SECTS + 1); init_size: .long 0xabababab # at 0x260/4, to be filled by the builder handover_offset: .long 0 kernel_info_offset: .long 0 hdr_end:
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
4,069
ostd/src/arch/riscv/trap/trap.S
/* SPDX-License-Identifier: MPL-2.0 OR MIT * * The original source code is from [trapframe-rs](https://github.com/rcore-os/trapframe-rs), * which is released under the following license: * * SPDX-License-Identifier: MIT * * Copyright (c) 2020 - 2024 Runji Wang * * We make the following new changes: * * Add the `trap_handler_table`. * * These changes are released under the following license: * * SPDX-License-Identifier: MPL-2.0 */ # Constants / Macros defined in Rust code: # XLENB # LOAD # STORE .section .text .global trap_entry .balign 4 trap_entry: # If coming from userspace, preserve the user stack pointer and load # the kernel stack pointer. If we came from the kernel, sscratch # will contain 0, and we should continue on the current stack. csrrw sp, sscratch, sp bnez sp, trap_from_user trap_from_kernel: csrr sp, sscratch addi sp, sp, -34 * XLENB # sscratch = previous-sp, sp = kernel-sp trap_from_user: # save general registers except sp(x2) STORE_SP x1, 1 STORE_SP x3, 3 STORE_SP x4, 4 STORE_SP x5, 5 STORE_SP x6, 6 STORE_SP x7, 7 STORE_SP x8, 8 STORE_SP x9, 9 STORE_SP x10, 10 STORE_SP x11, 11 STORE_SP x12, 12 STORE_SP x13, 13 STORE_SP x14, 14 STORE_SP x15, 15 STORE_SP x16, 16 STORE_SP x17, 17 STORE_SP x18, 18 STORE_SP x19, 19 STORE_SP x20, 20 STORE_SP x21, 21 STORE_SP x22, 22 STORE_SP x23, 23 STORE_SP x24, 24 STORE_SP x25, 25 STORE_SP x26, 26 STORE_SP x27, 27 STORE_SP x28, 28 STORE_SP x29, 29 STORE_SP x30, 30 STORE_SP x31, 31 # save sp, sstatus, sepc csrrw t0, sscratch, x0 # sscratch = 0 (kernel) csrr t1, sstatus csrr t2, sepc STORE_SP t0, 2 # save sp STORE_SP t1, 32 # save sstatus STORE_SP t2, 33 # save sepc li t0, 3 << 13 or t1, t1, t0 # sstatus.FS = Dirty (3) csrw sstatus, t1 andi t1, t1, 1 << 8 # sstatus.SPP == 1 beqz t1, end_trap_from_user end_trap_from_kernel: mv a0, sp # first arg is TrapFrame la ra, trap_return # set return address j trap_handler end_trap_from_user: # load callee-saved registers LOAD_SP sp, 0 LOAD_SP s0, 0 LOAD_SP s1, 1 LOAD_SP s2, 2 
LOAD_SP s3, 3 LOAD_SP s4, 4 LOAD_SP s5, 5 LOAD_SP s6, 6 LOAD_SP s7, 7 LOAD_SP s8, 8 LOAD_SP s9, 9 LOAD_SP s10, 10 LOAD_SP s11, 11 LOAD_SP ra, 12 # not callee-saved, but is used to store mhartid LOAD_SP gp, 13 addi sp, sp, 14 * XLENB ret .global run_user run_user: # save callee-saved registers addi sp, sp, -14 * XLENB STORE_SP s0, 0 STORE_SP s1, 1 STORE_SP s2, 2 STORE_SP s3, 3 STORE_SP s4, 4 STORE_SP s5, 5 STORE_SP s6, 6 STORE_SP s7, 7 STORE_SP s8, 8 STORE_SP s9, 9 STORE_SP s10, 10 STORE_SP s11, 11 STORE_SP ra, 12 # not callee-saved, but is used to store mhartid STORE_SP gp, 13 mv t0, sp mv sp, a0 STORE_SP t0, 0 # save kernel-sp csrw sscratch, sp # sscratch = bottom of trap frame trap_return: LOAD_SP t0, 32 # t0 = sstatus LOAD_SP t1, 33 # t1 = sepc csrw sstatus, t0 # load sstatus csrw sepc, t1 # load sepc # restore general registers except sp(x2) LOAD_SP x1, 1 LOAD_SP x3, 3 LOAD_SP x4, 4 LOAD_SP x5, 5 LOAD_SP x6, 6 LOAD_SP x7, 7 LOAD_SP x8, 8 LOAD_SP x9, 9 LOAD_SP x10, 10 LOAD_SP x11, 11 LOAD_SP x12, 12 LOAD_SP x13, 13 LOAD_SP x14, 14 LOAD_SP x15, 15 LOAD_SP x16, 16 LOAD_SP x17, 17 LOAD_SP x18, 18 LOAD_SP x19, 19 LOAD_SP x20, 20 LOAD_SP x21, 21 LOAD_SP x22, 22 LOAD_SP x23, 23 LOAD_SP x24, 24 LOAD_SP x25, 25 LOAD_SP x26, 26 LOAD_SP x27, 27 LOAD_SP x28, 28 LOAD_SP x29, 29 LOAD_SP x30, 30 LOAD_SP x31, 31 # restore sp last LOAD_SP x2, 2 # return from supervisor call sret
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
1,292
ostd/src/arch/riscv/boot/boot.S
/* SPDX-License-Identifier: MPL-2.0 */ .section .text.entry .globl _start _start: # Arguments passed from SBI: # a0 = hart id # a1 = device tree paddr (not touched) # 1. enable paging # setting up 1st pagetable # entry = (PPN(boot_pagetable_2nd) << 10) | 0x01 # V la t1, boot_pagetable li t0, 8 * 511 add t1, t1, t0 la t0, boot_pagetable_2nd srli t0, t0, 2 ori t0, t0, 0x01 sd t0, 0(t1) la t0, boot_pagetable li t1, 9 << 60 srli t0, t0, 12 or t0, t0, t1 csrw satp, t0 sfence.vma # 2. set sp (BSP only) lga sp, boot_stack_top # 3. jump to rust riscv_boot lga t0, riscv_boot jr t0 .section .bss.stack .globl boot_stack_bottom boot_stack_bottom: .space 0x40000 # 64 KiB .globl boot_stack_top boot_stack_top: .section .data .align 12 boot_pagetable: .quad (0x00000 << 10) | 0xcf # VRWXAD .zero 8 * 255 .quad (0x00000 << 10) | 0xcf # VRWXAD .zero 8 * 254 .quad 0 # To-Be-Assign boot_pagetable_2nd: # 0x0000_00ff_8000_0000 -> 0x0000_0000_8000_0000 .zero 8 * 508 .quad (0x00000 << 10) | 0xcf # VRWXAD .quad (0x40000 << 10) | 0xcf # VRWXAD .quad (0x80000 << 10) | 0xcf # VRWXAD .quad 0
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
2,967
ostd/src/arch/x86/trap/syscall.S
/* SPDX-License-Identifier: MPL-2.0 OR MIT * * The original source code is from [trapframe-rs](https://github.com/rcore-os/trapframe-rs), * which is released under the following license: * * SPDX-License-Identifier: MIT * * Copyright (c) 2020 - 2024 Runji Wang * * We make the following new changes: * * Skip saving/restoring the fsgsbase registers. * * These changes are released under the following license: * * SPDX-License-Identifier: MPL-2.0 */ .code64 .text # extern "sysv64" fn syscall_return(&mut UserContext) .global syscall_return syscall_return: # disable interrupt cli # save callee-saved registers push r15 push r14 push r13 push r12 push rbp push rbx push rdi # keep rsp 16 bytes align mov gs:4, rsp # store kernel rsp -> TSS.sp0 mov rsp, rdi # set rsp -> UserContext # restore user gsbase swapgs pop rax pop rbx pop rcx pop rdx pop rsi pop rdi pop rbp pop r8 # skip rsp pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 # rip # rflags # fsbase # gsbase # trap_num # error_code # determain sysret or iret cmp dword ptr [rsp + 4*8], 0x100 # syscall? 
je sysret iret: # construct trap frame push [USER_SS] # push ss push [rsp - 8*8] # push rsp push [rsp + 3*8] # push rflags push [USER_CS] # push cs push [rsp + 4*8] # push rip iretq sysret: pop rcx # rcx = rip pop r11 # r11 = rflags mov rsp, [rsp - 11*8] # load rsp sysretq # sysretq instruction do: # - load cs, ss # - load rflags <- r11 # - load rip <- rcx .global syscall_entry syscall_entry: # syscall instruction do: # - load cs # - store rflags -> r11 # - mask rflags # - store rip -> rcx # - load rip swapgs # swap in kernel gs mov gs:12, rsp # store user rsp -> scratch at TSS.sp1 mov rsp, gs:4 # load kernel rsp <- TSS.sp0 pop rsp # load rsp <- UserContext add rsp, 21*8 # rsp -> error code of UserContext push 0x100 # push trap_num sub rsp, 16 # skip fsbase, gsbase # push general registers push r11 # push rflags push rcx # push rip .global trap_syscall_entry trap_syscall_entry: push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push gs:12 # push rsp push rbp push rdi push rsi push rdx push rcx push rbx push rax # restore callee-saved registers mov rsp, gs:4 # load kernel rsp <- TSS.sp0 pop rbx pop rbx pop rbp pop r12 pop r13 pop r14 pop r15 # go back to Rust ret
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
3,012
ostd/src/arch/x86/trap/trap.S
/* SPDX-License-Identifier: MPL-2.0 OR MIT * * The original source code is from [trapframe-rs](https://github.com/rcore-os/trapframe-rs), * which is released under the following license: * * SPDX-License-Identifier: MIT * * Copyright (c) 2020 - 2024 Runji Wang * * We make the following new changes: * * Add the `trap_handler_table`. * * These changes are released under the following license: * * SPDX-License-Identifier: MPL-2.0 */ .code64 .equ NUM_INT, 256 .altmacro .macro DEF_HANDLER, i .Ltrap_handler_\i: .if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17 # error code pushed by CPU push \i # interrupt vector jmp trap_common .else push 0 # fill in error code in TrapFrame push \i # interrupt vector jmp trap_common .endif .endm .section .text _trap_handlers: .set i, 0 .rept NUM_INT DEF_HANDLER %i .set i, i + 1 .endr .macro DEF_TABLE_ENTRY, i .quad .Ltrap_handler_\i .endm .section .rodata .global trap_handler_table trap_handler_table: .set i, 0 .rept NUM_INT DEF_TABLE_ENTRY %i .set i, i + 1 .endr .section .text .global trap_common trap_common: cld # clear DF before calling/returning to any C function to conform to x86-64 calling convention push rax mov ax, [rsp + 4*8] # load cs and ax, 0x3 # test jz __from_kernel # continue trap __from_user: /* kernel stack: - ptr to UserContext - ss - rsp - rflags - cs - rip - error code - trap num - rax */ swapgs # swap in kernel gs mov rax, [rsp + 6*8] # rax = user rsp mov gs:12, rax # store user rsp -> scratch at TSS.sp1 mov rsp, [rsp + 8*8] # load rsp <- UserContext add rsp, 22*8 # rsp -> top of UserContext mov rax, gs:4 # rax = kernel stack # push trap_num, error_code push [rax - 6*8] # push error_code push [rax - 7*8] # push trap_num sub rsp, 16 # skip fsbase, gsbase # push general registers push [rax - 3*8] # push rflags push [rax - 5*8] # push rip mov rax, [rax - 8*8] # pop rax jmp trap_syscall_entry __from_kernel: /* kernel stack: - rflags - cs - rip - error code - trap num - rax */ pop rax push 0 push r15 push r14 push r13 
push r12 push r11 push r10 push r9 push r8 lea r8, [rsp + 13*8] push r8 # push rsp push rbp push rdi push rsi push rdx push rcx push rbx push rax mov rdi, rsp call trap_handler .global trap_return trap_return: pop rax pop rbx pop rcx pop rdx pop rsi pop rdi pop rbp pop r8 # skip rsp pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 # skip padding, trap_num, error_code add rsp, 24 iretq
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
8,513
ostd/src/arch/x86/boot/bsp_boot.S
/* SPDX-License-Identifier: MPL-2.0 */ // The boot routine executed by the bootstrap processor. // The boot header, initial boot setup code, temporary GDT and page tables are // in the boot section. The boot section is mapped writable since kernel may // modify the initial page table. .section ".bsp_boot", "awx" .code32 // With every entry types we could go through common paging or machine // state setup routines. Thus we make a mark of protocol used in each entrypoint // on the stack. ENTRYTYPE_MULTIBOOT = 1 ENTRYTYPE_MULTIBOOT2 = 2 ENTRYTYPE_LINUX_32 = 3 ENTRYTYPE_LINUX_64 = 4 MULTIBOOT_ENTRY_MAGIC = 0x2BADB002 MULTIBOOT2_ENTRY_MAGIC = 0x36D76289 KERNEL_VMA = 0xffffffff80000000 // The Linux 32-bit Boot Protocol entry point. // Must be located at 0x8001000, ABI immutable! .code32 .org 0x000 .global __linux32_boot __linux32_boot: cli cld // Set the kernel call stack. mov esp, offset boot_stack_top push 0 // upper 32-bits push esi // boot_params ptr push 0 // upper 32-bits push ENTRYTYPE_LINUX_32 jmp initial_boot_setup // The Linux 64-bit Boot Protocol entry point. // Must be located at 0x8001200, ABI immutable! .code64 .org 0x200 .global __linux64_boot_tag __linux64_boot_tag: // Set the kernel call stack. lea rsp, [boot_stack_top] push rsi // boot_params ptr from the loader push ENTRYTYPE_LINUX_64 // Here RSP/RIP are still using low address. jmp long_mode_in_low_address // The multiboot & multiboot2 entry point. .code32 .global __multiboot_boot __multiboot_boot: cli cld // Set the kernel call stack. mov esp, offset boot_stack_top push 0 // Upper 32-bits. push eax // multiboot magic ptr push 0 // Upper 32-bits. push ebx // multiboot info ptr // Tell the entry type from eax cmp eax, MULTIBOOT_ENTRY_MAGIC je magic_is_mb cmp eax, MULTIBOOT2_ENTRY_MAGIC je magic_is_mb2 jmp halt // Should not be reachable! magic_is_mb: push 0 // Upper 32-bits. push ENTRYTYPE_MULTIBOOT jmp initial_boot_setup magic_is_mb2: push 0 // Upper 32-bits. 
push ENTRYTYPE_MULTIBOOT2 jmp initial_boot_setup initial_boot_setup: // Prepare for far return. We use a far return as a fence after setting GDT. mov eax, 24 push eax lea edx, [protected_mode] push edx // Switch to our own temporary GDT. lgdt [boot_gdtr] retf protected_mode: mov ax, 16 mov ds, ax mov ss, ax mov es, ax mov fs, ax mov gs, ax page_table_setup: // Zero out the page table. mov al, 0x00 lea edi, [boot_page_table_start] lea ecx, [boot_page_table_end] sub ecx, edi cld rep stosb // PTE flags used in this file. PTE_PRESENT = (1) PTE_WRITE = (1 << 1) PTE_HUGE = (1 << 7) PTE_GLOBAL = (1 << 8) // PML4: 0x00000000_00000000 ~ 0x00000000_3fffffff // 0x00000000_40000000 ~ 0x00000000_7fffffff // 0x00000000_80000000 ~ 0x00000000_bfffffff // 0x00000000_c0000000 ~ 0x00000000_ffffffff lea edi, [boot_pml4] lea eax, [boot_pdpt + (PTE_PRESENT | PTE_WRITE)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PML4: 0xffff8000_00000000 ~ 0xffff8000_3fffffff // 0xffff8000_40000000 ~ 0xffff8000_7fffffff // 0xffff8000_80000000 ~ 0xffff8000_bfffffff // 0xffff8000_c0000000 ~ 0xffff8000_ffffffff // 0xffff8008_00000000 ~ 0xffff8008_3fffffff lea edi, [boot_pml4 + 0x100 * 8] lea eax, [boot_pdpt + (PTE_PRESENT | PTE_WRITE)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PML4: 0xffffffff_80000000 ~ 0xffffffff_bfffffff // 0xffffffff_c0000000 ~ 0xffffffff_ffffffff lea edi, [boot_pml4 + 0x1ff * 8] lea eax, [boot_pdpt + (PTE_PRESENT | PTE_WRITE)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0x00000000_00000000 ~ 0x00000000_3fffffff lea edi, [boot_pdpt] lea eax, [boot_pd_0g_1g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0x00000000_40000000 ~ 0x00000000_7fffffff lea edi, [boot_pdpt + 0x1 * 8] lea eax, [boot_pd_1g_2g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0x00000000_80000000 ~ 0x00000000_bfffffff lea edi, [boot_pdpt + 0x2 * 8] lea eax, 
[boot_pd_2g_3g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0x00000000_c0000000 ~ 0x00000000_ffffffff lea edi, [boot_pdpt + 0x3 * 8] lea eax, [boot_pd_3g_4g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0xffffffff_80000000 ~ 0xffffffff_bfffffff lea edi, [boot_pdpt + 0x1fe * 8] lea eax, [boot_pd_0g_1g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0xffffffff_c0000000 ~ 0xffffffff_ffffffff lea edi, [boot_pdpt + 0x1ff * 8] lea eax, [boot_pd_1g_2g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // Page Directory: map to low 1 GiB * 4 space lea edi, [boot_pd] mov eax, PTE_PRESENT | PTE_WRITE | PTE_GLOBAL | PTE_HUGE mov ecx, 512 * 4 // (of entries in PD) * (number of PD) write_pd_entry: mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 add eax, 0x200000 // +2MiB add edi, 8 loop write_pd_entry jmp enable_long_mode enable_long_mode: // Enable PAE and PGE. mov eax, cr4 or eax, 0xa0 mov cr4, eax // Set the page table address. lea eax, [boot_pml4] mov cr3, eax // Enable long mode. mov ecx, 0xc0000080 rdmsr or eax, 0x0100 wrmsr // Prepare for far return. mov eax, 8 push eax lea edx, [long_mode_in_low_address] push edx // Enable paging. mov eax, cr0 or eax, 0x80000000 mov cr0, eax retf // Temporary GDTR/GDT entries. This must be located in the .boot section as its // address (gdt) must be physical to load. 
.align 16 .global boot_gdtr boot_gdtr: .word gdt_end - gdt - 1 .quad gdt .align 16 gdt: .quad 0x0000000000000000 // 0: null descriptor .quad 0x00af9a000000ffff // 8: 64-bit code segment (kernel) .quad 0x00cf92000000ffff // 16: 64-bit data segment (kernel) .quad 0x00cf9a000000ffff // 24: 32-bit code segment (kernel) gdt_end: // The page tables and the stack .align 4096 .global boot_page_table_start boot_page_table_start: boot_pml4: .skip 4096 boot_pdpt: .skip 4096 boot_pd: boot_pd_0g_1g: .skip 4096 boot_pd_1g_2g: .skip 4096 boot_pd_2g_3g: .skip 4096 boot_pd_3g_4g: .skip 4096 boot_page_table_end: .global boot_stack_top boot_stack_bottom: .skip 0x40000 boot_stack_top: .code64 long_mode_in_low_address: mov ax, 0 mov ds, ax mov ss, ax mov es, ax mov fs, ax mov gs, ax // Update RSP/RIP to use the virtual address. mov rbx, KERNEL_VMA or rsp, rbx mov rax, offset long_mode jmp rax // From here, we're in the .text section: we no longer use physical address. .text .code64 long_mode: // Clear .bss section. mov al, 0x00 lea rdi, [rip + __bss] lea rcx, [rip + __bss_end] sub rcx, rdi cld rep stosb // Call the corresponding Rust entrypoint according to the boot entrypoint pop rax cmp rax, ENTRYTYPE_MULTIBOOT je entry_type_multiboot cmp rax, ENTRYTYPE_MULTIBOOT2 je entry_type_multiboot2 cmp rax, ENTRYTYPE_LINUX_32 je entry_type_linux cmp rax, ENTRYTYPE_LINUX_64 je entry_type_linux // Unreachable! jmp halt .extern __linux_boot .extern __multiboot_entry .extern __multiboot2_entry entry_type_linux: pop rdi // boot_params ptr xor rbp, rbp lea rax, [rip + __linux_boot] // jump into Rust code call rax jmp halt entry_type_multiboot: pop rsi // the address of multiboot info pop rdi // multiboot magic xor rbp, rbp lea rax, [rip + __multiboot_entry] // jump into Rust code call rax jmp halt entry_type_multiboot2: pop rsi // the address of multiboot info pop rdi // multiboot magic xor rbp, rbp lea rax, [rip + __multiboot2_entry] // jump into Rust code call rax jmp halt halt: cli hlt jmp halt
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
3,565
ostd/src/arch/x86/boot/ap_boot.S
/* SPDX-License-Identifier: MPL-2.0 */ // The boot routine executed by the application processor. .extern boot_gdtr .extern boot_page_table_start .extern ap_early_entry .section ".ap_boot", "awx" .align 4096 .code16 IA32_APIC_BASE = 0x1B IA32_X2APIC_APICID = 0x802 MMIO_XAPIC_APICID = 0xFEE00020 ap_real_mode_boot: cli // disable interrupts cld xor ax, ax // clear ax mov ds, ax // clear ds lgdt [ap_gdtr] // load gdt mov eax, cr0 or eax, 1 mov cr0, eax // enable protected mode ljmp 0x8, offset ap_protect_entry // 32-bit AP GDT. .align 16 ap_gdt: .quad 0x0000000000000000 ap_gdt_code: .quad 0x00cf9a000000ffff ap_gdt_data: .quad 0x00cf92000000ffff ap_gdt_end: .align 16 ap_gdtr: .word ap_gdt_end - ap_gdt - 1 .quad ap_gdt .align 4 .code32 ap_protect_entry: mov ax, 0x10 mov ds, ax mov ss, ax // Get the local APIC ID from xAPIC or x2APIC. // It is better to get this information in protected mode. // After entering long mode, we need to set additional page // table mapping for xAPIC mode mmio region. // Tell if it is xAPIC or x2APIC. // IA32_APIC_BASE register: // bit 8: BSP—Processor is BSP // bit 10: EXTD—Enable x2APIC mode // bit 11: EN—xAPIC global enable/disable // bit 12-35: APIC Base—Base physical address mov ecx, IA32_APIC_BASE rdmsr and eax, 0x400 // check EXTD bit cmp eax, 0x400 je x2apic_mode xapic_mode: // In xAPIC mode, the local APIC ID is stored in // the MMIO region. mov eax, [MMIO_XAPIC_APICID] shr eax, 24 jmp ap_protect x2apic_mode: // In x2APIC mode, the local APIC ID is stored in // IA32_X2APIC_APICID MSR. mov ecx, IA32_X2APIC_APICID rdmsr jmp ap_protect // This is a pointer to the page table used by the APs. // The BSP will fill this pointer before kicking the APs. .global __boot_page_table_pointer .align 4 __boot_page_table_pointer: .skip 4 ap_protect: // Save the local APIC ID in an unused register. // We will calculate the stack pointer of this core // by taking the local apic id as the offset. mov edi, eax // Now we try getting into long mode. 
// Use the 64-bit GDT. lgdt [boot_gdtr] // Enable PAE and PGE. mov eax, cr4 or eax, 0xa0 mov cr4, eax // Set the page table. The application processors use // the same page table as the bootstrap processor's // boot phase page table. mov eax, __boot_page_table_pointer mov cr3, eax // Enable long mode. mov ecx, 0xc0000080 rdmsr // load EFER MSR or eax, 1 << 8 wrmsr // set long bit // Enable paging. mov eax, cr0 or eax, 1 << 31 mov cr0, eax ljmp 0x8, offset ap_long_mode_in_low_address .code64 ap_long_mode_in_low_address: mov ax, 0 mov ds, ax mov ss, ax mov es, ax mov fs, ax mov gs, ax // Update RIP to use the virtual address. mov rax, offset ap_long_mode jmp rax .data // This is a pointer to be filled by the BSP when boot stacks // of all APs are allocated and initialized. .global __ap_boot_stack_array_pointer .align 8 __ap_boot_stack_array_pointer: .skip 8 .text .code64 ap_long_mode: // The local APIC ID is in the RDI. mov rax, rdi shl rax, 3 // Setup the stack. mov rbx, [__ap_boot_stack_array_pointer] mov rsp, [rbx + rax] xor rbp, rbp // Go to Rust code. mov rax, offset ap_early_entry call rax .extern halt # bsp_boot.S jmp halt
Shenwenkun/Trusted-AI-based-on-Rust-confidential-operating-system
1,253
ostd/src/arch/x86/boot/multiboot2/header.S
/* SPDX-License-Identifier: MPL-2.0 */ // This is the GNU Multiboot 2 header. // Reference: https://www.gnu.org/software/grub/manual/multiboot2/html_node/Index.html//Index .section ".multiboot2_header", "a" .code32 // Macros for cleaner code in the header fields. MB2_MAGIC = 0xE85250D6 MB2_ARCHITECTURE = 0 // 32-bit (protected) mode of i386 MB2_HEADERLEN = header_end - header_start MB2_CHECKSUM = -(MB2_MAGIC + MB2_ARCHITECTURE + MB2_HEADERLEN) header_start: .align 8 .long MB2_MAGIC .long MB2_ARCHITECTURE .long MB2_HEADERLEN .long MB2_CHECKSUM // Tag: entry address entry_address_tag_start: .short 3 .short 1 // Optional .long entry_address_tag_end - entry_address_tag_start .extern __multiboot_boot .long __multiboot_boot // entry_addr entry_address_tag_end: // Tag: information request .align 8 info_request: .short 1 .short 0 // Required .long info_request_end - info_request .long 6 // Memory map request .long 15 // ACPI (new) request info_request_end: // Tag: header end .align 8 .short 0 // type: tags end .short 0 // flags .long 8 // size header_end:
ShuoAndy/rcore_lab3_addition
1,640
os/src/trap/trap.S
.altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm .section .text.trampoline .globl __alltraps .globl __restore .align 2 __alltraps: csrrw sp, sscratch, sp # now sp->*TrapContext in user space, sscratch->user stack # save other general purpose registers sd x1, 1*8(sp) # skip sp(x2), we will save it later sd x3, 3*8(sp) # skip tp(x4), application does not use it # save x5~x31 .set n, 5 .rept 27 SAVE_GP %n .set n, n+1 .endr # we can use t0/t1/t2 freely, because they have been saved in TrapContext csrr t0, sstatus csrr t1, sepc sd t0, 32*8(sp) sd t1, 33*8(sp) # read user stack from sscratch and save it in TrapContext csrr t2, sscratch sd t2, 2*8(sp) # load kernel_satp into t0 ld t0, 34*8(sp) # load trap_handler into t1 ld t1, 36*8(sp) # move to kernel_sp ld sp, 35*8(sp) # switch to kernel space csrw satp, t0 sfence.vma # jump to trap_handler jr t1 __restore: # a0: *TrapContext in user space(Constant); a1: user space token # switch to user space csrw satp, a1 sfence.vma csrw sscratch, a0 mv sp, a0 # now sp points to TrapContext in user space, start restoring based on it # restore sstatus/sepc ld t0, 32*8(sp) ld t1, 33*8(sp) csrw sstatus, t0 csrw sepc, t1 # restore general purpose registers except x0/sp/tp ld x1, 1*8(sp) ld x3, 3*8(sp) .set n, 5 .rept 27 LOAD_GP %n .set n, n+1 .endr # back to user stack ld sp, 2*8(sp) sret
Shunxu-H/oak-experiment
1,167
oak_restricted_kernel_wrapper/src/asm/boot.s
/* * Copyright 2023 The Project Oak Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section .boot, "ax" .global _wrapper_entry .code64 _wrapper_entry: # At this point we expect to have a valid page table identity mapping (at least) the lowest 1G # of physical memory. # # Note: don't touch %rsi, as that contains the address of the zero page. # Set up the new stack. lea stack_start(%rip), %rsp # Push 8 bytes to fix stack alignment issue. Because we enter rust64_start with a jmp rather # than a call the function prologue means that the stack is no longer 16-byte aligned. push $0 jmp rust64_start
Shunxu-H/oak-experiment
4,172
stage0_bin_tdx/src/asm/tdx.s
.align 16 .section .tdx.bootstrap, "ax" .code32 .global ap_start ap_start: hlt .global _begin_of_tdx _begin_of_tdx: # VCPU_INDEX is in esi cli andl $0x3f, %ebx # [6:0] GPAW movl %ebx, %ebp movl $gdt_desc_offset, %ebx lgdtl (%ebx) movl $0x00000023, %eax # SEC_DEFAULT_CR0 movl %eax, %cr0 ljmpl $cs32, $_tdx_32bit_long_mode_start _tdx_32bit_long_mode_start: movl $0x640, %eax # SEC_DEFAULT_CR4 movl %eax, %cr4 movl $ds, %eax movl %eax, %ds movl %eax, %es movl %eax, %fs movl %eax, %gs movl %eax, %ss # Skip BFV check # Skip UEFI SEC setup # Note that no matter what the GPAW is, we only use 4-level # paging in stage0. Linux will set up its own PTs later. movl %cr4, %eax bts $0x05, %eax # PAE movl %eax, %cr4 # page tables are set in the linker script movl $bios_pml4, %ecx movl %ecx, %cr3 # No need for rdmsr/wrmsr # Protected mode + paging mov %cr0, %eax or $0x80000001, %eax # set PG mov %eax, %cr0 # Reload CS, enter long mode, jump to 64-bit code. ljmpl $cs, $_tdx_64bit_start .align 16 .code64 _tdx_64bit_start: # Clean up data segments. movw $ds, %ax movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movw %ax, %ss # Park the APs test %esi, %esi jnz _park_ap_64bit # BSP re-creates a set of page table in ram_low # Clear BSS: base address goes to EDI, value (0) goes to EAX, # count goes into ECX. Page tables will be located in BSS movl $bss_start, %edi movl $bss_size, %ecx xorl %eax, %eax rep stosb # Set the first entry of PML4 to point to PDPT (0..512GiB). movl ${pdpt}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) movl %esi, ({pml4}) # set first half of PML4[0] # Set the first entry of PDPT to point to PD_0 (0..1GiB). movl ${pd_0}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) movl %esi, ({pdpt}) # set first half of PDPT[0] # Set the fourth entry of PDPT to point to PD_3 (3..4GiB). 
movl ${pdpt}, %eax movl ${pd_3}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) movl %esi, 24(%eax) # set first half of PDPT[3], each entry is 8 bytes # Set the first entry of PD_0 to point to and identity mapped huge page (0..2MiB). movl $0x83, %esi # esi = 0x0 | 131 (PRESENT and WRITABLE and HUGE_PAGE) movl %esi, ({pd_0}) # set first half of PD_0[0] # Set the last entry of PD_3 to point to an identity-mapped 2MiB huge page ((4GiB-2MiB)..4GiB). # This is where the firmware ROM image is mapped, so we don't make it writable. movl ${pd_3}, %eax movl $0xFFE00000, %esi # address of 4GiB-2MiB orl $0x81, %esi # esi |= 129 (PRESENT and HUGE_PAGE) movl %esi, 0xFF8(%eax) # set first half of PML4[511], each entry is 8 bytes # Reload PML4 to use the writable PML4 movq ${pml4}, %rax movq %rax, %cr3 # Copy DATA from the ROM image (stored just after TEXT) to # the expected location. Source address goes to ESI, destination # goes to EDI, count goes to ECX. movl $text_end, %esi movl $data_start, %edi movl $data_size, %ecx rep movsd # Set up the stack. Stack now is in ram_low movl $stack_start, %esp push $0 # Set GPAW movl %ebp, (GPAW) # ...and jump to Rust code. jmp rust64_start _park_ap_64bit: leaq (AP_IN_64BIT_COUNT), %rcx lock incq (%rcx) # %esi stores the VCPU_INDEX movl %esi, %ebp # save the VCPU_INDEX movq $1, %rax # TDCALL_TDINFO tdcall # R8 [31:0] NUM_VCPUS # [63:32] MAX_VCPUS # R9 [31:0] VCPU_INDEX _inner_loop: # On entering the inner loop, APs are using the hard-coded page # tables from ROM. Before wake up the aps we need to reload cr3 # for APs. # Finally we will need to call ap_wakeup_vector with # VCPU_INDEX as the first argument like # fn ap_wakeup_vector(vcpu_index: u64); # movq $r9, $rdi # call ap_wakeup_vector pause jmp _inner_loop
Shunxu-H/oak-experiment
3,195
oak_restricted_kernel/src/boot/boot.s
/* * Copyright 2022 The Project Oak Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section .boot, "ax" .global _oak_start .code64 _oak_start: # At this point we expect to have a valid page table identity mapping (at least) the lowest 1G # of physical memory; that means that the first PML4 entry must point to a valid PDP, and the # first entry of that PDP must point to a valid PD. # Our goal is to map the first (physical) gigabyte to -2 GB in virtual address space; thus, we # need to make the last entry of the PML4 (covering the last 256T) point to a PDP, and the # second-to-last entry in that PDP point to the same PD as the PD in the lower half. # # We can reuse the existing data structures to achieve that goal. By pointing the last entry # of PML4 to the same PD as the first entry, and setting the second-to-last entry of that PD # to be the same as the first, we get our desired effect of mapping physical address 0x0 to # virtual address 0xFFFFFFFF80000000. As a side effect, this will map physical address 0x0 to # virtual address 0x0000007F80000000 (510*1G) as well, but that's fine. We'll be rewriting # the page tables soon after jumping to the kernel anyway. # # Note: don't touch %rsi, as that contains the address of the zero page. # Map the last entry of PML4 to the same location as the first. movq %cr3, %rbx # rbx = cr3 movq (%rbx), %rax # rax = *rbx movq %rax, 4088(%rbx) # rbx[511] = rax # Map the last entry of PDP to the same location as the first. 
# We're ignoring bit 51 (as that's commonly the encrypted bit). movabsq $0x0007FFFFFFFFF000, %rax # rax = $const andq (%rbx), %rax # rax = *rbx & rax (mask out all but the address) movq (%rax), %rdx # rdx = *rax movq %rdx, 4080(%rax) # rax[510] = rdx # Enable PGE (https://wiki.osdev.org/CPU_Registers_x86-64#CR4) movq %cr4, %rax orq $0b10000000, %rax movq %rax, %cr4 # Finally, trigger a full TLB flush by overwriting CR3, even if it is the same value. movq %rbx, %cr3 # Clear BSS: base address goes to RDI, value (0) goes to AX, count goes into CX. # Set Direction Dlag (DF) to 0 for the address to increment (not decrement) after each rep of stosb. cld mov $bss_start, %rdi mov $bss_size, %rcx xor %rax, %rax rep stosb mov $stack_start, %rsp # Push 8 bytes to fix stack alignment issue. Because we enter rust64_start with a jmp rather # than a call the function prologue means that the stack is no longer 16-byte aligned. push $0 jmp rust64_start
Shunxu-H/oak-experiment
2,005
stage0_bin/src/asm/ap_boot.s
.code16 .section .ap_text, "ax" # Entry point for APs. This needs to be page-aligned. .align 4096 .global ap_start ap_start: # Let the BSP know we're alive. lock incl (LIVE_AP_COUNT) 1: hlt jmp 1b # Under SEV-ES, we need to use the AP Reset Hold and AP Jump Tables. We could munge all of it into # `ap_start` above, but it's simpler to keep it separate as if we ever run this code we know we're # under SEV-ES without risking any exceptions (and thus avoid the need for an IDT). .global sev_es_start sev_es_start: lock incl (LIVE_AP_COUNT) 1: xor %edx, %edx # EDX = 0x0 mov $0x006, %eax # EAX = 0x007 - AP Reset Hold mov $0xC0010130, %ecx # ECX = 0xC001_0130 -- GHCB MSR wrmsr # MSR[ECX] = EDX:EAX rep vmmcall # VMGEXIT rdmsr # EDX:EAX = MSR[ECX] # Check return value: GHCBData[63:12] must be non-zero, GHCBData[12:0] must be 0x007 mov %eax, %ebx # EBX = EAX and $0xFFF, %ebx # EBX |= 0xFFF (leave lowest 12 bits) cmp $0x007, %ebx # is the response AP Reset Hold Response? jne 1b # No. Go back to sleep. and $-0xFFF, %eax # EAX |= ~0xFFF (mask lowest 12 bits) add %edx, %eax # EAX += EDX test %eax, %eax # is GHDBData[63:12] zero? je 1b # Yes. Go back to sleep. # Determine where to jump from the AP Jump Table and off we go # First is IP, second is CS mov $AP_JUMP_TABLE, %sp # treat the jump table as stack iret # pop IP, pop CS, pop EFLAGS # if we're still here, something has gone wrong xor %edx, %edx # EDX = 0x0 mov $0x100, %eax # EAX = 0x100 - Termination Request mov $0xC0010130, %ecx # ECX = 0xC001_0130 -- GHCB MSR wrmsr # MSR[ECX] = EDX:EAX rep vmmcall # VMGEXIT 1: # If we're still alive, just go into a HLT loop. hlt jmp 1b
Shunxu-H/oak-experiment
9,897
stage0_bin/src/asm/boot.s
.code16 .align 16 .section .text16, "ax" .global _start _start : # Enter long mode. This code is inspired by the approach shown at # https://wiki.osdev.org/Entering_Long_Mode_Directly cli xor %eax, %eax mov %eax, %cr3 # Set up descriptors mov $gdt_desc_offset, %si lgdtl %cs:(%si) mov $idt_desc_offset, %si lidtl %cs:(%si) # Enter protected mode, but don't enable paging. mov %cr0, %eax or $1, %eax mov %eax, %cr0 ljmpl $cs32, $_protected_mode_start .align 16 .code32 .global gp_handler gp_handler: pop %eax # ignore the error code for now pop %eax # pop the return address cmpw $0x320F, (%eax) # are we trying to execute RDMSR? jne 2f # if not, skip ahead add $2, %eax # increment it by 2 (size of the RDMSR instruction) push %eax # push it back on stack for iret xor %eax, %eax # zero out RAX xor %edx, %edx # zero out RDX iret # go back 2: # this wasn't because RDMSR int $8 # trigger a double fault and crash .global vc_handler # Really limited #VC handler that only knows how to fill in EBX in case of CPUID. # As CPUID can alter EAX, EBX, ECX and EDX we zero out the other three registers. vc_handler: pop %ebx # get the error code cmp $0x72, %ebx # is this about CPUID? jne 2f # if not, skip ahead and crash mov (%esp), %ebx # get the instruction pointer cmpw $0xa20f, (%ebx) # was this really a CPUID instruction? jne 2f # if not it might be injected by the hypervisor, skip ahead and crash cmp $0x0, %ecx # are we asked for a CPUID subleaf? jne 2f # if yes, skip ahead, as we don't support subleaves # Use the GHCB MSR protocol to request one page of CPUID information. The protocol itself is # described in Section 2.3.1 of SEV-ES Guest-Hypervisor Communication Block Standardization spec. 
mov %eax, %edx # EDX = EAX (move the CPUID function number to GHCBData[63:32]) mov $0x40000004, %eax # EAX = Request EBX (0b01 << 30) | CPUID Request (0x004) mov $0xC0010130, %ecx # ECX = 0xC001_0130 -- GHCB MSR wrmsr # MSR[ECX] = EDX:EAX rep vmmcall # VMGEXIT rdmsr # EDX:EAX = MSR[ECX] cmp $0x40000005, %eax # EAX should contain EBX data (0b01 << 30) | CPUID Response (0x005) jne 2f # if not, crash addl $2, (%esp) # move return address forward past the CPUID instruction xor %eax, %eax # EAX = 0 mov %edx, %ebx # EBX = EDX (that's where the cpuid value is) xor %ecx, %ecx # ECX = 0 xor %edx, %edx # EDX = 0 iret # go back 2: # this wasn't because CPUID or the response wasn't what we expected int $8 # trigger double fault and crash _protected_mode_start: # Switch to a flat 32-bit data segment, giving us access to all 4G of memory. mov $ds, %eax mov %eax, %ds mov %eax, %es # needed for destination of stosb and movsb mov %eax, %ss # Set up a basic stack, as we may get interrupts. mov $stack_start, %esp # Determine if we're running under SEV. # Keep track of whether encryption is enabled in %ebp. mov $0xc0010131, %ecx # SEV_STATUS MSR. See Section 15.34.10 in AMD64 Architecture Programmer's # Manual, Volume 2 for more details. rdmsr # EDX:EAX <- MSR[ECX] push %edx # Store the raw result for future use on the stack. push %eax and $0b111, %eax # eax &= 0b111; # Bit 0 - SEV enabled # Bit 1 - SEV-ES enabled # Bit 2 - SEV-SNP active mov %eax, %ebp # store the result in EBP for later use # See if we're under SEV-SNP, and if yes, pre-emptively PVALIDATE the first 640 KiB of memory, # as that's where we'll be storing many data structures. and $0b100, %eax # eax &= 0b100; -- SEV-SNP active test %eax, %eax # is eax zero? 
je 2f # if yes, no SNP, skip validation and jump ahead mov $0x0000, %ebx # ebx = 0x0000 -- start address xor %ecx, %ecx # ecx = 0 -- we're using 4K pages mov $0b1, %edx # edx = 1 -- set RMP VALIDATED bit 1: mov %ebx, %eax # eax = ebx (PVALIDATE will clobber EAX) pvalidate # set validated bit in RMP, but ignore results for now add $0x1000, %ebx # ebx += 0x1000 cmp $0xa0000, %ebx # have we covered the full 640 KiB? jl 1b # if no, go back 2: # Clear BSS: base address goes to EDI, value (0) goes to EAX, count goes into ECX. mov $bss_start, %edi mov $bss_size, %ecx xor %eax, %eax rep stosb mov $ap_bss_start, %edi mov $ap_bss_size, %ecx xor %eax, %eax rep stosb # now that BSS is set up, initialize the raw Rust variables pop %eax pop %edx mov %eax, (SEV_STATUS) # Initialize the SEV_STATUS static variable in Rust. mov %edx, (SEV_STATUS+4) # Copy DATA from the ROM image (stored just after TEXT) to the expected location. # Source address goes to ESI, destination goes to EDI, count goes to ECX. mov $text_end, %esi mov $data_start, %edi mov $data_size, %ecx rep movsb # Copy AP bootstrap code to the expected location, similar to DATA above. mov $0xFFFFF000, %esi mov $ap_text_start, %edi mov $ap_text_size, %ecx rep movsb # Set the first entry of PML4 to point to PDPT (0..512GiB). mov ${pdpt}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) mov %esi, ({pml4}) # set first half of PML4[0] # Set the first entry of PDPT to point to PD_0 (0..1GiB). mov ${pd_0}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) mov %esi, ({pdpt}) # set first half of PDPT[0] # Set the fourth entry of PDPT to point to PD_3 (3..4GiB). mov ${pdpt}, %eax mov ${pd_3}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) mov %esi, 24(%eax) # set first half of PDPT[3], each entry is 8 bytes # Set the first entry of PD_0 to point to and identity mapped huge page (0..2MiB). 
mov $0x83, %esi # esi = 0x0 | 131 (PRESENT and WRITABLE and HUGE_PAGE) mov %esi, ({pd_0}) # set first half of PD_0[0] # Set the last entry of PD_3 to point to an identity-mapped 2MiB huge page ((4GiB-2MiB)..4GiB). # This is where the firmware ROM image is mapped, so we don't make it writable. mov ${pd_3}, %eax mov $0xFFE00000, %esi # address of 4GiB-2MiB orl $0x81, %esi # esi |= 129 (PRESENT and HUGE_PAGE) mov %esi, 0xFF8(%eax) # set first half of PML4[511], each entry is 8 bytes # Clear EDI, since we will use it later as the encrypted bit location to pass # into the 64-bit Rust entry point and by default we assume no encryption. xor %edi, %edi # Check whether encryption is enabled. The SEV status is stored in %ebp. test %ebp, %ebp # is it zero? je no_encryption # if yes, jump to no_encryption # Memory encryption enabled: set encrypted bits in the page tables. # First, determine the location of the C-bit in the page tables. # Keep track of which bit is the encrypted bit in EDI. mov $0x8000001F, %eax # EAX = Fn8000_001F - Encrypted Memory Capabilities xor %ecx, %ecx # ECX = 0 - we're not interested in a subpage cpuid # EAX, EBX, ECX, EDX = CPUID(EAX, ECX) and $0b111111, %ebx # zero out all but EBX[5:0], which the C-bit location mov %ebx, %edi # save the full C-bit location value for later to pass into the Rust # entry point (RDI contains the first argument according to sysv ABI) sub $32, %ebx # let's assume the encrypted bit is > 32, as it simplifies logic below mov $1, %esi mov %ebx, %ecx shl %cl, %esi # construct the encrypted bit mask, store it in ESI movl $0, (ENCRYPTED) # ... and store it in the ENCRYPTED variable as well mov %esi, (ENCRYPTED+4) # (lower half zeroed out as we expect it to be > 32, as above) # We set the encrypted bit for each of the page table entries that we previously created. # The encrypted bit is in the second half of each 8-byte entry, so we add an extra offset of 4 bytes. 
mov ${pml4}, %eax mov %esi, 4(%eax) # set second half of PML4[0] mov ${pdpt}, %eax mov %esi, 4(%eax) # set second half of PDPT[0] mov %esi, 28(%eax) # set second half of PDPT[3], each entry is 8 bytes mov ${pd_0}, %eax mov %esi, 4(%eax) # set second half of PD_0[0] mov ${pd_3}, %eax mov %esi, 0xFFC(%eax) # set second half of PD_3[511], each entry is 8 bytes no_encryption: # Load PML4 mov ${pml4}, %eax mov %eax, %cr3 # PAE mov $0b100000, %eax mov %eax, %cr4 # Read EFER, enable LME mov $0xC0000080, %ecx rdmsr or $0x00000100, %eax wrmsr # Protected mode + paging mov %cr0, %eax or $0x80000001, %eax mov %eax, %cr0 # Reload CS, enter long mode, jump to 64-bit code. ljmpl $cs, $_long_mode_start .align 16 .code64 _long_mode_start: # Clean up data segments. movw $ds, %ax movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movw %ax, %ss # Set up the stack. mov $stack_start, %esp push $0 # ...and jump to Rust code. jmp rust64_start
sigongzi/riscv-isa-vector
3,107
tests/corpus/rv32c.s
# Copyright James Wainwright # # SPDX-License-Identifier: MPL-2.0 c.lwsp x1, 0(x2) # 4082 c.lwsp x9, 44(x2) # 54b2 c.lwsp x31, 128(x2) # 4f8a c.swsp x1, 0(x2) # c006 c.swsp x9, 44(x2) # d626 c.swsp x31, 128(x2) # c17e c.lw x8, 0(x8) # 4000 c.lw x10, 36(x13) # 52c8 c.lw x15, 64(x15) # 43bc c.sw x8, 0(x8) # c000 c.sw x10, 36(x13) # d2c8 c.sw x15, 64(x15) # c3bc c.addi x1, 1 # 0085 c.addi x13, 27 # 06ed c.addi x31, 31 # 0ffd c.addi x31, -32 # 1f81 c.j 0 # a001 c.j 484 # a2d5 c.j -486 # bd29 c.j 2046 # affd c.j -2048 # b001 c.jal 0 # 2001 c.jal 484 # 22d5 c.jal -486 # 3d29 c.jal 2046 # 2ffd c.jal -2048 # 3001 c.jr x1 # 8082 c.jr x27 # 8d82 c.jr x31 # 8f82 c.jalr x1 # 9082 c.jalr x27 # 9d82 c.jalr x31 # 9f82 c.beqz x8, 0 # c001 c.beqz x9, 42 # c48d c.beqz x10, -44 # d971 c.beqz x15, 254 # cffd c.beqz x15, -256 # d381 c.bnez x8, 0 # e001 c.bnez x9, 42 # e48d c.bnez x10, -44 # f971 c.bnez x15, 254 # effd c.bnez x15, -256 # f381 c.li x1, 0 # 4081 c.li x13, 27 # 46ed c.li x17, -27 # 5895 c.li x31, 31 # 4ffd c.li x31, -32 # 5f81 c.lui x1, 1 # 6085 c.lui x13, 27 # 66ed c.lui x31, 31 # 6ffd c.lui x31, 1048575 # 7ffd c.addi x1, 0 # 0081 c.addi x13, 13 # 06b5 c.addi x17, -13 # 18cd c.addi x31, 31 # 0ffd c.addi x31, -32 # 1f81 c.addi16sp x2, 16 # 6141 c.addi16sp x2, 48 # 6145 c.addi16sp x2, -512 # 7101 c.addi16sp x2, 496 # 617d c.addi4spn x8, x2, 4 # 0040 c.addi4spn x12, x2, 248 # 19b0 c.addi4spn x15, x2, 1020 # 1ffc c.slli x1, 1 # 0086 c.slli x13, 27 # 06ee c.slli x31, 31 # 0ffe c.srli x8, 1 # 8005 c.srli x9, 16 # 80c1 c.srli x15, 31 # 83fd c.srai x8, 1 # 8405 c.srai x9, 16 # 84c1 c.srai x15, 31 # 87fd c.andi x8, 0 # 8801 c.andi x10, 5 # 8915 c.andi x12, -5 # 9a6d c.andi x15, 31 # 8bfd c.andi x15, -32 # 9b81 c.mv x1, x1 # 8086 c.mv x5, x17 # 82c6 c.mv x31, x31 # 8ffe c.add x1, x1 # 9086 c.add x5, x17 # 92c6 c.add x31, x31 # 9ffe c.or x8, x8 # 8c41 c.or x12, x13 # 8e55 c.or x15, x15 # 8fdd c.xor x8, x8 # 8c21 c.xor x12, x13 # 8e35 c.xor x15, x15 # 8fbd c.sub x8, x8 # 8c01 c.sub 
x12, x13 # 8e15 c.sub x15, x15 # 8f9d unimp # 0000 c.nop # 0001 c.ebreak # 9002
sigongzi/riscv-isa-vector
1,288
tests/corpus/rv32a.s
# Copyright James Wainwright # # SPDX-License-Identifier: MPL-2.0 lr.w x0, (x0) # 1000202f lr.w x4, (x17) # 1008a22f lr.w x31, (x31) # 100fafaf sc.w x0, x0, (x0) # 1800202f sc.w x4, x13, (x27) # 18dda22f sc.w x31, x31, (x31) # 19ffafaf amoswap.w x0, x0, (x0) # 0800202f amoswap.w x4, x13, (x27) # 08dda22f amoswap.w x31, x31, (x31) # 09ffafaf amoadd.w x0, x0, (x0) # 0000202f amoadd.w x4, x13, (x27) # 00dda22f amoadd.w x31, x31, (x31) # 01ffafaf amoxor.w x0, x0, (x0) # 2000202f amoxor.w x4, x13, (x27) # 20dda22f amoxor.w x31, x31, (x31) # 21ffafaf amoand.w x0, x0, (x0) # 6000202f amoand.w x4, x13, (x27) # 60dda22f amoand.w x31, x31, (x31) # 61ffafaf amoor.w x0, x0, (x0) # 4000202f amoor.w x4, x13, (x27) # 40dda22f amoor.w x31, x31, (x31) # 41ffafaf amomin.w x0, x0, (x0) # 8000202f amomin.w x4, x13, (x27) # 80dda22f amomin.w x31, x31, (x31) # 81ffafaf amomax.w x0, x0, (x0) # a000202f amomax.w x4, x13, (x27) # a0dda22f amomax.w x31, x31, (x31) # a1ffafaf amominu.w x0, x0, (x0) # c000202f amominu.w x4, x13, (x27) # c0dda22f amominu.w x31, x31, (x31) # c1ffafaf amomaxu.w x0, x0, (x0) # e000202f amomaxu.w x4, x13, (x27) # e0dda22f amomaxu.w x31, x31, (x31) # e1ffafaf