repo_id | size | file_path | content |
|---|---|---|---|
Cethannel/ros | 171 | src/asm/trap.S | # trap.S
# In the future our trap vector will go here.
.global asm_trap_vector
# This will be our trap vector when we start
# handling interrupts.
asm_trap_vector:
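# For now this is a stub: mret simply returns to the address held in mepc
# without saving or handling any state.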
mret
|
Cethannel/ros | 738 | src/asm/mem.S | // mem.S
// Importation of linker symbols
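// Each symbol below comes from the linker script; emitting it as a .dword
// exposes that address as a 64-bit global (e.g. HEAP_START holds the address
// of _heap_start) so it can be read from the rest of the kernel.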
.section .rodata
.global HEAP_START
HEAP_START: .dword _heap_start
.global HEAP_SIZE
HEAP_SIZE: .dword _heap_size
.global TEXT_START
TEXT_START: .dword _text_start
.global TEXT_END
TEXT_END: .dword _text_end
.global DATA_START
DATA_START: .dword _data_start
.global DATA_END
DATA_END: .dword _data_end
.global RODATA_START
RODATA_START: .dword _rodata_start
.global RODATA_END
RODATA_END: .dword _rodata_end
.global BSS_START
BSS_START: .dword _bss_start
.global BSS_END
BSS_END: .dword _bss_end
.global KERNEL_STACK_START
KERNEL_STACK_START: .dword _stack_start
.global KERNEL_STACK_END
KERNEL_STACK_END: .dword _stack_end
.section .data
.global KERNEL_TABLE
KERNEL_TABLE: .dword 0
|
Cethannel/ros | 4,366 | src/asm/boot.S | # boot.S
# bootloader for SoS
# Stephen Marz
# 8 February 2019
# Disable generation of compressed instructions.
.option norvc
# Define a .text.init section. The .text.init section is placed at the
# start of the image so that the entry point _start ends up at the RISC-V
# address 0x8000_0000.
.section .text.init
# Execution starts here.
.global _start
_start:
# Disable linker instruction relaxation for the `la` instruction below.
# This prevents the assembler from assuming that `gp` is already initialized.
# This causes the value stored in `gp` to be calculated from `pc`.
# The job of the global pointer is to give the linker the ability to address
# memory relative to GP instead of as an absolute address.
.option push
.option norelax
la gp, _global_pointer
.option pop
# SATP should be zero, but let's make sure. Each HART has its own
# SATP register.
csrw satp, zero
# Any hardware threads (hart) that are not bootstrapping
# need to wait for an IPI
csrr t0, mhartid
bnez t0, 3f
# Set all bytes in the BSS section to zero.
la a0, _bss_start
la a1, _bss_end
bgeu a0, a1, 2f
1:
sd zero, (a0)
addi a0, a0, 8
bltu a0, a1, 1b
2:
# The stack grows downward (toward lower addresses), so we point the stack
# pointer at the very end (highest address) of the stack range.
la sp, _stack_end
# Setting the `mstatus` register:
# 0b11 << 11: Previous protection mode is 3 (MPP=3 [Machine]).
# 1 << 13   : Initial floating-point unit state (FS=01).
li t0, 0b11 << 11 | (1 << 13)
csrw mstatus, t0
# Do not allow interrupts while running kmain
csrw mie, zero
# Machine's exception program counter (MEPC) is set to `kmain`.
la t1, kmain
csrw mepc, t1
# Set the return address so that when kmain returns we continue at 2: below
la ra, 2f
# We use mret here so that the mstatus register is properly updated.
mret
2:
# We set the return address (ra above) to this label. When kmain() is finished
# in Rust, it will return here.
# Setting the `mstatus` (machine status) register:
# 0b00 << 11 : Previous protection mode is 0 (MPP=00 [User]).
# 1 << 7     : Previous machine interrupt-enable bit is 1 (MPIE=1 [Enabled]).
# 1 << 5     : Previous supervisor interrupt-enable bit is 1 (SPIE=1 [Enabled]).
# 1 << 13    : Initial floating-point unit state (FS=01).
# We set the "previous" bits because mret will overwrite the current bits
# with the previous bits.
li t0, (0b00 << 11) | (1 << 7) | (1 << 5) | (1 << 13)
csrw mstatus, t0
# Machine's trap vector base address is set to `asm_trap_vector`, our
# "machine" trap vector.
la t2, asm_trap_vector
csrw mtvec, t2
# Jump to first process. We put the MPP = 00 for user mode, so after
# mret, we will jump to the first process' address in user mode.
la ra, 4f
mret
3:
# Parked harts go here. We need to set them up
# to awaken only when they receive a software interrupt,
# which we're going to call the SIPI (Software Intra-Processor Interrupt).
# We trigger the SIPI by writing to the software-interrupt register in the
# Core Local Interruptor (CLINT), whose address is base_address + hart * 4,
# where the base address is 0x0200_0000 (the MMIO CLINT base address).
# We only use additional harts to run user-space programs, although this may
# change.
# We divide up the stack so the harts aren't clobbering one another.
la sp, _stack_end
li t0, 0x10000
csrr a0, mhartid
mul t0, t0, a0
sub sp, sp, t0
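# Each hart now has its own 64 KiB (0x10000-byte) slice of the stack region:
# sp = _stack_end - mhartid * 0x10000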
# The parked harts will be put into machine mode with interrupts enabled.
li t0, 0b11 << 11 | (1 << 7) | (1 << 13)
csrw mstatus, t0
# Allow for MSIP (Software interrupt). We will write the MSIP from hart #0 to
# awaken these parked harts.
li t3, (1 << 3)
csrw mie, t3
# Machine's exception program counter (MEPC) would be set to the Rust
# initialization code and waiting loop (the two instructions below are left
# commented out).
#la t1, kinit_hart
#csrw mepc, t1
# Machine's trap vector base address is set to `asm_trap_vector`, our
# "machine" trap vector. The Rust initialization routines will give each
# hart its own trap frame. We can use the same trap function and distinguish
# between each hart by looking at the trap frame.
la t2, asm_trap_vector
csrw mtvec, t2
# Whenever our hart is done initializing, we want it to return to the waiting
# loop, which is just below mret.
la ra, 4f
# We use mret here so that the mstatus register is properly updated.
mret
4:
# wfi = wait for interrupt. This is a hint that the hart can power down until
# an interrupt arrives. However, the RISC-V specification allows wfi to do nothing. Either way,
# with QEMU, this will save some CPU!
wfi
j 4b
|
chaitanya21kumar/x1-btc-psbt-firmware | 11,110 | external/SEGGER_RTT_V796b/RTT/SEGGER_RTT_ASM_ARMv7M.S | /*********************************************************************
* (c) SEGGER Microcontroller GmbH *
* The Embedded Experts *
* www.segger.com *
**********************************************************************
-------------------------- END-OF-HEADER -----------------------------
File : SEGGER_RTT_ASM_ARMv7M.S
Purpose : Assembler implementation of RTT functions for ARMv7M
Additional information:
This module is written to be assembler-independent and works with
GCC and clang (Embedded Studio) and IAR.
*/
#define SEGGER_RTT_ASM // Used to control processed input from header file
#include "SEGGER_RTT.h"
/*********************************************************************
*
* Defines, fixed
*
**********************************************************************
*/
#define _CCIAR 0
#define _CCCLANG 1
#if (defined __SES_ARM) || (defined __GNUC__) || (defined __clang__)
#define _CC_TYPE _CCCLANG
#define _PUB_SYM .global
#define _EXT_SYM .extern
#define _END .end
#define _WEAK .weak
#define _THUMB_FUNC .thumb_func
#define _THUMB_CODE .code 16
#define _WORD .word
#define _SECTION(Sect, Type, AlignExp) .section Sect ##, "ax"
#define _ALIGN(Exp) .align Exp
#define _PLACE_LITS .ltorg
#define _DATA_SECT_START
#define _C_STARTUP _start
#define _STACK_END __stack_end__
#define _RAMFUNC
//
// .text => Link to flash
// .fast => Link to RAM
// OtherSect => Usually link to RAM
// Alignment is 2^x
//
#elif defined (__IASMARM__)
#define _CC_TYPE _CCIAR
#define _PUB_SYM PUBLIC
#define _EXT_SYM EXTERN
#define _END END
#define _WEAK _WEAK
#define _THUMB_FUNC
#define _THUMB_CODE THUMB
#define _WORD DCD
#define _SECTION(Sect, Type, AlignExp) SECTION Sect ## : ## Type ## :REORDER:NOROOT ## (AlignExp)
#define _ALIGN(Exp) alignrom Exp
#define _PLACE_LITS
#define _DATA_SECT_START DATA
#define _C_STARTUP __iar_program_start
#define _STACK_END sfe(CSTACK)
#define _RAMFUNC SECTION_TYPE SHT_PROGBITS, SHF_WRITE | SHF_EXECINSTR
//
// .text => Link to flash
// .textrw => Link to RAM
// OtherSect => Usually link to RAM
// NOROOT => Allows linker to throw away the function, if not referenced
// Alignment is 2^x
//
#endif
#if (_CC_TYPE == _CCIAR)
NAME SEGGER_RTT_ASM_ARMv7M
#else
.syntax unified
#endif
#if defined (RTT_USE_ASM) && (RTT_USE_ASM == 1)
#define SHT_PROGBITS 0x1
/*********************************************************************
*
* Public / external symbols
*
**********************************************************************
*/
_EXT_SYM __aeabi_memcpy
_EXT_SYM __aeabi_memcpy4
_EXT_SYM _SEGGER_RTT
_PUB_SYM SEGGER_RTT_ASM_WriteSkipNoLock
/*********************************************************************
*
* SEGGER_RTT_WriteSkipNoLock
*
* Function description
* Stores a specified number of characters in SEGGER RTT
* control block which is then read by the host.
* SEGGER_RTT_WriteSkipNoLock does not lock the application and
* skips all data, if the data does not fit into the buffer.
*
* Parameters
* BufferIndex Index of "Up"-buffer to be used (e.g. 0 for "Terminal").
* pBuffer Pointer to character array. Does not need to point to a \0 terminated string.
* NumBytes Number of bytes to be stored in the SEGGER RTT control block.
* MUST be > 0!!!
This is done for performance reasons, so no initial check has to be done.
*
* Return value
* 1: Data has been copied
* 0: No space, data has not been copied
*
* Notes
* (1) If there is not enough space in the "Up"-buffer, all data is dropped.
* (2) For performance reasons this function does not call Init()
* and may only be called after RTT has been initialized.
* Either by calling SEGGER_RTT_Init() or calling another RTT API function first.
*/
_SECTION(.text, CODE, 2)
_ALIGN(2)
_THUMB_FUNC
SEGGER_RTT_ASM_WriteSkipNoLock: // unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pData, unsigned NumBytes) {
//
// Cases:
// 1) RdOff <= WrOff => Space until wrap-around is sufficient
// 2) RdOff <= WrOff => Space after wrap-around needed (copy in 2 chunks)
// 3) RdOff < WrOff => No space in buf
// 4) RdOff > WrOff => Space is sufficient
// 5) RdOff > WrOff => No space in buf
//
// 1) is the most common case for large buffers and assuming that J-Link reads the data fast enough
//
// Register usage:
// R0 Temporary needed as RdOff, <Tmp> register later on
// R1 pData
// R2 <NumBytes>
// R3 <Tmp> register. Hold free for subroutine calls
// R4 <Rem>
// R5 pRing->pBuffer
// R6 pRing (Points to active struct SEGGER_RTT_BUFFER_DOWN)
// R7 WrOff
//
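//
// Index computation below (a sketch, assuming the standard SEGGER_RTT control
// block layout: a 24-byte header followed by the aUp[] array, with
// sizeof(SEGGER_RTT_BUFFER_UP) == 24 bytes):
//   R3 = BufferIndex * 3
//   R0 = &_SEGGER_RTT + (R3 << 3) = &_SEGGER_RTT + BufferIndex * 24
//   R6 = R0 + 24 = &_SEGGER_RTT.aUp[BufferIndex]
//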
PUSH {R4-R7}
ADD R3,R0,R0, LSL #+1
LDR.W R0,=_SEGGER_RTT // pRing = &_SEGGER_RTT.aUp[BufferIndex];
ADD R0,R0,R3, LSL #+3
ADD R6,R0,#+24
LDR R0,[R6, #+16] // RdOff = pRing->RdOff;
LDR R7,[R6, #+12] // WrOff = pRing->WrOff;
LDR R5,[R6, #+4] // pRing->pBuffer
CMP R7,R0
BCC.N _CheckCase4 // if (RdOff <= WrOff) { => Case 1), 2) or 3)
//
// Handling for case 1, later on identical to case 4
//
LDR R3,[R6, #+8] // Avail = pRing->SizeOfBuffer - WrOff - 1u; => Space until wrap-around (assume 1 byte not usable for case that RdOff == 0)
SUBS R4,R3,R7 // <Rem> (Used in case we jump into case 2 afterwards)
SUBS R3,R4,#+1 // <Avail>
CMP R3,R2
BCC.N _CheckCase2 // if (Avail >= NumBytes) { => Case 1)?
_Case4:
ADDS R5,R7,R5 // pBuffer += WrOff
ADDS R0,R2,R7 // v = WrOff + NumBytes
//
// 2x unrolling for the copy loop that is used most of the time
// This is a special optimization for small SystemView packets and makes them even faster
//
_ALIGN(2)
_LoopCopyStraight: // memcpy(pRing->pBuffer + WrOff, pData, NumBytes);
LDRB R3,[R1], #+1
STRB R3,[R5], #+1 // *pDest++ = *pSrc++
SUBS R2,R2,#+1
BEQ _CSDone
LDRB R3,[R1], #+1
STRB R3,[R5], #+1 // *pDest++ = *pSrc++
SUBS R2,R2,#+1
BNE _LoopCopyStraight
_CSDone:
#if _CORE_NEEDS_DMB // Do not slow down cores that do not need a DMB instruction here
DMB // Cortex-M7 may delay memory writes and also change the order in which the writes happen. Therefore, make sure that all buffer writes are finished, before updating the <WrOff> in the struct
#endif
STR R0,[R6, #+12] // pRing->WrOff = WrOff + NumBytes;
MOVS R0,#+1
POP {R4-R7}
BX LR // Return 1
_CheckCase2:
ADDS R0,R0,R3 // Avail += RdOff; => Space incl. wrap-around
CMP R0,R2
BCC.N _Case3 // if (Avail >= NumBytes) { => Case 2? => If not, we have case 3) (does not fit)
//
// Handling for case 2
//
ADDS R0,R7,R5 // v = pRing->pBuffer + WrOff => Do not change pRing->pBuffer here because 2nd chunk needs org. value
SUBS R2,R2,R4 // NumBytes -= Rem; (Rem = pRing->SizeOfBuffer - WrOff; => Space until end of buffer)
_LoopCopyBeforeWrapAround: // memcpy(pRing->pBuffer + WrOff, pData, Rem); => Copy 1st chunk
LDRB R3,[R1], #+1
STRB R3,[R0], #+1 // *pDest++ = *pSrc++
SUBS R4,R4,#+1
BNE _LoopCopyBeforeWrapAround
//
// Special case: The first check assumed RdOff == 0 and therefore treated the last element before the wrap-around as unusable
// However, the 2nd check (considering space until wrap-around and until RdOff) revealed that RdOff is not 0, so we can use the last element
// In that case we may copy straight until the buffer end without needing to copy 2 chunks
// Therefore, check if the 2nd memcpy is necessary at all
//
ADDS R4,R2,#+0 // Save <NumBytes> (needed as counter in loop but must be written to <WrOff> after the loop). Also use this inst to update the flags to skip 2nd loop if possible
BEQ.N _No2ChunkNeeded // if (NumBytes) {
_LoopCopyAfterWrapAround: // memcpy(pRing->pBuffer, pData + Rem, NumBytes);
LDRB R3,[R1], #+1 // pData already points to the next src byte due to copy loop increment before this loop
STRB R3,[R5], #+1 // *pDest++ = *pSrc++
SUBS R2,R2,#+1
BNE _LoopCopyAfterWrapAround
_No2ChunkNeeded:
#if _CORE_NEEDS_DMB // Do not slow down cores that do not need a DMB instruction here
DMB // Cortex-M7 may delay memory writes and also change the order in which the writes happen. Therefore, make sure that all buffer writes are finished, before updating the <WrOff> in the struct
#endif
STR R4,[R6, #+12] // pRing->WrOff = NumBytes; => Must be written after copying data because J-Link may read control block asynchronously while writing into buffer
MOVS R0,#+1
POP {R4-R7}
BX LR // Return 1
_CheckCase4:
SUBS R0,R0,R7
SUBS R0,R0,#+1 // Avail = RdOff - WrOff - 1u;
CMP R0,R2
BCS.N _Case4 // if (Avail >= NumBytes) { => Case 4) == 1) ? => If not, we have case 5) == 3) (does not fit)
_Case3:
MOVS R0,#+0
POP {R4-R7}
BX LR // Return 0
_PLACE_LITS
#endif // defined (RTT_USE_ASM) && (RTT_USE_ASM == 1)
_END
/*************************** End of file ****************************/
|
chengsx21/THU-OS | 676 | os/src/task/switch.S | .altmacro
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
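# TaskContext layout assumed by the macros above (byte offsets from the
# context pointer): 0: ra, 8: sp, 16 + n*8: s_n for n = 0..11, hence (n+2)*8.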
.section .text
.globl __switch
__switch:
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore kernel stack of next task
ld sp, 8(a1)
ret
|
chengsx21/THU-OS | 2,008 | os/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
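# TrapContext layout assumed below (byte offsets from sp):
#   n*8  : general-purpose register x_n (n = 0..31)
#   32*8 : sstatus      33*8 : sepc
#   34*8 : kernel_satp  35*8 : kernel_sp  36*8 : trap_handler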
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
.section .data
# emergency stack for kernel trap
# in order to print trap info even if the kernel stack is corrupted.
__emergency:
.align 4
.space 1024 * 4
__emergency_end:
.section .text
.globl __trap_from_kernel
# 2^2=4 bytes aligned for stvec
.align 2
__trap_from_kernel:
la sp, __emergency_end
j trap_from_kernel
|
chengyupku/tvm | 5,304 | src/runtime/hexagon/profiler/lwp_handler.S | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
Lightweight profiling handler to record processor cycles in a buffer
(pointed to by __lwp_buffer_ptr) for a given invocation of the handler. To keep the
buffer size within a reasonable limit, we only record data for the first 100
invocations of the handler for a given loop or function ID (passed in the R0 register).
The buffer size wouldn't be a concern if the loops with only siblings were getting
profiled. However, since the instrumentation provides several different profiling
options, this approach ensures that they all function as expected. We use a second
buffer (pointed to by __lwp_counter) to keep count of the calls made to the lwp_handler
function for each function/loop.
Brief explanation of all the global variables used:
1) __lwp_counter : Pointer to the buffer that keeps count of the number of times handler
is called for a given ID. To reduce the complexity of the handler, __lwp_counter is
indexed using the ID itself.
2) __lwp_buffer_ptr : Pointer to the buffer that records loop/function ID, processor cycles
and return address of the handler. The return address is used to reconstruct the call graph
(loop-nest) to make it easier to analyze the profiling data.
3) __lwp_buffer_size : Size of the buffer
4) __lwp_buffer_count : Offset into main lwp buffer where data for the current handler
invocation needs to be written.
NOTE: The handler function saves and restores R0-R5 registers which are caller saved registers
on Hexagon and should be handled at the callsite. However, to reduce the codegen impact
of the handler calls on the caller functions, we decided to move this part into the
handler itself.
*/
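/*
Illustrative C sketch of what one handler invocation does (comment only, not
part of the build; the variable names mirror the globals described above and
the record layout mirrors the stores below):
    if (++__lwp_counter[id] > 100) return;                    // per-ID cap
    if (__lwp_buffer_count + 4 >= __lwp_buffer_size) return;  // buffer full
    uint32_t *rec = __lwp_buffer_ptr + __lwp_buffer_count;    // 4 words per record
    __lwp_buffer_count += 4;
    rec[0] = return_address;  // LR (R31) of this handler call
    rec[1] = id;              // loop/function ID (R0)
    rec[2] = pcycle_lo;       // lower 32 bits of the C15:14 cycle counter
    rec[3] = pcycle_hi;       // upper 32 bits of the C15:14 cycle counter
*/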
.text
.globl lwp_handler
.falign
.type lwp_handler,@function
lwp_handler:
{ allocframe(#24) // Allocate 24 bytes on the stack to save R0-R5 registers
memd(r29+#-16) = r5:4 // Save R5,R4
}
{
memd(r29+#8) = r3:2 // Save R3,R2
memd(r29+#0) = r1:0 // Save R1, R0
r2 = add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL) // Get GOT address
}
{
r5 = memw(r2+##__lwp_counter@GOT) // Get address of the pointer to __lwp_counter
r3 = memw(r2+##__lwp_buffer_count@GOT) // Get the address of __lwp_buffer_count
}
{
r5 = memw(r5+#0) // Get the address of __lwp_counter (address of the main lwp buffer)
r3 = memw(r3+#0) // Get the __lwp_buffer_count value (offset into the main buffer)
}
{
r4 = memw(r5+r0<<#2) // Get the handler invocation count for the ID (passed in R0)
r1 = memw(r2+##__lwp_buffer_size@GOT) // Get the address of __lwp_buffer_size
}
{
r4 = add(r4,#1) // Increment count
memw(r5+r0<<#2) = r4.new // Update count in __lwp_counter for a given ID
r1 = memw(r1+#0) // Get the buffer size
}
{
p0 = cmp.gtu(r4,#100) // Exit if count for a given ID is greater than 100
if (p0.new) jump:nt .LBB0_3
r5 = memw(r2+##__lwp_buffer_ptr@GOT) // Get address of the pointer to __lwp_buffer_ptr
}
{
r5 = memw(r5+#0) // Get address of __lwp_buffer_ptr
r2 = memw(r2+##__lwp_buffer_count@GOT) // Get address of __lwp_buffer_count
}
{
r4 = add(r3,#4) // Increment the offset by 4 since 4 int32 values are stored for each invocation
if (!cmp.gtu(r1,r4.new)) jump:t .LBB0_3 // Exit if the main lwp buffer has run out of space
}
{
r5 = addasl(r5,r3,#2) // Get the address where the data needs to be recorded
memw(r2+#0) = r4 // Save next offset into __lwp_buffer_count
}
{
memw(r5+#0) = r31 // Save return address of this function
r1:0 = C15:14 // Control registers that keep processor cycle count (64-bits)
memw(r5+#4) = r0 // Save loop/function ID
}
{
memw(r5+#12) = r1 // Save upper 32 bits
memw(r5+#8) = r0 // Save lower 32 bits
}
.falign
.LBB0_3:
{
r5:4 = memd(r29+#16) // Restore the registers from the stack
r3:2 = memd(r29+#8)
}
{
r1:0 = memd(r29+#0)
dealloc_return // Deallocate the stack and return
}
.Lfunc_end0:
.size lwp_handler, .Lfunc_end0-lwp_handler
|
chensx00/gem5_custom | 3,702 | system/arm/bootloader/arm/boot.S | /*
* Copyright (c) 2010 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*************************************************************************
* Super simple bootloader
* Preserve loaded values that we need to pass to the kernel (r0, r1, r2)
* Additionally M5 puts the kernel start address in r3
*
* Upon executing this code:
* r0 = 0, r1 = machine number, r2 = atags ptr
* r3 = kernel start address, r4 = GIC address, r5 = flag register address
*
* CPU 0 should branch to the kernel start address and it's done with
* the boot loader. Other CPUs need to start in a wfi loop. When CPU0 sends
an IPI the slave CPUs read a register which CPU0 has programmed with the
boot address for the secondary CPU
**************************************************************************/
.text
.globl _start
.extern main
_start:
_entry:
b bootldr // All the interrupt vectors jump to the boot loader
b bootldr
b bootldr
b bootldr
b bootldr
b bootldr
b bootldr
b bootldr
b bootldr
bootldr:
mrc p15, 0, r8, c0, c0, 5 // get the MPIDR register
bics r8, r8, #0xff000000 // isolate the lower 24 bits (affinity levels)
bxeq r3 // if it's 0 (CPU 0), branch to kernel
mov r8, #1
str r8, [r4, #0] // Enable CPU interface on GIC
wfi // wait for an interrupt
pen:
ldr r8, [r5] // load the value
movs r8, r8 // set the flags on this value
beq pen // if it's zero try again
bx r8 // Jump to where we've been told
bkpt // We should never get here
|
chensx00/gem5_custom | 8,370 | system/arm/bootloader/arm64/boot.S | /*
* Copyright (c) 2012, 2020, 2022 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
.text
.globl _start
_start:
/* Save some values initialized by gem5. */
/* DTB address. */
mov x21, x0
/* Kernel entry point. */
mov x20, x3
/* cpu-release-addr. */
mov x22, x5
/*
* EL3 initialisation
*/
mrs x0, CurrentEL
cmp x0, #0xc // EL3?
b.ne start_ns // skip EL3 initialisation
mov x0, #0x30 // RES1
orr x0, x0, #(1 << 0) // Non-secure EL1
orr x0, x0, #(1 << 8) // HVC enable
orr x0, x0, #(1 << 10) // 64-bit EL2
orr x0, x0, #(1 << 16) // Disable FEAT_PAuth traps (APK)
orr x0, x0, #(1 << 17) // Disable FEAT_PAuth traps (API)
msr scr_el3, x0
mov x0, #(1 << 8) // Disable SVE trap to EL3
orr x0, x0, #(1 << 12) // Disable SME trap to EL3
msr cptr_el3, x0 // Disable copro. traps to EL3
/*
* Check for the primary CPU to avoid a race on the distributor
* registers.
*/
mrs x0, mpidr_el1
// ARM MPIDR_EL1 bytes: Aff3 (AArch64), Stuff, Aff2, Aff1, Aff0
// Test the MPIDR_EL1 register against 0xff00ffffff to
// extract the primary CPU.
ldr x1, =0xff00ffffff
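// (0xff00ffffff covers Aff3 [bits 39:32] plus Aff2/Aff1/Aff0 [bits 23:0];
// a result of zero identifies the primary CPU.)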
#ifdef GICV3
tst x0, x1 // check for cpuid==zero
b.ne 3f // secondary CPU
ldr x1, =GIC_DIST_BASE // GICD_CTLR
mov w0, #7 // EnableGrp0 | EnableGrp1NS | EnableGrp1S
str w0, [x1]
mov x2, #0xffe8
ldr w2, [x1, x2] // GICD_PIDR2
and w3, w2, #0xf0 // GICD_PIDR2.Read ArchRev
cmp w3, #0x30 // Check if GICv3
mov w4, #0x20000 // 128KiB redistributor stride
mov w5, #0x40000 // 256KiB redistributor stride
csel w2, w4, w5, eq // Select the correct redistributor stride
ldr x1, =GIC_REDIST_BASE
mov w0, #~0 // Grp1 interrupts
1: ldr w3, [x1, #0x14] // GICR_WAKER
and w3, w3, #~0x2 // Setting GICR_WAKER.ProcessorSleep to 0
str w3, [x1, #0x14]
dsb sy
2: ldr w3, [x1, #0x14] // Re-reading GICR_WAKER
ands w3, w3, #0x4 // Checking GICR_WAKER.ChildrenAsleep
b.ne 2b // Keep reading GICR_WAKER.ChildrenAsleep until awake
add x5, x1, #0x10000 // SGI base
str w0, [x5, #0x80] // GICR_IGROUPR0
ldr w4, [x1, #0x8] // GICR_TYPER
add x1, x1, x2 // Point to next redistributor
// Check GICR_TYPER.Last, which is set to 1
// if this is the last redistributor
ands w4, w4, #0x10
b.eq 1b // Branch back if not last redistributor
ldr x1, =GIC_DIST_BASE + 0x84 // GICD_IGROUPR
str w0, [x1], #4
str w0, [x1], #4
str w0, [x1], #4
/* SRE & Disable IRQ/FIQ Bypass & Allow EL2 access to ICC_SRE_EL2 */
3: mrs x10, S3_6_C12_C12_5 // read ICC_SRE_EL3
orr x10, x10, #0xf // enable 0xf
msr S3_6_C12_C12_5, x10 // write ICC_SRE_EL3
isb
mov x0, #1
msr S3_0_c12_c12_6, x0 // ICC_IGRPEN0_EL1 Enable
msr S3_0_C12_C12_7, x0 // ICC_IGRPEN1_EL1 Enable
#else
tst x0, x1 // check for cpuid==zero
b.ne 1f // secondary CPU
ldr x1, =GIC_DIST_BASE // GICD_CTLR
mov w0, #3 // EnableGrp0 | EnableGrp1
str w0, [x1]
1: ldr x1, =GIC_DIST_BASE + 0x80 // GICD_IGROUPR
mov w0, #~0 // Grp1 interrupts
str w0, [x1], #4
b.ne 2f // Only local interrupts for secondary CPUs
str w0, [x1], #4
str w0, [x1], #4
2: ldr x1, =GIC_CPU_BASE // GICC_CTLR
ldr w0, [x1]
mov w0, #3 // EnableGrp0 | EnableGrp1
str w0, [x1]
mov w0, #1 << 7 // allow NS access to GICC_PMR
str w0, [x1, #4] // GICC_PMR
#endif
msr sctlr_el2, xzr
/*
* Prepare the switch to the EL2_SP1 mode from EL3
*/
ldr x0, =start_ns // Return after mode switch
mov x1, #0x3c9 // EL2_SP1 | D | A | I | F
msr elr_el3, x0
msr spsr_el3, x1
eret
start_ns:
/*
* Kernel parameters
*/
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mrs x4, mpidr_el1
// ARM MPIDR_EL1 bytes: Aff3 (AArch64), Stuff, Aff2, Aff1, Aff0
// Test the MPIDR_EL1 register against 0xff00ffffff to
// extract the primary CPU.
ldr x1, =0xff00ffffff
tst x4, x1 // check for cpuid==zero
mov x1, xzr // load previous 'xzr' value back to x1
b.eq 2f // primary CPU
/*
* Secondary CPUs
*/
1: wfe
/* The Linux kernel v5.8 and older writes the entry point address
* of the secondary CPUs to this address, and does a SEV, waking up
* the secondary CPUs.
*
* gem5 informs the kernel the desired address via cpu-release-addr
* of the DTB.
*
* When this is first reached immediately after the bootloader starts,
* the value at that address must be 0, which is the default memory
* value set by gem5 for otherwise uninitialized memory, leading to
* WFE.
*/
ldr x4, [x22]
cbz x4, 1b
br x4 // branch to the given address
2:
/*
* UART initialisation (38400 8N1)
*/
ldr x4, =UART_BASE // UART base
mov w5, #0x10 // ibrd
str w5, [x4, #0x24]
mov w5, #0xc300
orr w5, w5, #0x0001 // cr
str w5, [x4, #0x30]
/*
* CLCD output site MB
*/
ldr x4, =SYSREGS_BASE
ldr w5, =(1 << 31) | (1 << 30) | (7 << 20) | (0 << 16) // START|WRITE|MUXFPGA|SITE_MB
str wzr, [x4, #0xa0] // V2M_SYS_CFGDATA
str w5, [x4, #0xa4] // V2M_SYS_CFGCTRL
/*
* Primary CPU
*/
// The kernel boot protocol specifies that the DTB address is placed
// in x0.
// https://github.com/torvalds/linux/blob/v5.7/Documentation/arm64/
// booting.rst#4-call-the-kernel-image
mov x0, x21
// Jump into the kernel entry point.
br x20
.ltorg
.org 0x200
|
chensx00/gem5_custom | 12,166 | ext/systemc/src/sysc/qt/md/ksr1.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
.file "ksr1.s"
.def .debug; .endef
.align 128
.globl qt_blocki
.globl qt_blocki$TXT
.globl qt_block
.globl qt_block$TXT
.globl qt_start$TXT
.globl qt_start
.globl qt_abort$TXT
.globl qt_abort
.globl qt_vstart
.globl qt_vstart$TXT
#
# KSR convention: on procedure calls, load both the procedure address
# and a pointer to a constant block. The address of function `f' is
# `f$TXT', and the constant block address is `f'. The constant block
# has several reserved values:
#
# 8 bytes fpu register save mask
# 4 bytes ipu register save mask
# 4 bytes ceu register save mask
# f: f$TXT
# ... whatever you want ... (not quite...read on)
#
# Note, by the way, that a pointer to a function is passed as a
# pointer to the constant area, and the constant area has the text
# address.
#
#
# Procedures that do not return structures prefix their code with
#
# proc$TXT:
# finop; cxnop
# finop; cxnop
# <proc code>
#
# Calls to those procedures branch to a 16 byte offset (4 instrs) into
# the procedure to skip those instructions.
#
# Procedures that return structures use a different code prefix:
#
# proc$TXT:
# finop; beq.qt %rc, %rc, 24 # return value entry
# finop; cxnop
# finop; movi8 0, %rc # no return value entry
# <proc code>
#
# Calls that want the returned structure branch directly to the
# procedure address. Callers that don't want (or aren't expecting) a
# return value branch 16 bytes into the procedure, which will zero
# %rc, telling the called procedure not to return a structure.
#
#
# On entry:
# %i2 -- control block of helper function to run
# (dereference to get helper)
# %i3 -- a1
# %i4 -- a2
# %i5 -- sp of new to run
#
.data
.half 0x0, 0x0, 0x7ffff000, 0x7fff8000
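# Register save masks (fpu, ipu, ceu) that precede the constant block --
# see the layout described in the comment above.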
qt_blocki:
qt_abort:
.word qt_blocki$TXT
.word qt_restore$TXT
.text
qt_abort$TXT:
qt_blocki$TXT:
finop ; cxnop # entry prefix
finop ; cxnop # entry prefix
add8.ntr 75,%i31,%i31 ; movi8 512,%c5 # ICR; stk adjust
finop ; ssub8.ntr 0,%sp,%c5,%sp
finop ; st8 %fp,504(%sp) # Save caller's fp
finop ; st8 %cp,496(%sp) # Save caller's cp
finop ; ld8 8(%c10),%c5 # ld qt_restore$TXT
finop ; st8 %c14,0(%sp) # Save special ret addr
finop ; mov8_8 %c10, %cp # Our cp
finop ; sadd8.ntr 0,%sp,%c5,%fp # Our frame ptr
finop ; st8 %c5,8(%sp) # st qt_restore$TXT
#
# CEU registers %c15-%c24, %c26-%c30 (%c14 we restore later)
#
finop ; st8 %c15,456(%sp)
finop ; st8 %c16,448(%sp)
finop ; st8 %c17,440(%sp)
finop ; st8 %c18,432(%sp)
finop ; st8 %c19,424(%sp)
finop ; st8 %c20,416(%sp)
finop ; st8 %c21,408(%sp)
finop ; st8 %c22,400(%sp)
finop ; st8 %c23,392(%sp)
finop ; st8 %c24,384(%sp)
#
# %c25 is the Enclosing Frame Pointer (EFP) -- since C doesn't
# use nested procedures, we ignore it (leaving a gap, though)
#
finop ; st8 %c26,368(%sp)
finop ; st8 %c27,360(%sp)
finop ; st8 %c28,352(%sp)
finop ; st8 %c29,344(%sp)
finop ; st8 %c30,336(%sp)
#
# IPU registers %i12-%i30
#
finop ; st8 %i12,328(%sp)
finop ; st8 %i13,320(%sp)
finop ; st8 %i14,312(%sp)
finop ; st8 %i15,304(%sp)
# (gap to get alignment for st64)
# -- Doesn't work on version 1.1.3 of the OS
# finop ; st64 %i16,256(%sp)
finop ; st8 %i16,256(%sp)
finop ; st8 %i17,248(%sp)
finop ; st8 %i18,240(%sp)
finop ; st8 %i19,232(%sp)
finop ; st8 %i20,224(%sp)
finop ; st8 %i21,216(%sp)
finop ; st8 %i22,208(%sp)
finop ; st8 %i23,200(%sp)
finop ; st8 %i24,192(%sp)
finop ; st8 %i25,184(%sp)
finop ; st8 %i26,176(%sp)
finop ; st8 %i27,168(%sp)
finop ; st8 %i28,160(%sp)
finop ; st8 %i29,152(%sp)
finop ; st8 %i30,144(%sp)
#
# FPU already saved, or saving not necessary
#
#
# Switch to the stack passed in as fourth argument to the block
# routine (%i5) and call the helper routine passed in as the first
# argument (%i2). Note that the address of the helper's constant
# block is passed in, so we must derefence it to get the helper's text
# address.
#
finop ; movb8_8 %i2,%c10 # helper's ConstBlock
finop ; cxnop # Delay slot, fill w/
finop ; cxnop # .. 2 st8 from above
finop ; ld8 0(%c10),%c4 # load addr of helper
finop ; movb8_8 %sp, %i2 # 1st arg to helper
# is this stack; other
# args remain in regs
finop ; movb8_8 %i5,%sp # switch stacks
finop ; jsr %c14,16(%c4) # call helper
movi8 3, %i0 ; movi8 0,%c8 # nargs brain dmg
finop ; cxnop
finop ; cxnop
#
# Here is where behavior differs for threads being restored and threads
# being started. Blocked threads have a pointer to qt_restore$TXT on
# the top of their stacks; manufactured stacks have a pointer to qt_start$TXT
# on the top of their stacks. With this setup, starting threads
# skip the (unnecessary) restore operations.
#
# We jump to an offset of 16 to either (1) skip past the two noop pairs
# at the start of qt_start$TXT, or (2) skip past the two noop pairs
# after qt_restore$TXT.
#
finop ; ld8 8(%sp),%c4
finop ; cxnop
finop ; cxnop
finop ; jmp 16(%c4)
qt_restore$TXT:
finop ; cxnop
finop ; cxnop
#
# Point of Restore:
#
# The helper function will return here. Any result it has placed in
# a return register (most likely %i0) will not get overwritten below
# and will consequently be the return value of the blocking routine.
#
#
# CEU registers %c15-%c24, %c26-%c30 (%c14 we restore later)
#
finop ; ld8 456(%sp),%c15
finop ; ld8 448(%sp),%c16
finop ; ld8 440(%sp),%c17
finop ; ld8 432(%sp),%c18
finop ; ld8 424(%sp),%c19
finop ; ld8 416(%sp),%c20
finop ; ld8 408(%sp),%c21
finop ; ld8 400(%sp),%c22
finop ; ld8 392(%sp),%c23
finop ; ld8 384(%sp),%c24
#
# %c25 is the Enclosing Frame Pointer (EFP) -- since C doesn't
# use nested procedures, we ignore it (leaving a gap, though)
#
finop ; ld8 368(%sp),%c26
finop ; ld8 360(%sp),%c27
finop ; ld8 352(%sp),%c28
finop ; ld8 344(%sp),%c29
finop ; ld8 336(%sp),%c30
#
# IPU registers %i12-%i30
#
finop ; ld8 328(%sp),%i12
finop ; ld8 320(%sp),%i13
finop ; ld8 312(%sp),%i14
finop ; ld8 304(%sp),%i15
# (gap to get alignment for ld64)
# -- Doesn't work on version 1.1.3 of the OS
# finop ; ld64 256(%sp),%i16
finop ; ld8 256(%sp),%i16
finop ; ld8 248(%sp),%i17
finop ; ld8 240(%sp),%i18
finop ; ld8 232(%sp),%i19
finop ; ld8 224(%sp),%i20
finop ; ld8 216(%sp),%i21
finop ; ld8 208(%sp),%i22
finop ; ld8 200(%sp),%i23
finop ; ld8 192(%sp),%i24
finop ; ld8 184(%sp),%i25
finop ; ld8 176(%sp),%i26
finop ; ld8 168(%sp),%i27
finop ; ld8 160(%sp),%i28
finop ; ld8 152(%sp),%i29
finop ; ld8 144(%sp),%i30
#
# FPU registers don't need to be loaded, or will be loaded by an
# enclosing scope (e.g., if this is called by qt_block).
#
#
# Load the special registers. We don't load the stack ptr because
# the new stack is passed in as an argument, we don't load the EFP
# because we don't use it, and we load the return address specially
# off the top of the stack.
#
finop ; ld8 0(%sp),%c14 # return addr
finop ; ld8 496(%sp),%cp
finop ; ld8 504(%sp),%fp
finop ; jmp 32(%c14) # jump back to thread
finop ; movi8 512,%c5 # stack adjust
finop ; sadd8.ntr 0,%sp,%c5,%sp
.data
.half 0x0, 0x0, 0x7ffff000, 0x7fff8000
qt_block:
.word qt_block$TXT
.word qt_error
.word qt_error$TXT
.word qt_blocki
#
# Handle saving and restoring the FPU regs, relying on qt_blocki
# to save and restore the remaining registers.
#
.text
qt_block$TXT:
finop ; cxnop # entry prefix
finop ; cxnop # entry prefix
add8.ntr 29,%i31,%i31 ; movi8 512,%c5 # ICR; stk adjust
finop ; ssub8.ntr 0,%sp,%c5,%sp
finop ; st8 %fp,504(%sp) # Save caller's fp
finop ; st8 %cp,496(%sp) # Save caller's cp
finop ; st8 %c14,488(%sp) # store ret addr
finop ; sadd8.ntr 0,%sp,%c5,%fp # Our frame ptr
finop ; mov8_8 %c10, %cp # Our cp
#
# Store 8 registers at once...destination must be a multiple of 64
#
finop ; st64 %f16,384(%sp)
finop ; st64 %f24,320(%sp)
finop ; st64 %f32,256(%sp)
finop ; st64 %f40,192(%sp)
finop ; st64 %f48,128(%sp)
finop ; st64 %f56,64(%sp)
#
# Call the integer blocking routine, passing the arguments passed to us
#
finop ; ld8 24(%cp), %c10
finop ; cxnop
finop ; jsr %c14, qt_blocki$TXT
finop ; cxnop
finop ; cxnop
movi8 4,%i0 ; movi8 0,%c8 # nargs brain dmg
#
# Load 8 registers at once...source must be a multiple of 64
#
finop ; ld64 64(%sp),%f56
finop ; ld64 128(%sp),%f48
finop ; ld64 192(%sp),%f40
finop ; ld64 256(%sp),%f32
finop ; ld64 320(%sp),%f24
finop ; ld64 384(%sp),%f16
finop ; ld8 488(%sp),%c14
finop ; ld8 496(%sp),%cp
finop ; ld8 504(%sp),%fp
finop ; jmp 32(%c14) # jump back to thread
finop ; movi8 512,%c5 # stack adjust
finop ; sadd8.ntr 0,%sp,%c5,%sp
.data
.half 0x0, 0x0, 0x7ffff000, 0x7fff8000
qt_start:
.word qt_start$TXT
#
# A new thread is set up to "appear" as if it were executing code at
# the beginning of qt_start and then it called a blocking routine
# (qt_blocki). So when a new thread starts to run, it gets unblocked
# by the code above and "returns" to `qt_start$TXT' in the
# restore step of the switch. Blocked threads jump to 16(qt_restore$TXT),
# and starting threads jump to 16(qt_start$TXT).
#
.text
qt_start$TXT:
finop ; cxnop #
finop ; cxnop #
finop ; ld8 40(%sp),%c10 # `only' constant block
finop ; ld8 32(%sp),%i4 # `userf' arg.
finop ; ld8 24(%sp),%i3 # `t' arg.
finop ; ld8 0(%c10),%c4 # `only' text location
finop ; ld8 16(%sp),%i2 # `u' arg.
finop ; cxnop
finop ; jsr %c14,16(%c4) # call `only'
#
# Pop the frame used to store the thread's initial data
#
finop ; sadd8.ntr 0,%sp,128,%sp
finop ; cxnop
movi8 2,%i0 ; movi8 0,%c8 # nargs brain dmg
#
# If we ever return, it's an error.
#
finop ; jmp qt_error$TXT
finop ; cxnop
finop ; cxnop
movi8 0,%i0 ; movi8 0,%c8 # nargs brain dmg
#
# This stuff is broken
#
.data
.half 0x0, 0x0, 0x7ffff000, 0x7fff8000
qt_vstart:
.word qt_vstart$TXT
.text
qt_vstart$TXT:
finop ; cxnop # entry prefix
finop ; cxnop # entry prefix
finop ; cxnop
finop ; cxnop
add8.ntr 11,%i31,%i31 ; movi8 512,%c5
finop ; ssub8.ntr 0,%sp,%c5,%sp # fix stack
finop ; ld8 8(%sp),%i2 # load `t' as arg to
finop ; cxnop # `startup'
finop ; cxnop
finop ; ld8 16(%sp),%c10 # `startup' const block
finop ; cxnop
finop ; cxnop
finop ; ld8 0(%c10),%c4 # `startup' text loc.
finop ; cxnop
finop ; cxnop
finop ; jsr %c14,16(%c4) # call `startup'
finop ; cxnop
finop ; cxnop
movi8 1, %i0 ; movi8 0,%c8 # nargs brain dmg
#
# finop ; sadd 0,%sp,128,%sp # alter stack
#
finop ; ld8 8(%sp),%i2 # load `t' as arg to
finop ; ld8 8(%sp),%i2 # load `t' as arg to
finop ; ld8 8(%sp),%i2 # load `t' as arg to
finop ; ld8 8(%sp),%i2 # load `t' as arg to
finop ; ld8 32(%sp),%c10 # `only' constant block
finop ; ld8 8(%sp),%i2 # `u' arg.
finop ; ld8 16(%sp),%i3 # `t' arg.
finop ; ld8 0(%c10),%c4 # `only' text location
finop ; ld8 24(%sp),%i4 # `userf' arg.
finop ; cxnop
finop ; jsr %c4,16(%c4) # call `only'
finop ; cxnop
finop ; cxnop
#
# If the callee ever calls `nargs', the following instruction (pair)
# will be executed. However, we don't know when we compile this code
# how many args are being passed. So we give our best guess: 0.
#
movi8 0,%i0 ; movi8 0,%c8 # nargs brain dmg
#
# If we ever return, it's an error.
#
finop ; jmp qt_error$TXT
finop ; cxnop
finop ; cxnop
movi8 0,%i0 ; movi8 0,%c8 # nargs brain dmg
|
chensx00/gem5_custom | 3,365 | ext/systemc/src/sysc/qt/md/powerpc_sys5_b.s | /* speed test for basic CPU operations */
/* Marco Bucci <marco.bucci@inwind.it> */
/* This code was developed with the CodeWarrior integrated PPC assembler.
* Macros are used to hide constructs that are illegal depending on whether
* you are using a "normal" assembler or the "C integrated" assembler.
*/
#if 0
.text
.align 4
.globl b_call_reg
.globl _b_call_reg
.globl b_call_imm
.globl _b_call_imm
.globl b_add
.globl _b_add
.globl b_load
.globl _b_load
.set fsize, 64
.set lrsave, 4
#else
#define fsize 64
#define lrsave 4
#endif
#if 0
.if 0
#endif
asm void b_null(void)
{
#if 0
.endif
#endif
#if 0
b_null:
#endif
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
/* Actually the same as the following. How do we get the address of "b_null"?
* I didn't find the right syntax or the right way.
* I should take the current PC, then the offset to "b_null"
* (computed as the difference between the labels), perform the sum and go?!
*/
#if 0
.if 0
#endif
asm void b_call_reg(long n)
{
#if 0
.endif
#endif
#if 0
b_call_reg:
_b_call_reg:
#endif
mflr %r0
stw %r31,-4(%r1)
stw %r30,-8(%r1)
stw %r0,lrsave(%r1)
stwu %r1,-fsize(%r1)
mr %r30,%r3
li %r31,0
b L1
L0:
bl b_null
bl b_null
bl b_null
bl b_null
bl b_null
addi %r31,%r31,5
L1:
cmpw %r31,%r30
blt L0
lwz %r0,lrsave+fsize(%r1)
mtlr %r0
lwz %r31,-4+fsize(%r1)
lwz %r30,-8+fsize(%r1)
addi %r1,%r1,fsize
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
#if 0
.if 0
#endif
asm void b_call_imm(long n)
{
#if 0
.endif
#endif
#if 0
b_call_imm:
_b_call_imm:
#endif
mflr %r0
stw %r31,-4(%r1)
stw %r30,-8(%r1)
stw %r0,lrsave(%r1)
stwu %r1,-fsize(%r1)
mr %r30,%r3
li %r31,0
b L3
L2:
bl b_null
bl b_null
bl b_null
bl b_null
bl b_null
addi %r31,%r31,5
L3:
cmpw %r31,%r30
blt L2
lwz %r0,lrsave+fsize(%r1)
mtlr %r0
lwz %r31,-4+fsize(%r1)
lwz %r30,-8+fsize(%r1)
addi %r1,%r1,fsize
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
#if 0
.if 0
#endif
asm void b_add(long n)
{
#if 0
.endif
#endif
#if 0
b_add:
_b_add:
#endif
mflr %r0
stw %r31,-4(%r1)
stw %r30,-8(%r1)
stw %r0,lrsave(%r1)
stwu %r1,-fsize(%r1)
mr %r30,%r3
li %r31,0
b L5
L4:
addi %r3,%r3,5
addi %r4,%r4,5
addi %r5,%r5,5
addi %r6,%r6,5
addi %r7,%r7,5
addi %r3,%r3,5
addi %r4,%r4,5
addi %r5,%r5,5
addi %r6,%r6,5
addi %r7,%r7,5
addi %r31,%r31,10
L5:
cmpw %r31,%r30
blt L4
lwz %r0,lrsave+fsize(%r1)
mtlr %r0
lwz %r31,-4+fsize(%r1)
lwz %r30,-8+fsize(%r1)
addi %r1,%r1,fsize
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
#if 0
.if 0
#endif
asm void b_load(long n)
{
#if 0
.endif
#endif
#if 0
b_load:
_b_load:
#endif
mflr %r0
stw %r31,-4(%r1)
stw %r30,-8(%r1)
stw %r0,lrsave(%r1)
stwu %r1,-fsize(%r1)
mr %r30,%r3
li %r31,0
b L7
L6:
lwz %r3,4(%r1)
lwz %r4,8(%r1)
lwz %r5,12(%r1)
lwz %r6,16(%r1)
lwz %r7,20(%r1)
lwz %r3,24(%r1)
lwz %r4,28(%r1)
lwz %r5,32(%r1)
lwz %r6,36(%r1)
lwz %r7,40(%r1)
addi %r31,%r31,10
L7:
cmpw %r31,%r30
blt L6
lwz %r0,lrsave+fsize(%r1)
mtlr %r0
lwz %r31,-4+fsize(%r1)
lwz %r30,-8+fsize(%r1)
addi %r1,%r1,fsize
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
|
chensx00/gem5_custom | 6,002 | ext/systemc/src/sysc/qt/md/hppa_b.s | ; QuickThreads -- Threads-building toolkit.
; Copyright (c) 1993 by David Keppel
; Permission to use, copy, modify and distribute this software and
; its documentation for any purpose and without fee is hereby
; granted, provided that the above copyright notice and this notice
; appear in all copies. This software is provided as a
; proof-of-concept and for demonstration purposes; there is no
; representation about the suitability of this software for any
; purpose.
; This file (pa-risc_b.s) is part of the port of QuickThreads for
; PA-RISC 1.1 architecture. It contains assembly-level support for
; raw processor performance measurement. It was written in 1994 by
; Uwe Reder (`uereder@cip.informatik.uni-erlangen.de')
; for the Operating Systems Department (IMMD4) at the
; University of Erlangen/Nuernberg Germany.
; Note that the number of instructions in the measurement loops differs
; from implementation to implementation. I used eight instructions per loop
; for every test (execute eight instructions and loop to the start).
.CODE
.IMPORT $global$,DATA
.IMPORT $$dyncall,MILLICODE
.EXPORT b_call_reg
.EXPORT b_call_imm
.EXPORT b_add
.EXPORT b_load
; Just do nothing, only return to caller. This procedure is called by
; `b_call_reg' and `b_call_imm'.
b_null
.PROC
.CALLINFO NO_CALLS, FRAME=0
.ENTRY
bv,n %r0(%rp) ; just return
.EXIT
.PROCEND
; Call the procedure `b_null' with function pointer in a register.
b_call_reg
.PROC
.CALLINFO CALLER, FRAME=0
.ENTRY
stwm %r3,64(%sp) ; store r3 (may be used by caller)
stw %rp,-20(%sp) ; save return-pointer to frame-marker
addil LR'to_call-$global$,%r27
ldw RR'to_call-$global$(%r1),%r3
_loop0
copy %r3,%r22 ; copy the procedure label to r22, ...
.CALL ; ...this is the input to $$dyncall
bl $$dyncall,%mrp ; call $$dyncall (millicode function)
copy %mrp,%rp ; remember the return-pointer
copy %r3,%r22
.CALL
bl $$dyncall,%mrp
copy %mrp,%rp
copy %r3,%r22
.CALL
bl $$dyncall,%mrp
copy %mrp,%rp
copy %r3,%r22
.CALL
bl $$dyncall,%mrp
copy %mrp,%rp
copy %r3,%r22
.CALL
bl $$dyncall,%mrp
copy %mrp,%rp
copy %r3,%r22
.CALL
bl $$dyncall,%mrp
copy %mrp,%rp
copy %r3,%r22
.CALL
bl $$dyncall,%mrp
copy %mrp,%rp
copy %r3,%r22
.CALL
bl $$dyncall,%mrp
copy %mrp,%rp
addibf,<= -8,%arg0,_loop0 ; decrement counter by 8 and loop
nop
ldw -20(%sp),%rp ; restore return-pointer
bv %r0(%rp) ; return to caller
ldwm -64(%sp),%r3 ; restore r3 and remove stack frame
.EXIT
.PROCEND
; Call the procedure `b_null' immediate.
b_call_imm
.PROC
.CALLINFO CALLER, FRAME=0, SAVE_RP
.ENTRY
ldo 64(%sp),%sp ; caller needs a stack-frame
stw %rp,-20(%sp) ; save return-pointer to frame-marker
_loop1
bl b_null,%rp ; call `b_null' immediate (8 times)
nop
bl b_null,%rp
nop
bl b_null,%rp
nop
bl b_null,%rp
nop
bl b_null,%rp
nop
bl b_null,%rp
nop
bl b_null,%rp
nop
bl b_null,%rp
nop
addibf,<= -8,%arg0,_loop1 ; decrement counter by 8 and loop
nop
ldw -20(%sp),%rp ; restore return-pointer
bv %r0(%rp) ; return to caller
ldo -64(%sp),%sp ; remove stack-frame
.EXIT
.PROCEND
; Copy register-to-register.
; On PA-RISC this is implemented with an `or'.
; The `or' is hidden by a pseudo-operation called `copy'.
b_add
.PROC
.CALLINFO NO_CALLS, FRAME=0
.ENTRY
_loop2
copy %r19,%r20 ; copy register-to-register
copy %r20,%r21 ; use caller-saves registers
copy %r21,%r22
copy %r22,%r21
copy %r21,%r20
copy %r20,%r19
copy %r19,%r20
copy %r20,%r21
addibf,<= -8,%arg0,_loop2 ; decrement counter by 8 and loop
nop
bv,n %r0(%rp)
.EXIT
.PROCEND
; Load memory to a register.
b_load
.PROC
.CALLINFO NO_CALLS, FRAME=0
.ENTRY
_loop3
ldw -4(%sp),%r22 ; load data from frame-marker
ldw -8(%sp),%r22 ; use a caller-saves register
ldw -12(%sp),%r22
ldw -16(%sp),%r22
ldw -20(%sp),%r22
ldw -24(%sp),%r22
ldw -28(%sp),%r22
ldw -32(%sp),%r22
addibf,<= -8,%arg0,_loop3 ; decrement counter by 8 and loop
nop
bv,n %r0(%rp)
.EXIT
.PROCEND
.ALIGN 8
to_call
.WORD b_null
|
chensx00/gem5_custom | 5,024 | ext/systemc/src/sysc/qt/md/sparc.s | /* sparc.s -- assembly support for the `qt' thread building kit. */
/*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
/* #include <machine/trap.h> */
.text
.align 4
.global _qt_blocki
.global _qt_block
.global _qt_abort
.global _qt_start
.global _qt_vstart
/* Register assignment:
// %o0: incoming `helper' function to call after cswap
// also used as outgoing sp of old thread (qt_t *)
// %o1, %o2:
// parameters to `helper' function called after cswap
// %o3: sp of new thread
// %o5: tmp used to save old thread sp, while using %o0
// to call `helper' f() after cswap.
//
//
// Aborting a thread is easy if there are no cached register window
// frames: just switch to the new stack and away we go. If there are
// cached register window frames they must all be written back to the
// old stack before we move to the new stack. If we fail to do the
// writeback then the old stack memory can be written with register
// window contents e.g., after the stack memory has been freed and
// reused.
//
// If you don't believe this, try setting the frame pointer to zero
// once we're on the new stack. This will not affect correctness
// otherwise because the frame pointer will eventually get reloaded w/
// the new thread's frame pointer. But it will be zero briefly before
// the reload. You will eventually (100,000 cswaps later on a small
// SPARC machine that I tried) get an illegal instruction trap from
// the kernel trying to flush a cached window to location 0x0.
//
// Solution: flush windows before switching stacks, which invalidates
// all the other register windows. We could do the trap
// conditionally: if we're in the lowest frame of a thread, the fp is
// zero already so we know there's nothing cached. But we expect most
// aborts will be done from a first function that does a `save', so we
// will rarely save anything and always pay the cost of testing to see
// if we should flush.
//
// All floating-point registers are caller-save, so this routine
// doesn't need to do anything to save and restore them.
//
// `qt_block' and `qt_blocki' return the same value as the value
// returned by the helper function. We get this ``for free''
// since we don't touch the return value register between the
// return from the helper function and return from qt_block{,i}.
*/
_qt_block:
_qt_blocki:
sub %sp, 8, %sp /* Allocate save area for return pc. */
st %o7, [%sp+64] /* Save return pc. */
_qt_abort:
ta 0x03 /* Save locals and ins. */
mov %sp, %o5 /* Remember old sp w/o chng ins/locals. */
sub %o3, 96, %sp /* Allocate kwsa, switch stacks. */
call %o0, 0 /* Call `helper' routine. */
mov %o5, %o0 /* Pass old thread to qt_after_t() */
/* .. along w/ args in %o1 & %o2. */
/* Restore callee-save regs. The kwsa
// is on this stack, so offset all
// loads by sizeof(kwsa), 64 bytes.
*/
ldd [%sp+ 0+96], %l0
ldd [%sp+ 8+96], %l2
ldd [%sp+16+96], %l4
ldd [%sp+24+96], %l6
ldd [%sp+32+96], %i0
ldd [%sp+40+96], %i2
ldd [%sp+48+96], %i4
ldd [%sp+56+96], %i6
ld [%sp+64+96], %o7 /* Restore return pc. */
retl /* Return to address in %o7. */
add %sp, 104, %sp /* Deallocate kwsa, ret pc area. */
/* The function calling conventions say there has to be a 1-word area
// in the caller's stack to hold a pointer to space for aggregate
// return values. It also says there should be a 6-word area to hold
// %o0..%o5 if the callee wants to save them (why? I don't know...)
// Round up to 8 words to maintain alignment.
//
// Parameter values were stored in callee-save regs and are moved to
// the parameter registers.
*/
_qt_start:
mov %i1, %o0 /* `pu': Set up args to `only'. */
mov %i2, %o1 /* `pt'. */
mov %i4, %o2 /* `userf'. */
call %i5, 0 /* Call client function. */
sub %sp, 32, %sp /* Allocate 6-word callee space. */
call _qt_error, 0 /* `only' erroneously returned. */
nop
/* Same comments as `_qt_start' about allocating rounded-up 7-word
// save areas. */
_qt_vstart:
sub %sp, 32, %sp /* Allocate 7-word callee space. */
call %i5, 0 /* call `startup'. */
mov %i2, %o0 /* .. with argument `pt'. */
add %sp, 32, %sp /* Use 7-word space in varargs. */
ld [%sp+ 4+64], %o0 /* Load arg0 ... */
ld [%sp+ 8+64], %o1
ld [%sp+12+64], %o2
ld [%sp+16+64], %o3
ld [%sp+20+64], %o4
call %i4, 0 /* Call `userf'. */
ld [%sp+24+64], %o5
/* Use 6-word space in varargs. */
mov %o0, %o1 /* Pass return value from userf */
call %i3, 0 /* .. when call `cleanup. */
mov %i2, %o0 /* .. along with argument `pt'. */
call _qt_error, 0 /* `cleanup' erroneously returned. */
nop
|
chensx00/gem5_custom | 4,060 | ext/systemc/src/sysc/qt/md/axp.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
/* axp.s -- assembly support. */
.text
.align 4
.file 2 "axp.s"
.globl qt_block
.globl qt_blocki
.globl qt_abort
.globl qt_start
.globl qt_vstart
/*
** $16: ptr to function to call once curr is suspended
** and control is on r19's stack.
** $17: 1'th arg to (*$16)(...).
** $18: 2'th arg to (*$16)(...).
** $19: sp of thread to resume.
**
** The helper routine returns a value that is passed on as the
** return value from the blocking routine. Since we don't
** touch r0 between the helper's return and the end of
** function, we get this behavior for free.
*/
.ent qt_blocki
qt_blocki:
subq $30,80, $30 /* Allocate save area. */
stq $26, 0($30) /* Save registers. */
stq $9, 8($30)
stq $10,16($30)
stq $11,24($30)
stq $12,32($30)
stq $13,40($30)
stq $14,48($30)
stq $15,56($30)
stq $29,64($30)
.end qt_blocki
.ent qt_abort
qt_abort:
addq $16,$31, $27 /* Put argument function in PV. */
addq $30,$31, $16 /* Save stack ptr in outgoing arg. */
addq $19,$31, $30 /* Set new stack pointer. */
jsr $26,($27),0 /* Call helper function. */
ldq $26, 0($30) /* Restore registers. */
ldq $9, 8($30)
ldq $10,16($30)
ldq $11,24($30)
ldq $12,32($30)
ldq $13,40($30)
ldq $14,48($30)
ldq $15,56($30)
ldq $29,64($30)
addq $30,80, $30 /* Deallocate save area. */
ret $31,($26),1 /* Return, predict===RET. */
.end qt_abort
/*
** Non-varargs thread startup.
*/
.ent qt_start
qt_start:
addq $9,$31, $16 /* Load up `qu'. */
addq $10,$31, $17 /* ... user function's `pt'. */
addq $11,$31, $18 /* ... user function's `userf'. */
addq $12,$31, $27 /* ... set procedure value to `only'. */
jsr $26,($27),0 /* Call `only'. */
jsr $26,qt_error /* `only' erroneously returned. */
.end qt_start
	.ent qt_vstart
qt_vstart:
/* Call startup function. */
addq $9,$31, $16 /* Arg0 to `startup'. */
addq $12,$31, $27 /* Set procedure value. */
jsr $26,($27),0 /* Call `startup'. */
/* Call user function. */
ldt $f16, 0($30) /* Load fp arg regs. */
ldt $f17, 8($30)
ldt $f18,16($30)
ldt $f19,24($30)
ldt $f20,32($30)
ldt $f21,40($30)
ldq $16,48($30) /* And integer arg regs. */
ldq $17,56($30)
ldq $18,64($30)
ldq $19,72($30)
ldq $20,80($30)
ldq $21,88($30)
	addq $30,96, $30	/* Pop 6*2*8 saved arg regs. */
addq $11,$31, $27 /* Set procedure value. */
jsr $26,($27),0 /* Call `vuserf'. */
/* Call cleanup. */
addq $9,$31, $16 /* Arg0 to `cleanup'. */
addq $0,$31, $17 /* Users's return value is arg1. */
addq $10,$31, $27 /* Set procedure value. */
jsr $26,($27),0 /* Call `cleanup'. */
	jsr $26,qt_error	/* `cleanup' erroneously returned. */
	.end qt_vstart
/*
** Save callee-save floating-point regs $f2..$f9.
** Also save return pc from whomever called us.
**
** Return value from `qt_block' is the same as the return from
** `qt_blocki'. We get that for free since we don't touch $0
** between the return from `qt_blocki' and the return from
** `qt_block'.
*/
.ent qt_block
qt_block:
subq $30,80, $30 /* Allocate a save space. */
stq $26, 0($30) /* Save registers. */
stt $f2, 8($30)
stt $f3,16($30)
stt $f4,24($30)
stt $f5,32($30)
stt $f6,40($30)
stt $f7,48($30)
stt $f8,56($30)
stt $f9,64($30)
jsr $26,qt_blocki /* Call helper. */
/* .. who will also restore $gp. */
ldq $26, 0($30) /* restore registers. */
ldt $f2, 8($30)
ldt $f3,16($30)
ldt $f4,24($30)
ldt $f5,32($30)
ldt $f6,40($30)
ldt $f7,48($30)
ldt $f8,56($30)
ldt $f9,64($30)
	addq $30,80, $30	/* Deallocate save space. */
ret $31,($26),1 /* Return, predict===RET. */
.end qt_block
|
chensx00/gem5_custom | 718 | ext/systemc/src/sysc/qt/md/i386_b.s | /*
// QuickThreads -- Threads-building toolkit.
// Copyright (c) 1993 by David Keppel
//
// Permission to use, copy, modify and distribute this software and
// its documentation for any purpose and without fee is hereby
// granted, provided that the above copyright notice and this notice
// appear in all copies. This software is provided as a
// proof-of-concept and for demonstration purposes; there is no
// representation about the suitability of this software for any
// purpose. */
.globl _b_call_reg
.globl b_call_reg
.globl _b_call_imm
.globl b_call_imm
.globl _b_add
.globl b_add
.globl _b_load
.globl b_load
_b_call_reg:
b_call_reg:
_b_call_imm:
b_call_imm:
_b_add:
b_add:
_b_load:
b_load:
hlt
|
chensx00/gem5_custom | 8,062 | ext/systemc/src/sysc/qt/md/hppa.s | ; pa-risc.s -- assembly support.
; QuickThreads -- Threads-building toolkit.
; Copyright (c) 1993 by David Keppel
;
; Permission to use, copy, modify and distribute this software and
; its documentation for any purpose and without fee is hereby
; granted, provided that the above copyright notice and this notice
; appear in all copies. This software is provided as a
; proof-of-concept and for demonstration purposes; there is no
; representation about the suitability of this software for any
; purpose.
; This file (pa-risc.s) is part of the port of QuickThreads for
; PA-RISC 1.1 architecture. This file implements context switches
; and thread startup. It was written in 1994 by Uwe Reder
; (`uereder@cip.informatik.uni-erlangen.de') for the Operating
; Systems Department (IMMD4) at the University of Erlangen/Nuernberg
; Germany.
; Callee saves general registers gr3..gr18,
; floating-point registers fr12..fr21.
.CODE
.IMPORT $$dyncall, MILLICODE
.IMPORT qt_error, CODE
.EXPORT qt_blocki, ENTRY
.EXPORT qt_block, ENTRY
.EXPORT qt_abort, ENTRY
.EXPORT qt_start, ENTRY
.EXPORT qt_vstart, ENTRY
; arg0: ptr to function (helper) to call once curr is suspended
; and control is on arg3's stack.
; arg1: 1'th arg to *arg0.
; arg2: 2'th arg to *arg0.
; arg3: sp of new thread.
qt_blocki
.PROC
.CALLINFO CALLER, FRAME=0, SAVE_RP, ENTRY_GR=18
.ENTRY
stw %rp,-20(%sp) ; save rp to old frame-marker
stwm %r3,128(%sp) ; save callee-saves general registers
stw %r4,-124(%sp)
stw %r5,-120(%sp)
stw %r6,-116(%sp)
stw %r7,-112(%sp)
stw %r8,-108(%sp)
stw %r9,-104(%sp)
stw %r10,-100(%sp)
stw %r11,-96(%sp)
stw %r12,-92(%sp)
stw %r13,-88(%sp)
stw %r14,-84(%sp)
stw %r15,-80(%sp)
stw %r16,-76(%sp)
stw %r17,-72(%sp)
stw %r18,-68(%sp)
qt_abort
copy %arg0,%r22 ; helper to be called by $$dyncall
copy %sp,%arg0 ; pass current sp as arg0 to helper
copy %arg3,%sp ; set new sp
.CALL
bl $$dyncall,%mrp ; call helper
copy %mrp,%rp
ldw -68(%sp),%r18 ; restore general registers
ldw -72(%sp),%r17
ldw -76(%sp),%r16
ldw -80(%sp),%r15
ldw -84(%sp),%r14
ldw -88(%sp),%r13
ldw -92(%sp),%r12
ldw -96(%sp),%r11
ldw -100(%sp),%r10
ldw -104(%sp),%r9
ldw -108(%sp),%r8
ldw -112(%sp),%r7
ldw -116(%sp),%r6
ldw -120(%sp),%r5
ldw -124(%sp),%r4
ldw -148(%sp),%rp ; restore return-pointer
bv %r0(%rp) ; return to caller
ldwm -128(%sp),%r3
.EXIT
.PROCEND
qt_block
.PROC
.CALLINFO CALLER, FRAME=0, SAVE_RP, ENTRY_FR=21
.ENTRY
stw %rp,-20(%sp) ; save rp to old frame-marker
fstds,ma %fr12,8(%sp) ; save callee-saves float registers
fstds,ma %fr13,8(%sp)
fstds,ma %fr14,8(%sp)
fstds,ma %fr15,8(%sp)
fstds,ma %fr16,8(%sp)
fstds,ma %fr17,8(%sp)
fstds,ma %fr18,8(%sp)
fstds,ma %fr19,8(%sp)
fstds,ma %fr20,8(%sp)
fstds,ma %fr21,8(%sp)
.CALL
bl qt_blocki,%rp
ldo 48(%sp),%sp
ldo -48(%sp),%sp
fldds,mb -8(%sp),%fr21 ; restore callee-saves float registers
fldds,mb -8(%sp),%fr20
fldds,mb -8(%sp),%fr19
fldds,mb -8(%sp),%fr18
fldds,mb -8(%sp),%fr17
fldds,mb -8(%sp),%fr16
fldds,mb -8(%sp),%fr15
fldds,mb -8(%sp),%fr14
fldds,mb -8(%sp),%fr13
ldw -28(%sp),%rp ; restore return-pointer
bv %r0(%rp) ; return to caller.
fldds,mb -8(%sp),%fr12
.EXIT
.PROCEND
qt_start
.PROC
.CALLINFO CALLER, FRAME=0
.ENTRY
copy %r18,%arg0 ; set user arg `pu'.
copy %r17,%arg1 ; ... user function pt.
copy %r16,%arg2 ; ... user function userf.
; %r22 is a caller-saves register
copy %r15,%r22 ; function to be called by $$dyncall
.CALL ; in=%r22
bl $$dyncall,%mrp ; call `only'.
copy %mrp,%rp
	bl,n qt_error,%r0	    ; `only' erroneously returned.
.EXIT
.PROCEND
; Varargs
;
; First, call `startup' with the `pt' argument.
;
; Next, call the user's function with all arguments.
; We don't know whether arguments are integers, 32-bit floating-points or
; even 64-bit floating-points, so we reload all the registers, possibly
; with garbage arguments. The thread creator provided non-garbage for
; the arguments that the callee actually uses, so the callee never gets
; garbage.
;
; -48 -44 -40 -36 -32
; | arg3 | arg2 | arg1 | arg0 |
; -----------------------------
; integers: arg3 arg2 arg1 arg0
; 32-bit fps: farg3 farg2 farg1 farg0
; 64-bit fps: <---farg3--> <---farg1-->
;
; Finally, call `cleanup' with the `pt' argument and with the return value
; from the user's function. It is an error for `cleanup' to return.
qt_vstart
.PROC
.CALLINFO CALLER, FRAME=0
.ENTRY
; Because the startup function may damage the fixed arguments
; on the stack (PA-RISC Procedure Calling Conventions Reference
	; Manual, 2.4 Fixed Arguments Area), we allocate a separate
; stack frame for it.
ldo 64(%sp),%sp
; call: void startup(void *pt)
copy %r15,%arg0 ; `pt' is arg0 to `startup'.
copy %r16,%r22
.CALL
bl $$dyncall,%mrp ; Call `startup'.
copy %mrp,%rp
ldo -64(%sp),%sp
; call: void *qt_vuserf_t(...)
ldw -36(%sp),%arg0 ; Load args to integer registers.
ldw -40(%sp),%arg1
ldw -44(%sp),%arg2
ldw -48(%sp),%arg3
; Index of fld[w|d]s only ranges from -16 to 15, so we
; take r22 to be our new base register.
ldo -32(%sp),%r22
fldws -4(%r22),%farg0 ; Load args to floating-point registers.
fldds -8(%r22),%farg1
fldws -12(%r22),%farg2
fldds -16(%r22),%farg3
copy %r17,%r22
.CALL
bl $$dyncall,%mrp ; Call `userf'.
copy %mrp,%rp
; call: void cleanup(void *pt, void *vuserf_return)
copy %r15,%arg0 ; `pt' is arg0 to `cleanup'.
copy %ret0,%arg1 ; Return-value is arg1 to `cleanup'.
copy %r18,%r22
.CALL
bl $$dyncall,%mrp ; Call `cleanup'.
copy %mrp,%rp
bl,n qt_error,%r0
.EXIT
.PROCEND
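; In C terms, a hedged sketch of the sequence qt_vstart above performs (the
; typedef names are assumptions for illustration and are not defined here):
;
;	typedef void (qt_startup_t)(void *pt);
;	typedef void *(qt_vuserf_t)();		; varargs as laid out by the creator
;	typedef void (qt_cleanup_t)(void *pt, void *vuserf_return);
;
;	startup(pt);				; runs in its own stack frame
;	ret = userf(arg0, arg1, arg2, arg3, ...);	; args reloaded, possibly garbage
;	cleanup(pt, ret);			; must never return (qt_error otherwise)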
|
chensx00/gem5_custom | 1,906 | ext/systemc/src/sysc/qt/md/vax.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
.text
.globl _qt_abort
.globl _qt_block
.globl _qt_blocki
.globl _qt_start
.globl _qt_vstart
/*
// Calls to these routines have the signature
//
// void *block (func, arg1, arg2, newsp)
//
// Since the prologue saves 5 registers, nargs, pc, fp, ap, mask, and
// a condition handler (at sp+0), the first argument is 40=4*10 bytes
// offset from the stack pointer.
*/
_qt_block:
_qt_blocki:
_qt_abort:
.word 0x7c0 /* Callee-save mask: 5 registers. */
movl 56(sp),r1 /* Get stack pointer of new thread. */
movl 52(sp),-(r1) /* Push arg2 */
movl 48(sp),-(r1) /* Push arg1 */
movl sp,-(r1) /* Push arg0 */
movl 44(sp),r0 /* Get helper to call. */
movl r1,sp /* Move to new thread's stack. */
addl3 sp,$12,fp /* .. including the frame pointer. */
calls $3,(r0) /* Call helper. */
ret
_qt_start:
movl (sp)+,r0 /* Get `only'. */
calls $3,(r0) /* Call `only'. */
	calls	$0,_qt_error	/* `only' erroneously returned. */
_qt_vstart:
movl (sp)+,r10 /* Get `pt'. */
movl (sp)+,r9 /* Get `startup'. */
movl (sp)+,r8 /* Get `vuserf'. */
movl (sp)+,r7 /* Get `cleanup'. */
pushl r10 /* Push `qt'. */
calls $1,(r9) /* Call `startup', pop `qt' on return. */
calls (sp)+,(r8) /* Call user's function. */
pushl r0 /* Push `vuserf_retval'. */
pushl r10 /* Push `qt'. */
calls $2,(r7) /* Call `cleanup', never return. */
	calls	$0,_qt_error	/* `cleanup' erroneously returned. */
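/*
 * A hedged sketch of the `qt_error' routine these stubs fall into when
 * `only' or `cleanup' return; its real definition lives elsewhere in the
 * library (assumption: it simply reports the misuse and aborts).
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	void qt_error(void) {
 *		fprintf(stderr, "qt_error: thread start or cleanup returned\n");
 *		abort();
 *	}
 */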
|
chensx00/gem5_custom | 18,012 | ext/systemc/src/sysc/qt/md/powerpc_mach.s | /* powerpc_mach.s -- assembly support. */
/*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
* PowerPC-Mach thread switching module.
* Darwin (MacOS X) assembly
*
* NOTICE: Syntax for register names is not the GNU one. Register are
* named "rx" and "fx", not "%rx" and "%fx" as usual for the GNU "as" tool.
* Darwin "as" tool is based on GNU "as" but follows the "official" PowerPC
* syntax.
*
*
* This software is largely based on the original PowerPC-Linux porting
* developed by Ken Aaker <kenaaker@silverbacksystems.com>
*
* Marco Bucci <marco.bucci@inwind.it>
* December 2002
*
*/
/*
*
 *	PowerPC Register conventions:
*
* r0 volatile
* r1 SP
* r2 system reserved
* r3-r4 volatile for parameter passing and function return
* r5-r10 volatile for parameter passing
* r11-r12 volatile
 *	r13-r31	non volatile registers
* f0 volatile
* f1 volatile for parameter passing and function return
* f2-f13 volatile for parameter passing
* f14-f31 non volatile
*
* cr2-cr4 non volatile
*
*
 * See the header file for more documentation.
*
*
*
* IMPLEMENTATION NOTES
*
*
* 1) Condition register saving
* On most machines, the condition code register is caller-save.
* On the PPC, the condition code register is callee-save, so the
* thread context switch must preserve it.
*
*
* 2) Floating point registers saving
 * On resuming a thread, the floating point registers are or are not restored
 * depending only on which block routine suspended the thread (i.e. regardless
 * of whether "qt_block", "qt_blocki" or "qt_abort" is used to resume it).
 * This behaviour is obtained by implementing "qt_block" by means of a nested
 * call to "qt_blocki". As a result, the blocking of a thread always goes
 * and returns through "qt_blocki" and, if a thread was blocked by "qt_block",
 * its execution resumes from the floating point restoring code on exit
 * of "qt_block".
 *
 * Thanks to David Keppel, who explained this "simple" trick to me.
*
*
 * 3) C language code debugging
 * This software was developed and debugged using the Metrowerks
 * Code Warrior PPC integrated assembler. It can still be used with the
 * Code Warrior compiler by means of the file "powerpc_mach_asm_debug.c"
 * that includes it.
 * In order to avoid "copy and paste" bugs, and to make maintenance easier,
 * I made minimal changes, so you will find some strange code such as:
*
* #if 0
* .if 0
* C code here
* .endif
* #endif
*
* This is just to embed some C code that is needed by the Code Warrior
* integrated assembler.
*
*
* 4) Assembly constants generation
* Constants used in the assembly code are generated by running
 * the C++ code below (commented out). It uses the C macros declared in
 * the C header in order to guarantee that the C interface and the assembly
 * code are "aligned". I avoided the use of an assembler preprocessor since
 * they are not so standard, and moreover using macro expressions makes
* assembly debugging more difficult.
*
*
#include <iostream>
#include "powerpc_mach.h"
int main()
{
using namespace std;
int i;
cout << ".set LR_SAVE, " << PPC_LR_SAVE << endl;
cout << ".set CR_SAVE, " << PPC_CR_SAVE << endl;
cout << ".set BLOCKI_FSIZE, " << QUICKTHREADS_BLOCKI_FRAME_SIZE << endl;
cout << ".set BLOCK_FSIZE, " << QUICKTHREADS_BLOCK_FRAME_SIZE << endl;
cout << endl;
for(i=0; i<12; i++)
cout << ".set PAR_" << i << ", " << PPC_PAR(i) << endl;
cout << endl;
i = 13;
cout << ".set GPR_SAVE_" << i << ", " << QUICKTHREADS_BLOCKI_GPR_SAVE(i) << endl;
cout << endl;
for(i=31; i>13; i--)
cout << ".set FPR_SAVE_" << i << ", " << QUICKTHREADS_BLOCK_FPR_SAVE(i) << endl;
cout << endl;
cout << ".set VARGS_BKOFF, " << QUICKTHREADS_VARGS_BKOFF << endl;
cout << endl << endl << endl;
for(i=31; i>13; i--)
cout << "\tstfd\tf" << i << ",FPR_SAVE_" << i << "(r1)" << endl;
cout << endl;
for(i=31; i>13; i--)
cout << "\tlfd \tf" << i << ",FPR_SAVE_" << i << "(r1)" << endl;
cout << endl << endl << endl;
return 0;
}
*
*
*
*/
#if 0
.text
.align 4
.globl qt_block
.globl _qt_block
.globl qt_blocki
.globl _qt_blocki
.globl qt_abort
.globl _qt_abort
.globl qt_start
.globl _qt_start
.globl qt_vstart
.globl _qt_vstart
.set LR_SAVE, 8
.set CR_SAVE, 4
.set BLOCKI_FSIZE, 128
.set BLOCK_FSIZE, 192
.set PAR_0, 24
.set PAR_1, 28
.set PAR_2, 32
.set PAR_3, 36
.set PAR_4, 40
.set PAR_5, 44
.set PAR_6, 48
.set PAR_7, 52
.set PAR_8, 56
.set PAR_9, 60
.set PAR_10, 64
.set PAR_11, 68
.set GPR_SAVE_13, 52
.set FPR_SAVE_31, 184
.set FPR_SAVE_30, 176
.set FPR_SAVE_29, 168
.set FPR_SAVE_28, 160
.set FPR_SAVE_27, 152
.set FPR_SAVE_26, 144
.set FPR_SAVE_25, 136
.set FPR_SAVE_24, 128
.set FPR_SAVE_23, 120
.set FPR_SAVE_22, 112
.set FPR_SAVE_21, 104
.set FPR_SAVE_20, 96
.set FPR_SAVE_19, 88
.set FPR_SAVE_18, 80
.set FPR_SAVE_17, 72
.set FPR_SAVE_16, 64
.set FPR_SAVE_15, 56
.set FPR_SAVE_14, 48
/* various offsets used by "qt_varg" */
.set P_T, PAR_0
.set P_STARTUP, PAR_1
.set P_USERF, PAR_2
.set P_CLEANUP, PAR_3
/* the offset used to move back the linkage area to be adjacent to
 * the variant argument list before calling "userf(...)". */
.set VARGS_BKOFF, 16 /* skip "t", "startup", "userf" and "cleanup" */
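/* i.e. four one-word fixed parameters ("t", "startup", "userf", "cleanup")
 * at 4 bytes each = 16 bytes */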
/* location where "t" and "cleanup" are saved (with respect of
* the stack frame base) */
.set P_T_SAVE, -4
.set P_CLEANUP_SAVE, -8
#endif
/* Block the current thread saving all integer non volatile registers and
* start a new thread.
*/
#if 0
.if 0
#endif
void *qt_blocki (void *helper, void *a0, void *a1, void *newthread);
asm void *qt_blocki (void *helper, void *a0, void *a1, void *newthread)
{
#if 0
.endif
#endif
#if 0
qt_blocki:
_qt_blocki:
#endif
/* prolog code */
stwu r1,-BLOCKI_FSIZE(r1) /* allocate the stack frame */
mflr r0 /* return addr in r0 */
mfcr r11 /* CR in r11 */
stw r0,LR_SAVE+BLOCKI_FSIZE(r1) /* save return addr in the stack */
stw r11,CR_SAVE+BLOCKI_FSIZE(r1) /* save CR in the stack */
stmw r13,GPR_SAVE_13(r1) /* save non-volatile reg */
/* call helper(qt_t *old, void *a0, void *a1) */
mtlr r3 /* "helper" addr in the link reg */
mr r3,r1 /* current thread (i.e. the SP) in arg "old" */
mr r1,r6 /* swap to the new thread (i.e. to its SP) */
blrl /* jump to "helper" */
/* the "helper" return value is returned (since r3 is not changed) */
/* epilog code: return to the new thread's "qt_blocki" caller */
lmw r13,GPR_SAVE_13(r1) /* restore non-volatile reg */
lwz r0,LR_SAVE+BLOCKI_FSIZE(r1) /* recover return addr */
lwz r11,CR_SAVE+BLOCKI_FSIZE(r1) /* recover CR */
mtlr r0 /* return address in the link reg */
mtcr r11 /* restore CR */
addi r1,r1,BLOCKI_FSIZE /* free the stack frame */
blr /* return */
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
/* Abort the current thread and start a new thread.
*/
#if 0
.if 0
#endif
void qt_abort (void *helper, void *a0, void *a1, void *newthread);
asm void qt_abort (void *helper, void *a0, void *a1, void *newthread)
{
#if 0
.endif
#endif
#if 0
qt_abort:
_qt_abort:
#endif
/* prolog code */
/* there is no prolog. It will never come back */
/* call helper(qt_t *old, void *a0, void *a1) */
mtlr r3 /* "helper" addr in the link reg */
mr r1,r6 /* swap to the new thread (i.e. to its SP) */
/* we don't need to set "old", we can pass just garbage. Actually, since r3
is not changed, "old" is set to "helper" (don't care) */
blrl /* call "helper" */
/* the "helper" return value is returned (since r3 is not changed) */
/* epilog code: return to the new thread's "qt_blocki" caller */
lmw r13,GPR_SAVE_13(r1) /* restore non-volatile reg */
lwz r0,LR_SAVE+BLOCKI_FSIZE(r1) /* recover return addr */
lwz r11,CR_SAVE+BLOCKI_FSIZE(r1) /* recover CR */
mtlr r0 /* return address in the link reg */
mtcr r11 /* restore CR */
addi r1,r1,BLOCKI_FSIZE /* free the stack frame */
blr /* return */
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
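/* A hedged usage sketch (not part of this file): a scheduler typically calls
 * qt_abort when the running thread is finished, handing the helper whatever
 * it needs to release the dead thread's storage once execution is safely on
 * the new thread's stack.  The names below are illustrative assumptions.
 *
 *	#include <stdlib.h>
 *
 *	static void *free_dead(void *old_sp, void *stack_base, void *unused)
 *	{
 *		free(stack_base);	// safe: we now run on the new thread's stack
 *		return NULL;
 *	}
 *
 *	// never returns:
 *	qt_abort(free_dead, dying_stack_base, NULL, next_sp);
 */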
/* Block the current thread saving all non volatile registers and start
* a new thread.
*/
#if 0
.if 0
#endif
void *qt_block (void *helper, void *a0, void *a1, void *newthread);
asm void *qt_block (void *helper, void *a0, void *a1, void *newthread)
{
#if 0
.endif
#endif
# if 0
qt_block:
_qt_block:
#endif
/* prolog code */
stwu r1,-BLOCK_FSIZE(r1) /* allocate the stack frame */
mflr r0 /* return addr in r0 */
stw r0,LR_SAVE+BLOCK_FSIZE(r1) /* save return addr in the stack */
/* save non-volatile fp reg */
stfd f31,FPR_SAVE_31(r1)
stfd f30,FPR_SAVE_30(r1)
stfd f29,FPR_SAVE_29(r1)
stfd f28,FPR_SAVE_28(r1)
stfd f27,FPR_SAVE_27(r1)
stfd f26,FPR_SAVE_26(r1)
stfd f25,FPR_SAVE_25(r1)
stfd f24,FPR_SAVE_24(r1)
stfd f23,FPR_SAVE_23(r1)
stfd f22,FPR_SAVE_22(r1)
stfd f21,FPR_SAVE_21(r1)
stfd f20,FPR_SAVE_20(r1)
stfd f19,FPR_SAVE_19(r1)
stfd f18,FPR_SAVE_18(r1)
stfd f17,FPR_SAVE_17(r1)
stfd f16,FPR_SAVE_16(r1)
stfd f15,FPR_SAVE_15(r1)
stfd f14,FPR_SAVE_14(r1)
/* block the thread */
bl qt_blocki
/* the thread is going to be resumed */
/* restore non-volatile fp reg */
lfd f31,FPR_SAVE_31(r1)
lfd f30,FPR_SAVE_30(r1)
lfd f29,FPR_SAVE_29(r1)
lfd f28,FPR_SAVE_28(r1)
lfd f27,FPR_SAVE_27(r1)
lfd f26,FPR_SAVE_26(r1)
lfd f25,FPR_SAVE_25(r1)
lfd f24,FPR_SAVE_24(r1)
lfd f23,FPR_SAVE_23(r1)
lfd f22,FPR_SAVE_22(r1)
lfd f21,FPR_SAVE_21(r1)
lfd f20,FPR_SAVE_20(r1)
lfd f19,FPR_SAVE_19(r1)
lfd f18,FPR_SAVE_18(r1)
lfd f17,FPR_SAVE_17(r1)
lfd f16,FPR_SAVE_16(r1)
lfd f15,FPR_SAVE_15(r1)
lfd f14,FPR_SAVE_14(r1)
lwz r0,LR_SAVE+BLOCK_FSIZE(r1) /* recover return addr */
mtlr r0 /* return address in the link reg */
addi r1,r1,BLOCK_FSIZE /* free the stack frame */
blr /* return */
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
/* Start a single argument thread using parameters preloaded in the stack
* during thread initialization (see comments on stack initialization in the
 * header file).
*
* Executes:
*
* only(u, t, userf);
*/
#if 0
.if 0
#endif
void qt_start(void);
asm void qt_start(void)
{
#if 0
.endif
#endif
#if 0
qt_start:
_qt_start:
#endif
lwz r3,PAR_0(r1) /* "u" in r3 */
lwz r4,PAR_1(r1) /* "t" in r4 */
lwz r5,PAR_2(r1) /* "userf" in r5 */
lwz r6,PAR_3(r1) /* "only" in r6 */
mtlr r6 /* "only" address in the link reg */
/* call only(u, t, userf) */
blrl /* jump to "only" */
/* error if it returns */
b _qt_error
/* dead code (some inline asm "wants" the epilog, or they generate it) */
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
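/* A hedged sketch of the `only' routine that qt_start jumps to; the signatures
 * are assumptions for illustration (they are not defined in this file).  It
 * receives "u", "t" and "userf" exactly as loaded into r3-r5 above, runs the
 * user function, and must switch to another thread instead of returning
 * (returning would land in qt_error).
 *
 *	typedef void *(qt_userf_t)(void *pu);
 *
 *	static void my_only(void *pu, void *pt, qt_userf_t *userf)
 *	{
 *		void *result = (*userf)(pu);	// run the thread body
 *		schedule_next(pt, result);	// hypothetical: abort to the next thread
 *		// must not return
 *	}
 */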
/* Start a variant argument thread using parameters preloaded in the stack
* during thread initialization (see comments on stack initialization in the
 * header file).
*
* Executes:
*
* startup(t);
* userf_return = userf(...);
* cleanup(pt, userf_return);
*
***** Stack layout on start *****
backchain -> STACK BOTTOM (higher address)
+==========================+
backchain - 4 -> | |
+ LOCAL VARIABLES AREA +
..............
+ +
| |
+--------------------------+
| |
           +    ALIGNMENT PAD         +
..............
+ (if needed) +
| |
+--------------------------+
| | arg(n)
+ +
| |
+ VARIABLE ARGUMENT LIST +
..............
+ for userf call +
SP + PAR(5) -> | | arg(1)
+ +
SP + PAR(4) -> | | arg(0)
+--------------------------+
SP + PAR(3) -> | | cleanup par
+ +
SP + PAR(2) -> | | userf par
+ PARAMETER AREA +
SP + PAR(1) -> | | startup par
+ +
SP + PAR(0) -> | | t par
+--------------------------+
| |
+ LINKAGE AREA +
SP -> | |
+==========================+
STACK TOP (lower address)
Stack grows down
|
V
***** Stack layout before call userf *****
backchain -> STACK BOTTOM (higher address)
+==========================+
backchain - 4 -> | |
+ LOCAL VARIABLES AREA +
..............
+ +
| |
+--------------------------+
| |
           +    ALIGNMENT PAD         +
..............
+ (if needed) +
| |
+--------------------------+
| | arg(n)
+ +
| |
+ VARIABLE ARGUMENT LIST +
..............
+ for userf call +
SP + PAR(1) -> | | arg(1)
+ +
SP + PAR(0) -> | | arg(0)
+--------------------------+
| |
+ LINKAGE AREA +
SP -> | |
+==========================+
STACK TOP (lower address)
Stack grows down
|
V
 * To call "userf(...)", the argument list must be adjacent to the linkage
 * area. Instead of copying the argument list, we move back the linkage area
* (actually, we just increase the SP and copy the backchain). "t" and
* "cleanup" are saved in a local variable area in order to call
* cleanup(pt, userf_return).
*/
#if 0
.if 0
#endif
void qt_vstart(void);
asm void qt_vstart(void)
{
#if 0
.endif
#endif
#if 0
qt_vstart:
_qt_vstart:
#endif
/* NOTICE: the callee routines could save parameter registers in the caller's
 * stack parameter area. We put "t" in PAR(0) in such a way that, if startup(t)
 * saves "t", it will be saved in the same location, thus not deleting
 * any other parameter.
 */
/* since we will move back the linkage area (to make it adjacent to the
* parameter list), we need to save "t" and "cleanup". We have made room for
* this on the bottom of the stack frame. */
/* save parameters in the local variable area */
lwz r11,0(r1) /* get the backchain */
lwz r3,P_T(r1)
lwz r4,P_CLEANUP(r1)
stw r3,P_T_SAVE(r11) /* save "pt" */
stw r4,P_CLEANUP_SAVE(r11) /* save "cleanup" */
/* call startup(t) */
lwz r5,P_STARTUP(r1)
mtlr r5
blrl /* call "startup" */
/* call userf(...) */
lwz r11,0(r1) /* reload backchain (r11 is volatile) */
lwz r4,P_USERF(r1) /* load "userf" */
mtlr r4
/* the first eight parameters of the variant list must be copied into
 * GPR3-GPR10. There is a four-place offset due to "t", "startup",
 * "userf" and "cleanup". */
lwz r3,PAR_4(r1)
lwz r4,PAR_5(r1)
lwz r5,PAR_6(r1)
lwz r6,PAR_7(r1)
lwz r7,PAR_8(r1)
lwz r8,PAR_9(r1)
lwz r9,PAR_10(r1)
lwz r10,PAR_11(r1)
/* move the linkage area to be adjacent to the argument list */
stw r11,VARGS_BKOFF(r1) /* copy backchain */
addi r1,r1,VARGS_BKOFF /* move back the stack */
blrl /* call "userf" */
/* call qt_cleanup(void *pt, void *vuserf_return) */
lwz r11,0(r1) /* reload backchain (r11 is volatile) */
mr r4,r3 /* push "userf" return as 2nd parameter */
lwz r3,P_T_SAVE(r11) /* reload "pt" */
lwz r5,P_CLEANUP_SAVE(r11) /* reload "cleanup" */
mtlr r5
blrl
b _qt_error
/* dead code (some inline asm "wants" the epilog, or they generate it) */
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
|
chensx00/gem5_custom | 5,790 | ext/systemc/src/sysc/qt/md/mips-irix5.s | /* mips.s -- assembly support. */
/*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
/* Callee-save $16-$23, $30-$31.
*
* $25 is used as a procedure value pointer, used to discover constants
* in a callee. Thus, each caller here sets $25 before the call.
*
* On startup, restore regs so retpc === call to a function to start.
* We're going to call a function ($4) from within this routine.
* We're passing 3 args, therefore need to allocate 12 extra bytes on
* the stack for a save area. The start routine needs a like 16-byte
* save area. Must be doubleword aligned (_mips r3000 risc
* architecture_, gerry kane, pg d-23).
*/
/*
* Modified by Assar Westerlund <assar@sics.se> to support Irix 5.x
* calling conventions for dynamically-linked code.
*/
/* Make this position-independent code. */
.option pic2
.globl qt_block
.globl qt_blocki
.globl qt_abort
.globl qt_start
.globl qt_vstart
/*
** $4: ptr to function to call once curr is suspended
** and control is on $7's stack.
** $5: 1'th arg to $4.
** $6: 2'th arg to $4
** $7: sp of thread to suspend.
**
** Totally gross hack: The MIPS calling convention reserves
** 4 words on the stack for a0..a3. This routine "ought" to
** allocate space for callee-save registers plus 4 words for
** the helper function, but instead we use the 4 words
** provided by the function that called us (we don't need to
** save our argument registers). So what *appears* to be
** allocating only 40 bytes is actually allocating 56, by
** using the caller's 16 bytes.
**
** The helper routine returns a value that is passed on as the
** return value from the blocking routine. Since we don't
** touch $2 between the helper's return and the end of
** function, we get this behavior for free.
*/
qt_blocki:
sub $sp,$sp,40 /* Allocate reg save space. */
sw $16, 0+16($sp)
sw $17, 4+16($sp)
sw $18, 8+16($sp)
sw $19,12+16($sp)
sw $20,16+16($sp)
sw $21,20+16($sp)
sw $22,24+16($sp)
sw $23,28+16($sp)
sw $30,32+16($sp)
sw $31,36+16($sp)
add $2, $sp,$0 /* $2 <= old sp to pass to func@$4. */
qt_abort:
add $sp, $7,$0 /* $sp <= new sp. */
.set noreorder
add $25, $4,$0 /* Set helper function procedure value. */
jal $31,$25 /* Call helper func@$4 . */
add $4, $2,$0 /* $a0 <= pass old sp as a parameter. */
.set reorder
lw $31,36+16($sp) /* Restore callee-save regs... */
lw $30,32+16($sp)
lw $23,28+16($sp)
lw $22,24+16($sp)
lw $21,20+16($sp)
lw $20,16+16($sp)
lw $19,12+16($sp)
lw $18, 8+16($sp)
lw $17, 4+16($sp)
lw $16, 0+16($sp) /* Restore callee-save */
add $sp,$sp,40 /* Deallocate reg save space. */
j $31 /* Return to caller. */
/*
** Non-varargs thread startup.
** Note: originally, 56 bytes were allocated on the stack.
** The thread restore routine (_blocki/_abort) removed 40
** of them, which means there is still 16 bytes for the
** argument area required by the MIPS calling convention.
*/
qt_start:
add $4, $16,$0 /* Load up user function pu. */
add $5, $17,$0 /* ... user function pt. */
add $6, $18,$0 /* ... user function userf. */
add $25, $19,$0 /* Set `only' procedure value. */
jal $31,$25 /* Call `only'. */
la $25,qt_error /* Set `qt_error' procedure value. */
j $25
/*
** Save callee-save floating-point regs $f20-$f30
** See comment in `qt_block' about calling conventions and
** reserved space. Use the same trick here, but here we
** actually have to allocate all the bytes since we have to
** leave 4 words leftover for `qt_blocki'.
**
** Return value from `qt_block' is the same as the return from
** `qt_blocki'. We get that for free since we don't touch $2
** between the return from `qt_blocki' and the return from
** `qt_block'.
*/
qt_block:
sub $sp, $sp,56 /* 6 8-byte regs, saved ret pc, aligned. */
swc1 $f20, 0+16($sp)
swc1 $f22, 8+16($sp)
swc1 $f24, 16+16($sp)
swc1 $f26, 24+16($sp)
swc1 $f28, 32+16($sp)
swc1 $f30, 40+16($sp)
sw $31, 48+16($sp)
jal qt_blocki
lwc1 $f20, 0+16($sp)
lwc1 $f22, 8+16($sp)
lwc1 $f24, 16+16($sp)
lwc1 $f26, 24+16($sp)
lwc1 $f28, 32+16($sp)
lwc1 $f30, 40+16($sp)
lw $31, 48+16($sp)
add $sp, $sp,56
j $31
/*
** First, call `startup' with the `pt' argument.
**
** Next, call the user's function with all arguments.
** Note that we don't know whether args were passed in
** integer regs, fp regs, or on the stack (See Gerry Kane
** "MIPS R2000 RISC Architecture" pg D-22), so we reload
** all the registers, possibly with garbage arguments.
**
** Finally, call `cleanup' with the `pt' argument and with
** the return value from the user's function. It is an error
** for `cleanup' to return.
*/
qt_vstart:
add $4, $17,$0 /* `pt' is arg0 to `startup'. */
add $25, $18,$0 /* Set `startup' procedure value. */
jal $31, $25 /* Call `startup'. */
add $sp, $sp,16 /* Free extra save space. */
lw $4, 0($sp) /* Load up args. */
lw $5, 4($sp)
lw $6, 8($sp)
lw $7, 12($sp)
lwc1 $f12, 0($sp) /* Load up fp args. */
lwc1 $f14, 8($sp)
add $25, $19,$0 /* Set `userf' procedure value. */
jal $31,$25 /* Call `userf'. */
add $4, $17,$0 /* `pt' is arg0 to `cleanup'. */
add $5, $2,$0 /* Ret. val is arg1 to `cleanup'. */
add $25, $16,$0 /* Set `cleanup' procedure value. */
jal $31, $25 /* Call `cleanup'. */
la $25,qt_error /* Set `qt_error' procedure value. */
j $25
|
chensx00/gem5_custom | 5,084 | ext/systemc/src/sysc/qt/md/mips.s | /* mips.s -- assembly support. */
/*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
/* Callee-save $16-$23, $30-$31.
*
* On startup, restore regs so retpc === call to a function to start.
* We're going to call a function ($4) from within this routine.
* We're passing 3 args, therefore need to allocate 12 extra bytes on
* the stack for a save area. The start routine needs a like 16-byte
* save area. Must be doubleword aligned (_mips r3000 risc
* architecture_, gerry kane, pg d-23).
*/
.globl qt_block
.globl qt_blocki
.globl qt_abort
.globl qt_start
.globl qt_vstart
/*
** $4: ptr to function to call once curr is suspended
** and control is on $7's stack.
** $5: 1'th arg to $4.
** $6: 2'th arg to $4
** $7: sp of thread to suspend.
**
** Totally gross hack: The MIPS calling convention reserves
** 4 words on the stack for a0..a3. This routine "ought" to
** allocate space for callee-save registers plus 4 words for
** the helper function, but instead we use the 4 words
** provided by the function that called us (we don't need to
** save our argument registers). So what *appears* to be
** allocating only 40 bytes is actually allocating 56, by
** using the caller's 16 bytes.
**
** The helper routine returns a value that is passed on as the
** return value from the blocking routine. Since we don't
** touch $2 between the helper's return and the end of
** function, we get this behavior for free.
*/
qt_blocki:
sub $sp,$sp,40 /* Allocate reg save space. */
sw $16, 0+16($sp)
sw $17, 4+16($sp)
sw $18, 8+16($sp)
sw $19,12+16($sp)
sw $20,16+16($sp)
sw $21,20+16($sp)
sw $22,24+16($sp)
sw $23,28+16($sp)
sw $30,32+16($sp)
sw $31,36+16($sp)
add $2, $sp,$0 /* $2 <= old sp to pass to func@$4. */
qt_abort:
add $sp, $7,$0 /* $sp <= new sp. */
.set noreorder
jal $31,$4 /* Call helper func@$4 . */
add $4, $2,$0 /* $a0 <= pass old sp as a parameter. */
.set reorder
lw $31,36+16($sp) /* Restore callee-save regs... */
lw $30,32+16($sp)
lw $23,28+16($sp)
lw $22,24+16($sp)
lw $21,20+16($sp)
lw $20,16+16($sp)
lw $19,12+16($sp)
lw $18, 8+16($sp)
lw $17, 4+16($sp)
lw $16, 0+16($sp) /* Restore callee-save */
add $sp,$sp,40 /* Deallocate reg save space. */
j $31 /* Return to caller. */
/*
** Non-varargs thread startup.
** Note: originally, 56 bytes were allocated on the stack.
** The thread restore routine (_blocki/_abort) removed 40
** of them, which means there is still 16 bytes for the
** argument area required by the MIPS calling convention.
*/
qt_start:
add $4, $16,$0 /* Load up user function pu. */
add $5, $17,$0 /* ... user function pt. */
add $6, $18,$0 /* ... user function userf. */
jal $31,$19 /* Call `only'. */
j qt_error
/*
** Save callee-save floating-point regs $f20-$f30
** See comment in `qt_block' about calling conventions and
** reserved space. Use the same trick here, but here we
** actually have to allocate all the bytes since we have to
** leave 4 words leftover for `qt_blocki'.
**
** Return value from `qt_block' is the same as the return from
** `qt_blocki'. We get that for free since we don't touch $2
** between the return from `qt_blocki' and the return from
** `qt_block'.
*/
qt_block:
sub $sp, $sp,56 /* 6 8-byte regs, saved ret pc, aligned. */
swc1 $f20, 0+16($sp)
swc1 $f22, 8+16($sp)
swc1 $f24, 16+16($sp)
swc1 $f26, 24+16($sp)
swc1 $f28, 32+16($sp)
swc1 $f30, 40+16($sp)
sw $31, 48+16($sp)
jal qt_blocki
lwc1 $f20, 0+16($sp)
lwc1 $f22, 8+16($sp)
lwc1 $f24, 16+16($sp)
lwc1 $f26, 24+16($sp)
lwc1 $f28, 32+16($sp)
lwc1 $f30, 40+16($sp)
lw $31, 48+16($sp)
add $sp, $sp,56
j $31
/*
** First, call `startup' with the `pt' argument.
**
** Next, call the user's function with all arguments.
** Note that we don't know whether args were passed in
** integer regs, fp regs, or on the stack (See Gerry Kane
** "MIPS R2000 RISC Architecture" pg D-22), so we reload
** all the registers, possibly with garbage arguments.
**
** Finally, call `cleanup' with the `pt' argument and with
** the return value from the user's function. It is an error
** for `cleanup' to return.
*/
qt_vstart:
add $4, $17,$0 /* `pt' is arg0 to `startup'. */
jal $31, $18 /* Call `startup'. */
add $sp, $sp,16 /* Free extra save space. */
lw $4, 0($sp) /* Load up args. */
lw $5, 4($sp)
lw $6, 8($sp)
lw $7, 12($sp)
lwc1 $f12, 0($sp) /* Load up fp args. */
lwc1 $f14, 8($sp)
jal $31,$19 /* Call `userf'. */
add $4, $17,$0 /* `pt' is arg0 to `cleanup'. */
add $5, $2,$0 /* Ret. val is arg1 to `cleanup'. */
jal $31, $16 /* Call `cleanup'. */
j qt_error
|
chensx00/gem5_custom | 1,336 | ext/systemc/src/sysc/qt/md/vax_b.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
.text
.globl _b_call_reg
.globl _b_call_imm
.globl _b_add
.globl _b_load
_b_null:
.word 0x0
ret
_b_call_reg:
.word 0x0
movl 4(ap),r0
moval _b_null,r1
L0:
calls $0,(r1)
calls $0,(r1)
calls $0,(r1)
calls $0,(r1)
calls $0,(r1)
subl2 $5,r0
bgtr L0
ret
_b_call_imm:
.word 0x0
movl 4(ap),r0
L1:
calls $0,_b_null
calls $0,_b_null
calls $0,_b_null
calls $0,_b_null
calls $0,_b_null
subl2 $5,r0
bgtr L1
ret
_b_add:
.word 0x0
movl 4(ap),r0
L2:
subl2 $1,r0
subl2 $1,r0
subl2 $1,r0
subl2 $1,r0
subl2 $1,r0
subl2 $1,r0
subl2 $1,r0
subl2 $1,r0
subl2 $1,r0
subl2 $1,r0
bgtr L2
ret
_b_load:
.word 0x0
movl 4(ap),r0
L3:
movl 0(sp),r1
movl 4(sp),r1
movl 8(sp),r1
movl 12(sp),r1
movl 16(sp),r1
movl 20(sp),r1
movl 24(sp),r1
movl 28(sp),r1
movl 32(sp),r1
movl 36(sp),r1
subl2 $1,r0
bgtr L3
ret
|
chensx00/gem5_custom | 18,062 | ext/systemc/src/sysc/qt/md/powerpc_sys5.s | /* powerpc-sys5.s -- assembly support. */
/*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
* PowerPC-System V thread switching module.
*
* This software is largely based on the original PowerPC-Linux porting
* developed by Ken Aaker <kenaaker@silverbacksystems.com>
*
* Marco Bucci <marco.bucci@inwind.it>
* December 2002
*
*/
/*
*
 *	PowerPC Register conventions:
*
* r0 volatile
* r1 SP
* r2 system reserved
* r3-r4 volatile for parameter passing and function return
* r5-r10 volatile for parameter passing
* r11-r12 volatile
 *	r13-r31	non volatile registers
* f0 volatile
* f1 volatile for parameter passing and function return
* f2-f13 volatile for parameter passing
* f14-f31 non volatile
*
* cr2-cr4 non volatile
*
*
 * See the header file for more documentation.
*
*
*
* IMPLEMENTATION NOTES
*
*
* 1) Condition register saving
* On most machines, the condition code register is caller-save.
* On the PPC, the condition code register is callee-save, so the
* thread context switch must preserve it.
*
*
* 2) Floating point registers saving
 * On resuming a thread, the floating point registers are or are not restored
 * depending only on which block routine suspended the thread (i.e. regardless
 * of whether "qt_block", "qt_blocki" or "qt_abort" is used to resume it).
 * This behaviour is obtained by implementing "qt_block" by means of a nested
 * call to "qt_blocki". As a result, the blocking of a thread always goes
 * and returns through "qt_blocki" and, if a thread was blocked by "qt_block",
 * its execution resumes from the floating point restoring code on exit
 * of "qt_block".
 *
 * Thanks to David Keppel, who explained this "simple" trick to me.
*
*
 * 3) C language code debugging
 * The original version of this software was developed and debugged under
 * MacOS X using the Metrowerks Code Warrior PPC integrated assembler.
 * It can still be used with a C inline assembler by means of a suitable
 * file that includes it.
 * In order to avoid "copy and paste" bugs, and to make maintenance easier,
 * I made minimal changes, so you will find some strange code such as:
*
* #if 0
* .if 0
* C code here
* .endif
* #endif
*
* This is just to embed some C code that is needed by the Code Warrior
* integrated assembler.
*
*
* 4) Assembly constants generation
* Constants used in the assembly code are generated by running
 * the C++ code below (commented out). It uses the C macros declared in
 * the C header in order to guarantee that the C interface and the assembly
 * code are "aligned". I avoided the use of an assembler preprocessor since
 * they are not so standard, and moreover using macro expressions makes
* assembly debugging more difficult.
*
*
#include <iostream>
#include "powerpc_sys5.h"
int main()
{
using namespace std;
int i;
cout << ".set LR_SAVE, " << PPC_LR_SAVE << endl;
cout << ".set BLOCKI_FSIZE, " << QUICKTHREADS_BLOCKI_FRAME_SIZE << endl;
cout << ".set BLOCKI_CR_SAVE, " << QUICKTHREADS_BLOCKI_CR_SAVE << endl;
cout << ".set BLOCK_FSIZE, " << QUICKTHREADS_BLOCK_FRAME_SIZE << endl;
cout << endl;
for(i=0; i<12; i++)
cout << ".set PAR_" << i << ", " << PPC_PAR(i) << endl;
cout << endl;
i = 13;
cout << ".set GPR_SAVE_" << i << ", " << QUICKTHREADS_BLOCKI_GPR_SAVE(i) << endl;
cout << endl;
for(i=31; i>13; i--)
cout << ".set FPR_SAVE_" << i << ", " << QUICKTHREADS_BLOCK_FPR_SAVE(i) << endl;
cout << endl;
cout << ".set VARGS_BKOFF, " << QUICKTHREADS_VARGS_BKOFF << endl;
cout << endl << endl << endl;
for(i=31; i>13; i--)
cout << "\tstfd\tf" << i << ",FPR_SAVE_" << i << "(%r1)" << endl;
cout << endl;
for(i=31; i>13; i--)
cout << "\tlfd \tf" << i << ",FPR_SAVE_" << i << "(%r1)" << endl;
cout << endl << endl << endl;
return 0;
}
*
*
*
*/
#if 0
.text
.align 4
.globl qt_block
.globl _qt_block
.globl qt_blocki
.globl _qt_blocki
.globl qt_abort
.globl _qt_abort
.globl qt_start
.globl _qt_start
.globl qt_vstart
.globl _qt_vstart
.set LR_SAVE, 4
.set BLOCKI_FSIZE, 96
.set BLOCKI_CR_SAVE, 8 /* CR is saved into the callee's stack frame */
.set BLOCK_FSIZE, 160
.set PAR_0, 8
.set PAR_1, 12
.set PAR_2, 16
.set PAR_3, 20
.set PAR_4, 24
.set PAR_5, 28
.set PAR_6, 32
.set PAR_7, 36
.set PAR_8, 40
.set PAR_9, 44
.set PAR_10, 48
.set PAR_11, 52
.set GPR_SAVE_13, 20
.set FPR_SAVE_31, 152
.set FPR_SAVE_30, 144
.set FPR_SAVE_29, 136
.set FPR_SAVE_28, 128
.set FPR_SAVE_27, 120
.set FPR_SAVE_26, 112
.set FPR_SAVE_25, 104
.set FPR_SAVE_24, 96
.set FPR_SAVE_23, 88
.set FPR_SAVE_22, 80
.set FPR_SAVE_21, 72
.set FPR_SAVE_20, 64
.set FPR_SAVE_19, 56
.set FPR_SAVE_18, 48
.set FPR_SAVE_17, 40
.set FPR_SAVE_16, 32
.set FPR_SAVE_15, 24
.set FPR_SAVE_14, 16
/* various offsets used by "qt_varg" */
.set P_T, PAR_0
.set P_STARTUP, PAR_1
.set P_USERF, PAR_2
.set P_CLEANUP, PAR_3
/* the offset used to move back the linkage area to be adjacent to
 * the variant argument list before calling "userf(...)".
* Skip "t", "startup", "userf", "cleanup" and first
* 8 parameters (since they are passed via registers) */
.set VARGS_BKOFF, 48
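/* i.e. (4 fixed one-word parameters + 8 register-passed words) * 4 bytes = 48 bytes */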
/* location where "t" and "cleanup" are saved (with respect of
* the stack frame base) */
.set P_T_SAVE, -4
.set P_CLEANUP_SAVE, -8
#endif
/* Block the current thread saving all integer non volatile registers and
* start a new thread.
*/
#if 0
.if 0
#endif
void *qt_blocki (void *helper, void *a0, void *a1, void *newthread);
asm void *qt_blocki (void *helper, void *a0, void *a1, void *newthread)
{
#if 0
.endif
#endif
#if 0
qt_blocki:
_qt_blocki:
#endif
/* prolog code */
stwu %r1,-BLOCKI_FSIZE(%r1) /* allocate the stack frame */
mflr %r0 /* return addr in r0 */
mfcr %r11 /* CR in r11 */
stw %r0,LR_SAVE+BLOCKI_FSIZE(%r1) /* save return addr in the stack */
stw %r11,BLOCKI_CR_SAVE(%r1) /* save CR in the stack */
stmw %r13,GPR_SAVE_13(%r1) /* save non-volatile reg */
/* call helper(qt_t *old, void *a0, void *a1) */
mtlr %r3 /* "helper" addr in the link reg */
mr %r3,%r1 /* current thread (i.e. the SP) in arg "old" */
mr %r1,%r6 /* swap to the new thread (i.e. to its SP) */
blrl /* jump to "helper" */
/* the "helper" return value is returned (since r3 is not changed) */
/* epilog code: return to the new thread's "qt_blocki" caller */
lmw %r13,GPR_SAVE_13(%r1) /* restore non-volatile reg */
lwz %r0,LR_SAVE+BLOCKI_FSIZE(%r1) /* recover return addr */
lwz %r11,BLOCKI_CR_SAVE(%r1) /* recover CR */
mtlr %r0 /* return address in the link reg */
mtcr %r11 /* restore CR */
addi %r1,%r1,BLOCKI_FSIZE /* free the stack frame */
blr /* return */
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
/* Abort the current thread and start a new thread.
*/
#if 0
.if 0
#endif
void qt_abort (void *helper, void *a0, void *a1, void *newthread);
asm void qt_abort (void *helper, void *a0, void *a1, void *newthread)
{
#if 0
.endif
#endif
#if 0
qt_abort:
_qt_abort:
#endif
/* prolog code */
/* there is no prolog. It will never come back */
/* call helper(qt_t *old, void *a0, void *a1) */
mtlr %r3 /* "helper" addr in the link reg */
mr %r1,%r6 /* swap to the new thread (i.e. to its SP) */
/* we don't need to set "old", we can pass just garbage. Actually, since r3
is not changed, "old" is set to "helper" (don't care) */
blrl /* call "helper" */
/* the "helper" return value is returned (since r3 is not changed) */
/* epilog code: return to the new thread's "qt_blocki" caller */
lmw %r13,GPR_SAVE_13(%r1) /* restore non-volatile reg */
lwz %r0,LR_SAVE+BLOCKI_FSIZE(%r1) /* recover return addr */
lwz %r11,BLOCKI_CR_SAVE(%r1) /* recover CR */
mtlr %r0 /* return address in the link reg */
mtcr %r11 /* restore CR */
addi %r1,%r1,BLOCKI_FSIZE /* free the stack frame */
blr /* return */
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
/* Block the current thread saving all non volatile registers and start
* a new thread.
*/
#if 0
.if 0
#endif
void *qt_block (void *helper, void *a0, void *a1, void *newthread);
asm void *qt_block (void *helper, void *a0, void *a1, void *newthread)
{
#if 0
.endif
#endif
# if 0
qt_block:
_qt_block:
#endif
/* prolog code */
stwu %r1,-BLOCK_FSIZE(%r1) /* allocate the stack frame */
mflr %r0 /* return addr in r0 */
stw %r0,LR_SAVE+BLOCK_FSIZE(%r1) /* save return addr in the stack */
/* save non-volatile fp reg */
stfd %f31,FPR_SAVE_31(%r1)
stfd %f30,FPR_SAVE_30(%r1)
stfd %f29,FPR_SAVE_29(%r1)
stfd %f28,FPR_SAVE_28(%r1)
stfd %f27,FPR_SAVE_27(%r1)
stfd %f26,FPR_SAVE_26(%r1)
stfd %f25,FPR_SAVE_25(%r1)
stfd %f24,FPR_SAVE_24(%r1)
stfd %f23,FPR_SAVE_23(%r1)
stfd %f22,FPR_SAVE_22(%r1)
stfd %f21,FPR_SAVE_21(%r1)
stfd %f20,FPR_SAVE_20(%r1)
stfd %f19,FPR_SAVE_19(%r1)
stfd %f18,FPR_SAVE_18(%r1)
stfd %f17,FPR_SAVE_17(%r1)
stfd %f16,FPR_SAVE_16(%r1)
stfd %f15,FPR_SAVE_15(%r1)
stfd %f14,FPR_SAVE_14(%r1)
/* block the thread */
bl qt_blocki
/* the thread is going to be resumed */
/* restore non-volatile fp reg */
lfd %f31,FPR_SAVE_31(%r1)
lfd %f30,FPR_SAVE_30(%r1)
lfd %f29,FPR_SAVE_29(%r1)
lfd %f28,FPR_SAVE_28(%r1)
lfd %f27,FPR_SAVE_27(%r1)
lfd %f26,FPR_SAVE_26(%r1)
lfd %f25,FPR_SAVE_25(%r1)
lfd %f24,FPR_SAVE_24(%r1)
lfd %f23,FPR_SAVE_23(%r1)
lfd %f22,FPR_SAVE_22(%r1)
lfd %f21,FPR_SAVE_21(%r1)
lfd %f20,FPR_SAVE_20(%r1)
lfd %f19,FPR_SAVE_19(%r1)
lfd %f18,FPR_SAVE_18(%r1)
lfd %f17,FPR_SAVE_17(%r1)
lfd %f16,FPR_SAVE_16(%r1)
lfd %f15,FPR_SAVE_15(%r1)
lfd %f14,FPR_SAVE_14(%r1)
lwz %r0,LR_SAVE+BLOCK_FSIZE(%r1) /* recover return addr */
mtlr %r0 /* return address in the link reg */
addi %r1,%r1,BLOCK_FSIZE /* free the stack frame */
blr /* return */
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
/* Start a single argument thread using parameters preloaded in the stack
* during thread initialization (see comments on stack initialization in the
 * header file).
*
* Executes:
*
* only(u, t, userf);
*/
#if 0
.if 0
#endif
void qt_start(void);
asm void qt_start(void)
{
#if 0
.endif
#endif
#if 0
qt_start:
_qt_start:
#endif
lwz %r3,PAR_0(%r1) /* "u" in r3 */
lwz %r4,PAR_1(%r1) /* "t" in r4 */
lwz %r5,PAR_2(%r1) /* "userf" in r5 */
lwz %r6,PAR_3(%r1) /* "only" in r6 */
mtlr %r6 /* "only" address in the link reg */
/* call only(u, t, userf) */
blrl /* jump to "only" */
/* error if it returns */
b qt_error
/* dead code (some inline asm "wants" the epilog, or they generate it) */
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
/* Start a variant argument thread using parameters preloaded in the stack
* during thread initialization (see comments on stack initialization in the
 * header file).
*
* Executes:
*
* startup(t);
* userf_return = userf(...);
* cleanup(pt, userf_return);
*
***** Stack layout on start *****
backchain -> STACK BOTTOM (higher address)
+==========================+
backchain - 4 -> | |
+ LOCAL VARIABLES AREA +
..............
+ +
| |
+--------------------------+
| |
           +    ALIGNMENT PAD         +
..............
+ (if needed) +
| |
+--------------------------+
| | arg(n)
+ +
| |
+ VARIABLE ARGUMENT LIST +
..............
+ for userf call +
SP + PAR(5) -> | | arg(1)
+ +
SP + PAR(4) -> | | arg(0)
+--------------------------+
SP + PAR(3) -> | | cleanup par
+ +
SP + PAR(2) -> | | userf par
+ PARAMETER AREA +
SP + PAR(1) -> | | startup par
+ +
SP + PAR(0) -> | | t par
+--------------------------+
| |
+ LINKAGE AREA +
SP -> | |
+==========================+
STACK TOP (lower address)
Stack grows down
|
V
***** Stack layout before call userf *****
backchain -> STACK BOTTOM (higher address)
+==========================+
backchain - 4 -> | |
+ LOCAL VARIABLES AREA +
..............
+ +
| |
+--------------------------+
| |
           +    ALIGNMENT PAD         +
..............
+ (if needed) +
| |
+--------------------------+
| | arg(n)
+ +
| |
+ VARIABLE ARGUMENT LIST +
..............
+ for userf call +
SP + PAR(1) -> | | arg(1)
+ +
SP + PAR(0) -> | | arg(0)
+--------------------------+
| |
+ LINKAGE AREA +
SP -> | |
+==========================+
STACK TOP (lower address)
Stack grows down
|
V
 * To call "userf(...)", the argument list must be adjacent to the linkage
 * area. Instead of copying the argument list, we move back the linkage area
* (actually, we just increase the SP and copy the backchain). "t" and
* "cleanup" are saved in a local variable area in order to call
* cleanup(pt, userf_return).
*/
#if 0
.if 0
#endif
void qt_vstart(void);
asm void qt_vstart(void)
{
#if 0
.endif
#endif
#if 0
qt_vstart:
_qt_vstart:
#endif
/* NOTICE: the callee routines could save parameter registers in the caller's
 * stack parameter area. We put "t" in PAR(0) in such a way that, if startup(t)
 * saves "t", it will be saved in the same location, thus not deleting
 * any other parameter.
 */
/* since we will move back the linkage area (to make it adjacent to the
* parameter list), we need to save "t" and "cleanup". We have made room for
* this on the bottom of the stack frame. */
/* save parameters in the local variable area */
lwz %r11,0(%r1) /* get the backchain */
lwz %r3,P_T(%r1)
lwz %r4,P_CLEANUP(%r1)
stw %r3,P_T_SAVE(%r11) /* save "pt" */
stw %r4,P_CLEANUP_SAVE(%r11) /* save "cleanup" */
/* call startup(t) */
lwz %r5,P_STARTUP(%r1)
mtlr %r5
blrl /* call "startup" */
/* call userf(...) */
lwz %r11,0(%r1) /* reload backchain (r11 is volatile) */
lwz %r4,P_USERF(%r1) /* load "userf" */
mtlr %r4
/* the first eight parameters of the variant list must be copied into
 * GPR3-GPR10. There is a four-place offset due to "t", "startup",
 * "userf" and "cleanup". */
lwz %r3,PAR_4(%r1)
lwz %r4,PAR_5(%r1)
lwz %r5,PAR_6(%r1)
lwz %r6,PAR_7(%r1)
lwz %r7,PAR_8(%r1)
lwz %r8,PAR_9(%r1)
lwz %r9,PAR_10(%r1)
lwz %r10,PAR_11(%r1)
/* move the linkage area to be adjacent to the argument list */
stw %r11,VARGS_BKOFF(%r1) /* copy backchain */
addi %r1,%r1,VARGS_BKOFF /* move back the stack */
blrl /* call "userf" */
/* call qt_cleanup(void *pt, void *vuserf_return) */
lwz %r11,0(%r1) /* reload backchain (r11 is volatile) */
mr %r4,%r3 /* push "userf" return as 2nd parameter */
lwz %r3,P_T_SAVE(%r11) /* reload "pt" */
lwz %r5,P_CLEANUP_SAVE(%r11) /* reload "cleanup" */
mtlr %r5
blrl
b qt_error
/* dead code (some inline asm "wants" the epilog, or they generate it) */
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
|
chensx00/gem5_custom | 1,378 | ext/systemc/src/sysc/qt/md/mips_b.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
.globl b_call_reg
.globl b_call_imm
.globl b_add
.globl b_load
.ent b_null
b_null:
j $31
.end b_null
.ent b_call_reg
b_call_reg:
la $5,b_null
add $6, $31,0
$L0:
jal $5
jal $5
jal $5
jal $5
jal $5
sub $4, $4,5
bgtz $4,$L0
j $6
.end
.ent b_call_imm
b_call_imm:
add $6, $31,0
$L1:
jal b_null
jal b_null
jal b_null
jal b_null
jal b_null
sub $4, $4,5
bgtz $4,$L1
j $6
.end
.ent b_add
b_add:
add $5, $0,$4
add $6, $0,$4
add $7, $0,$4
add $8, $0,$4
$L2:
sub $4, $4,5
sub $5, $5,5
sub $6, $6,5
sub $7, $7,5
sub $8, $8,5
sub $4, $4,5
sub $5, $5,5
sub $6, $6,5
sub $7, $7,5
sub $8, $8,5
bgtz $4,$L2
j $31
.end
.ent b_load
b_load:
$L3:
ld $0, 0($sp)
ld $0, 4($sp)
ld $0, 8($sp)
ld $0, 12($sp)
ld $0, 16($sp)
ld $0, 20($sp)
ld $0, 24($sp)
ld $0, 28($sp)
ld $0, 32($sp)
ld $0, 36($sp)
sub $4, $4,10
bgtz $4,$L3
j $31
.end
|
chensx00/gem5_custom | 1,516 | ext/systemc/src/sysc/qt/md/sparc_b.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
.globl _b_call_reg
.globl _b_call_imm
.globl _b_add
.globl _b_load
_b_null:
retl
nop
_b_call_reg:
sethi %hi(_b_null),%o4
or %o4,%lo(_b_null),%o4
add %o7,%g0, %o3
L0:
call %o4
nop
call %o4
nop
call %o4
nop
call %o4
nop
call %o4
nop
subcc %o0,1,%o0
bg L0
nop
add %o3,%g0, %o7
retl
nop
_b_call_imm:
sethi %hi(_b_null),%o4
or %o4,%lo(_b_null),%o4
add %o7,%g0, %o3
L1:
call _b_null
call _b_null
call _b_null
call _b_null
call _b_null
subcc %o0,1,%o0
	bg	L1
nop
add %o3,%g0, %o7
retl
nop
_b_add:
add %o0,%g0,%o1
add %o0,%g0,%o2
add %o0,%g0,%o3
add %o0,%g0,%o4
L2:
sub %o0,5,%o0
sub %o1,5,%o1
sub %o2,5,%o2
sub %o3,5,%o3
sub %o4,5,%o4
subcc %o0,5,%o0
sub %o1,5,%o1
sub %o2,5,%o2
sub %o3,5,%o3
sub %o4,5,%o4
bg L2
nop
retl
nop
_b_load:
ld [%sp+ 0], %g0
L3:
ld [%sp+ 4],%g0
ld [%sp+ 8],%g0
ld [%sp+12],%g0
ld [%sp+16],%g0
ld [%sp+20],%g0
ld [%sp+24],%g0
ld [%sp+28],%g0
ld [%sp+32],%g0
ld [%sp+36],%g0
subcc %o0,10,%o0
bg L3
ld [%sp+ 0],%g0
retl
nop
|
chensx00/gem5_custom | 3,974 | ext/systemc/src/sysc/qt/md/m88k.s | /* m88k.s -- assembly support. */
/*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
/* Callee-save r14..r25, r31(sp), r30(fp). r1 === return pc.
* Argument registers r2..r9, return value r2..r3.
*
* On startup, restore regs so retpc === call to a function to start.
*
* We're going to call a function (r2) from within the context switch
* routine. Call it on the new thread's stack on behalf of the old
* thread.
*/
.globl _qt_block
.globl _qt_blocki
.globl _qt_abort
.globl _qt_start
.globl _qt_vstart
/*
** r2: ptr to function to call once curr is suspended
** and control is on r5's stack.
** r3: 1'th arg to *r2.
** r4: 2'th arg to *r2.
** r5: sp of thread to suspend.
**
** The helper routine returns a value that is passed on as the
** return value from the blocking routine. Since we don't
** touch r2 between the helper's return and the end of
** function, we get this behavior for free.
**
** Same entry for integer-only and floating-point, since there
** are no separate integer and floating-point registers.
**
** Each procedure call sets aside a ``home region'' of 8 regs
** for r2-r9 for varargs. For context switches we don't use
** the ``home region'' for varargs so use it to save regs.
** Allocate 64 bytes of save space -- use 32 bytes of register
** save area passed in to us plus 32 bytes we allocated; use
** the other 32 bytes as a save area for calling
** the helper function.
*/
_qt_block:
_qt_blocki:
sub r31, r31,64 /* Allocate reg save space. */
st r1, r31,8+32 /* Save callee-save registers. */
st r14, r31,12+32
st.d r15, r31,16+32
st.d r17, r31,24+32
st.d r19, r31,32+32
st.d r21, r31,40+32
st.d r23, r31,48+32
st r25, r31,56+32
st r30, r31,60+32
_qt_abort:
addu r14, r31,0 /* Remember old sp. */
addu r31, r5,0 /* Set new sp. */
jsr.n r2 /* Call helper. */
addu r2, r14,0 /* Pass old sp as an arg0 to helper. */
ld r1, r31,8+32 /* Restore callee-save registers. */
ld r14, r31,12+32
ld.d r15, r31,16+32
ld.d r17, r31,24+32
ld.d r19, r31,32+32
ld.d r21, r31,40+32
ld.d r23, r31,48+32
ld r25, r31,56+32
ld r30, r31,60+32
jmp.n r1 /* Return to new thread's caller. */
addu r31, r31,64 /* Free register save space. */
/*
** Non-varargs thread startup.
** See `m88k.h' for register use conventions.
*/
_qt_start:
addu r2, r14,0 /* Set user arg `pu'. */
addu r3, r15,0 /* ... user function pt. */
jsr.n r17 /* Call `only'. */
addu r4, r16,0 /* ... user function userf. */
	bsr	 _qt_error		/* `only' erroneously returned. */
/*
** Varargs thread startup.
** See `m88k.h' for register use conventions.
**
** Call the `startup' function with just argument `pt'.
** Then call `vuserf' with 8 register args plus any
** stack args.
** Then call `cleanup' with `pt' and the return value
** from `vuserf'.
*/
_qt_vstart:
addu r18, r30,0 /* Remember arg7 to `vuserf'. */
addu r30, r0,0 /* Null-terminate call chain. */
jsr.n r17 /* Call `startup'. */
addu r2, r15,0 /* `pt' is arg0 to `startup'. */
addu r2, r19,0 /* Set arg0. */
addu r3, r20,0 /* Set arg1. */
addu r4, r21,0 /* Set arg2. */
addu r5, r22,0 /* Set arg3. */
addu r6, r23,0 /* Set arg4. */
addu r7, r24,0 /* Set arg5. */
addu r8, r25,0 /* Set arg6. */
jsr.n r16 /* Call `vuserf'. */
addu r9, r18,0 /* Set arg7. */
addu r3, r2,0 /* Ret. value is arg1 to `cleanup'. */
jsr.n r14 /* Call `cleanup'. */
addu r2, r15,0 /* `pt' is arg0 to `cleanup'. */
bsr _qt_error /* `cleanup' erroneously returned. */
|
chensx00/gem5_custom | 3,710 | ext/systemc/src/sysc/qt/md/i386.s | /* i386.s -- assembly support. */
/*
// QuickThreads -- Threads-building toolkit.
// Copyright (c) 1993 by David Keppel
//
// Permission to use, copy, modify and distribute this software and
// its documentation for any purpose and without fee is hereby
// granted, provided that the above copyright notice and this notice
// appear in all copies. This software is provided as a
// proof-of-concept and for demonstration purposes; there is no
// representation about the suitability of this software for any
// purpose. */
/* NOTE: double-labeled `_name' and `name' for System V compatibility. */
/* NOTE: Mixed C/C++-style comments used. Sorry! */
/* Callee-save: %esi, %edi, %ebx, %ebp
// Caller-save: %eax, %ecx
// Can't tell: %edx (seems to work w/o saving it.)
//
// Assignment:
//
// See ``i386.h'' for the somewhat unconventional stack layout. */
.text
.align 2
.globl _qt_abort
.globl qt_abort
.globl _qt_block
.globl qt_block
.globl _qt_blocki
.globl qt_blocki
.globl _qt_align
.globl qt_align
/* These all have the type signature
//
// void *blocking (helper, arg0, arg1, new)
//
// On procedure entry, the helper is at 4(sp), args at 8(sp) and
// 12(sp) and the new thread's sp at 16(sp). It *appears* that the
// calling convention for the 8X86 requires the caller to save all
// floating-point registers, which makes our life easy. */
/* Halt the currently-running thread. Save its callee-save regs on
// to the stack, 32 bytes. Switch to the new stack (next == 16+32(sp))
// and call the user function (f == 4+32(sp)) with arguments: old sp,
// arg1 (8+32(sp)) and arg2 (12+32(sp)). When the user function is
// done, restore the new thread's state and return.
//
// `qt_abort' is (currently) an alias for `qt_block' because most of
// the work is shared. We could save the insns up to `qt_common' by
// replicating, but w/o replicating we need an initial subtract (to
// offset the stack as if it had been a qt_block) and then a jump
// to qt_common. For the cost of a jump, we might as well just do
// all the work.
//
// The helper function (4(sp)) can return a void* that is returned
// by the call to `qt_block{,i}'. Since we don't touch %eax in
// between, we get that ``for free''. */
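/* A minimal C-level usage sketch of the signature quoted above (the names
// `helper', `a0', `a1' and `new_sp' are illustrative, not from this file):
//
//   void *helper(void *old_sp, void *a0, void *a1);   // runs on the new stack
//   void *ret = qt_blocki(helper, a0, a1, new_sp);    // ret is helper()'s
//                                                     // return value
*/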
_qt_abort:
qt_abort:
_qt_block:
qt_block:
_qt_blocki:
qt_blocki:
pushl %ebp /* Save callee-save, sp-=4. */
pushl %esi /* Save callee-save, sp-=4. */
pushl %edi /* Save callee-save, sp-=4. */
pushl %ebx /* Save callee-save, sp-=4. */
movl %esp, %eax /* Remember old stack pointer. */
qt_common:
movl 32(%esp), %esp /* Move to new thread. */
pushl 28(%eax) /* Push arg 2. */
pushl 24(%eax) /* Push arg 1. */
pushl %eax /* Push arg 0. */
movl 20(%eax), %ebx /* Get function to call. */
call *%ebx /* Call f. */
addl $12, %esp /* Pop args. */
popl %ebx /* Restore callee-save, sp+=4. */
popl %edi /* Restore callee-save, sp+=4. */
popl %esi /* Restore callee-save, sp+=4. */
popl %ebp /* Restore callee-save, sp+=4. */
_qt_align:
qt_align:
ret /* Resume the stopped function. */
.globl _qt_tramp
.globl qt_tramp
_qt_tramp:
qt_tramp:
movl 12(%esp), %eax /* Load 'qt_error' address */
sub $4, %esp /* Align stack pointer to 16-byte boundary */
jmp *%eax /* call 'qt_error' */
hlt /* 'qt_error' never returns */
/* Start a varargs thread. */
.globl _qt_vstart
.globl qt_vstart
_qt_vstart:
qt_vstart:
pushl %edi /* Push `pt' arg to `startup'. */
call *%ebp /* Call `startup'. */
popl %eax /* Clean up the stack. */
call *%ebx /* Call the user's function. */
pushl %eax /* Push return from user's. */
pushl %edi /* Push `pt' arg to `cleanup'. */
call *%esi /* Call `cleanup'. */
hlt /* `cleanup' never returns. */
|
chensx00/gem5_custom | 3,225 | ext/systemc/src/sysc/qt/md/powerpc_mach_b.s | /* speed test for basic CPU operations */
/* Marco Bucci <marco.bucci@inwind.it> */
/* This code was developed with the Code Warrior integrated ppc assembler.
* Macros are used to hide illegal constructs depending on whether you are using a
* "normal" assembler or the "C integrated" assembler.
*/
#if 0
.text
.align 4
.globl b_call_reg
.globl _b_call_reg
.globl b_call_imm
.globl _b_call_imm
.globl b_add
.globl _b_add
.globl b_load
.globl _b_load
.set fsize, 64
.set lrsave, 8
#else
#define fsize 64
#define lrsave 8
#endif
#if 0
.if 0
#endif
asm void b_null(void)
{
#if 0
.endif
#endif
#if 0
b_null:
#endif
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
/* actually the same as the following. How do we get the address of "b_null"?
* I didn't find the right syntax or the right way.
* I should take the current PC, then the difference to "b_null"
* (taking the difference between the labels), perform the sum and go?!
*/
#if 0
.if 0
#endif
asm void b_call_reg(long n)
{
#if 0
.endif
#endif
#if 0
b_call_reg:
_b_call_reg:
#endif
mflr r0
stw r31,-4(r1)
stw r30,-8(r1)
stw r0,lrsave(r1)
stwu r1,-fsize(r1)
mr r30,r3
li r31,0
b L1
L0:
bl b_null
bl b_null
bl b_null
bl b_null
bl b_null
addi r31,r31,5
L1:
cmpw r31,r30
blt L0
lwz r0,lrsave+fsize(r1)
mtlr r0
lwz r31,-4+fsize(r1)
lwz r30,-8+fsize(r1)
addi r1,r1,fsize
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
#if 0
.if 0
#endif
asm void b_call_imm(long n)
{
#if 0
.endif
#endif
#if 0
b_call_imm:
_b_call_imm:
#endif
mflr r0
stw r31,-4(r1)
stw r30,-8(r1)
stw r0,lrsave(r1)
stwu r1,-fsize(r1)
mr r30,r3
li r31,0
b L3
L2:
bl b_null
bl b_null
bl b_null
bl b_null
bl b_null
addi r31,r31,5
L3:
cmpw r31,r30
blt L2
lwz r0,lrsave+fsize(r1)
mtlr r0
lwz r31,-4+fsize(r1)
lwz r30,-8+fsize(r1)
addi r1,r1,fsize
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
#if 0
.if 0
#endif
asm void b_add(long n)
{
#if 0
.endif
#endif
#if 0
b_add:
_b_add:
#endif
mflr r0
stw r31,-4(r1)
stw r30,-8(r1)
stw r0,lrsave(r1)
stwu r1,-fsize(r1)
mr r30,r3
li r31,0
b L5
L4:
addi r3,r3,5
addi r4,r4,5
addi r5,r5,5
addi r6,r6,5
addi r7,r7,5
addi r3,r3,5
addi r4,r4,5
addi r5,r5,5
addi r6,r6,5
addi r7,r7,5
addi r31,r31,10
L5:
cmpw r31,r30
blt L4
lwz r0,lrsave+fsize(r1)
mtlr r0
lwz r31,-4+fsize(r1)
lwz r30,-8+fsize(r1)
addi r1,r1,fsize
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
#if 0
.if 0
#endif
asm void b_load(long n)
{
#if 0
.endif
#endif
#if 0
b_load:
_b_load:
#endif
mflr r0
stw r31,-4(r1)
stw r30,-8(r1)
stw r0,lrsave(r1)
stwu r1,-fsize(r1)
mr r30,r3
li r31,0
b L7
L6:
lwz r3,4(r1)
lwz r4,8(r1)
lwz r5,12(r1)
lwz r6,16(r1)
lwz r7,20(r1)
lwz r3,24(r1)
lwz r4,28(r1)
lwz r5,32(r1)
lwz r6,36(r1)
lwz r7,40(r1)
addi r31,r31,10
L7:
cmpw r31,r30
blt L6
lwz r0,lrsave+fsize(r1)
mtlr r0
lwz r31,-4+fsize(r1)
lwz r30,-8+fsize(r1)
addi r1,r1,fsize
blr
#if 0
.if 0
#endif
}
#if 0
.endif
#endif
|
chensx00/gem5_custom | 1,043 | ext/systemc/src/sysc/qt/md/ksr1_b.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
.file "ksr1_b.s"
.def .debug; .endef
.globl b_call_reg$TXT
.globl b_call_reg
.globl b_call_imm$TXT
.globl b_call_imm
.globl b_add$TXT
.globl b_add
.globl b_load$TXT
.globl b_load
b_call_reg:
b_call_imm:
b_add:
b_load:
.word b_call_reg$TXT
.word qt_error
.word qt_error$TXT
b_call_reg$TXT:
b_call_imm$TXT:
b_add$TXT:
b_load$TXT:
finop ; cxnop
finop ; cxnop
finop ; ld8 16(%cp),%c4
finop ; ld8 8(%cp),%cp
finop ; cxnop
finop ; cxnop
finop ; jsr %c4,0(%c4)
finop ; cxnop
finop ; cxnop
|
chensx00/gem5_custom | 1,998 | ext/systemc/src/sysc/qt/md/m88k_b.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
.text
.globl _b_call_reg
.globl _b_call_imm
.globl _b_add
.globl _b_load
_b_null:
jmp r1
_b_call_reg:
subu r31, r31,8 /* Alloc ret pc save space. */
st r1, r31,32 /* Save ret pc. */
or.u r3, r0,hi16(_b_null) /* Put call addr in a reg. */
or r3, r3,lo16(_b_null)
jsr r3
L0:
jsr r3
jsr r3
jsr r3
jsr.n r3
subu r2, r2,5 /* Decrement #of iter to go. */
bcnd.n gt0,r2,L0
jsr r3
ld r1, r31,32
jmp r1
_b_call_imm:
subu r31, r31,8 /* Alloc ret pc save space. */
st r1, r31,32 /* Save ret pc. */
bsr _b_null
L1:
bsr _b_null
bsr _b_null
bsr _b_null
bsr.n _b_null
subu r2, r2,5 /* Decrement #of iter to go. */
bcnd.n gt0,r2,L1
bsr _b_null
ld r1, r31,32
jmp r1
_b_add:
add r0, r3,r4
L2:
add r3, r4,r5
add r4, r5,r6
add r5, r6,r7
add r8, r9,r0
add r0, r3,r4
add r3, r4,r5
add r4, r5,r6
add r5, r6,r7
add r8, r9,r0
add r0, r3,r4
add r3, r4,r5
add r4, r5,r6
add r5, r6,r7
add r8, r9,r0
add r0, r3,r4
add r3, r4,r5
add r4, r5,r6
add r5, r6,r7
add r8, r9,r0
subu r2, r2,20 /* Decrement #of iter to go. */
bcnd.n gt0,r2,L2
add r0, r3,r4
jmp r1
_b_load:
ld r0, r31,0
L3:
ld r3, r31,4
ld r4, r31,8
ld r5, r31,12
ld r6, r31,16
ld r0, r31,0
ld r3, r31,4
ld r4, r31,8
ld r5, r31,12
ld r6, r31,16
ld r0, r31,0
ld r3, r31,4
ld r4, r31,8
ld r5, r31,12
ld r6, r31,16
ld r0, r31,0
ld r3, r31,4
ld r4, r31,8
ld r5, r31,12
ld r6, r31,16
subu r2, r2,20 /* Decrement #of iter to go. */
bcnd.n gt0,r2,L3
ld r0, r31,0
jmp r1
|
chensx00/gem5_custom | 1,635 | ext/systemc/src/sysc/qt/md/axp_b.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
.text
.globl b_call_reg
.globl b_call_imm
.globl b_add
.globl b_load
.ent b_null
b_null:
ret $31,($18),1
.end b_null
.ent b_call_reg
b_call_reg:
lda $27,b_null
$L0:
jsr $18,($27)
jsr $18,($27)
jsr $18,($27)
jsr $18,($27)
jsr $18,($27)
jsr $18,($27)
jsr $18,($27)
jsr $18,($27)
jsr $18,($27)
jsr $18,($27)
subq $16,1,$16
bgt $16,$L0
ret $31,($26),1
.end
.ent b_call_imm
b_call_imm:
$L1:
jsr $18,b_null
jsr $18,b_null
jsr $18,b_null
jsr $18,b_null
jsr $18,b_null
jsr $18,b_null
jsr $18,b_null
jsr $18,b_null
jsr $18,b_null
jsr $18,b_null
subq $16,1,$16
bgt $16,$L1
ret $31,($26),1
.end
.ent b_add
b_add:
$L2:
addq $31,$31,$31
addq $31,$31,$31
addq $31,$31,$31
addq $31,$31,$31
addq $31,$31,$31
addq $31,$31,$31
addq $31,$31,$31
addq $31,$31,$31
addq $31,$31,$31
addq $31,$31,$31
subq $16,1,$16
bgt $16,$L2
ret $31,($26),1
.end
.ent b_load
b_load:
$L3:
ldq $31,0($30)
ldq $31,8($30)
ldq $31,16($30)
ldq $31,24($30)
ldq $31,32($30)
ldq $31,0($30)
ldq $31,8($30)
ldq $31,16($30)
ldq $31,24($30)
ldq $31,32($30)
subq $16,1,$16
bgt $16,$L3
ret $31,($26),1
.end
|
chensx00/gem5_custom | 2,293 | ext/systemc/src/sysc/qt/md/iX86_64.s | /* iX86_64.s -- assembly support. */
/*
// QuickThreads -- Threads-building toolkit.
// Copyright (c) 1993 by David Keppel
//
// Permission to use, copy, modify and distribute this software and
// its documentation for any purpose and without fee is hereby
// granted, provided that the above copyright notice and this notice
// appear in all copies. This software is provided as a
// proof-of-concept and for demonstration purposes; there is no
// representation about the suitability of this software for any
// purpose. */
/* 64-bit Intel Architecture Support
// written by Andy Goodrich, Forte Design Systems, Inc. */
/* NOTE: double-labeled `_name' and `name' for System V compatibility. */
/* NOTE: Mixed C/C++-style comments used. Sorry! */
.text
.align 2
.globl _qt_abort
.globl qt_abort
.globl _qt_block
.globl qt_block
.globl _qt_blocki
.globl qt_blocki
.globl _qt_align
.globl qt_align
_qt_abort:
qt_abort:
_qt_block:
qt_block:
_qt_blocki:
qt_blocki:
/* 11 (return address.) */
pushq %rbp /* 10 (push stack frame top.) */
movq %rsp, %rbp /* set new stack frame top. */
/* save registers. */
subq $8, %rsp /* 9 (Stack alignment) */
pushq %r12 /* 8 ... */
pushq %r13 /* 7 ... */
pushq %r14 /* 6 ... */
pushq %r15 /* 5 ... */
pushq %rbx /* 4 ... */
pushq %rcx /* 3 ... (new stack address) */
pushq %rdx /* 2 ... (arg) */
pushq %rdi /* 1 ... (address of save function.) */
pushq %rsi /* 0 ... (cor) */
movq %rdi, %rax /* get address of save function. */
movq %rsp, %rdi /* set current stack as save argument. */
movq %rcx, %rsp /* swap stacks. */
movq %rcx, %rbp /* adjust stack frame pointer. */
addq $10*8, %rbp /* ... */
call *%rax /* call function to save stack pointer. */
/* restore registers. */
popq %rsi /* ... */
popq %rdi /* ... */
popq %rdx /* ... */
popq %rcx /* ... */
popq %rbx /* ... */
popq %r15 /* restore registers from new stack. */
popq %r14 /* ... */
popq %r13 /* ... */
popq %r12 /* ... */
leave /* unwind stack. */
_qt_align:
qt_align:
ret /* return. */
|
chensx00/gem5_custom | 491 | ext/systemc/src/sysc/qt/md/null.s | /*
* QuickThreads -- Threads-building toolkit.
* Copyright (c) 1993 by David Keppel
*
* Permission to use, copy, modify and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that the above copyright notice and this notice
* appear in all copies. This software is provided as a
* proof-of-concept and for demonstration purposes; there is no
* representation about the suitability of this software for any
* purpose.
*/
|
chensx00/gem5_custom | 2,564 | util/m5/src/abi/riscv/m5op.S | /*
* Copyright (c) 2020 The Regents of the University of California.
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gem5/asm/generic/m5ops.h>
// riscv pseudo instructions have bits 1:0 (QUADRANT) = 0x3,
// bits 6:2 (OPCODE5) = 0x1e, and bits 31:25 (M5FUNC) specifying
// the function performed by the pseudo instruction
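// As a worked example of the encoding below (0x21 is just a hypothetical
// function number, not taken from m5ops.h):
//   uint32_t insn = 0x0000007bu | (0x21u << 25);   // == 0x4200007b
// which decodes as QUADRANT = 0x3, OPCODE5 = 0x1e, M5FUNC = 0x21.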
.macro m5op_func, name, func
.globl \name
\name:
.long 0x0000007b | (\func << 25)
ret
.endm
.text
#define M5OP(name, func) m5op_func name, func;
M5OP_FOREACH
#undef M5OP
|
chensx00/gem5_custom | 2,273 | util/m5/src/abi/x86/m5op_addr.S | /*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gem5/asm/generic/m5ops.h>
/*
* Note: The ABI for pseudo ops using the M5OP_ADDR is defined in
* src/arch/x86/pseudo_inst_abi.hh. If the ABI is changed below, it's likely
* that the ABI in the arch directory will also need to be updated.
*/
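/*
 * In rough C terms, each stub generated below boils down to a single read
 * (a sketch only; `m5_mem' is the mapped gem5 "magic" address range set up
 * elsewhere in this utility):
 *
 *   uint64_t result =
 *       *(volatile uint64_t *)((uint8_t *)m5_mem + ((uint64_t)func << 8));
 */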
.macro m5op_func, name, func
.globl \name
.func \name
\name:
#if defined(M5OP_PIC)
mov m5_mem@gotpcrel(%rip), %r11
mov (%r11), %r11
#else
mov m5_mem, %r11
#endif
mov $\func, %rax
shl $8, %rax
mov 0(%r11, %rax, 1), %rax
ret
.endfunc
.endm
.text
#define M5OP(name, func) m5op_func M5OP_MERGE_TOKENS(name, _addr), func;
M5OP_FOREACH
#undef M5OP
|
chensx00/gem5_custom | 1,867 | util/m5/src/abi/x86/m5op.S | /*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gem5/asm/generic/m5ops.h>
.macro m5op_func, name, func
.globl \name
.func \name
\name:
.byte 0x0F, 0x04
.word \func
ret
.endfunc
.endm
.text
#define M5OP(name, func) m5op_func name, func;
M5OP_FOREACH
#undef M5OP
|
chensx00/gem5_custom | 2,531 | util/m5/src/abi/thumb/m5op.S | /*
* Copyright (c) 2010, 2016 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.syntax unified
.thumb
#include <gem5/asm/generic/m5ops.h>
.text
.macro m5op_func name, func
.align 2
.globl \name
.thumb_func
\name:
.short 0xEE00 | \func
.short 0x0110
bx lr
.endm
.text
#define M5OP(name, func) m5op_func name, func;
M5OP_FOREACH
#undef M5OP
|
chensx00/gem5_custom | 2,497 | util/m5/src/abi/arm/m5op.S | /*
* Copyright (c) 2010, 2016 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.syntax unified
#include <gem5/asm/generic/m5ops.h>
.text
.macro m5op_func name, func
.align 2
.globl \name
\name:
.long 0xEE000110 | (\func << 16)
mov pc,lr
.endm
.text
#define M5OP(name, func) m5op_func name, func;
M5OP_FOREACH
#undef M5OP
|
chensx00/gem5_custom | 1,994 | util/m5/src/abi/sparc/m5op.S | /*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define m5_op 0x2
#define m5_op3 0x37
#include <gem5/asm/generic/m5ops.h>
.macro m5op_func name, func
.section ".text";
.align 4;
.global \name;
.type \name, #function;
\name:
retl
.long (m5_op) << 30 | (m5_op3) << 19 | (\func) << 7;
.size \name, (.-\name)
.endm
#define M5OP(name, func) m5op_func name, func;
M5OP_FOREACH
#undef M5OP
|
chensx00/gem5_custom | 2,816 | util/m5/src/abi/arm64/m5op_addr.S | /*
* Copyright (c) 2010-2013, 2016-2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gem5/asm/generic/m5ops.h>
.macro m5op_func, name, func
.globl \name
\name:
// Load the value of m5_mem into x9...
#if defined(M5OP_PIC)
// using the global offset table.
adrp x9, :got:m5_mem
ldr x9, [ x9, #:got_lo12:m5_mem ]
ldr x9, [ x9 ]
#else
// normally.
adrp x9, m5_mem
ldr x9, [ x9, #:lo12:m5_mem ]
#endif
movz x10, #(\func << 8)
ldr x0, [ x9, x10 ]
ret
.endm
.text
#define M5OP(name, func) m5op_func M5OP_MERGE_TOKENS(name, _addr), func;
M5OP_FOREACH
#undef M5OP
|
chensx00/gem5_custom | 3,752 | util/m5/src/abi/arm64/m5op_semi.S | /*
* Copyright (c) 2010-2013, 2016-2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gem5/asm/generic/m5ops.h>
.macro m5op_func, name, func
.globl \name
\name:
// Put the m5 op number in x16.
mov x16, #(\func << 8)
// Branch into the common handler for the rest.
b 1f
.endm
.text
#define M5OP(name, func) m5op_func M5OP_MERGE_TOKENS(name, _semi), func;
M5OP_FOREACH
#undef M5OP
1:
// Get the address of the argument block.
ldr x17, =m5_semi_argument_block
// Store the m5 op number in the first slot.
str x16, [ x17 ], #8
// Store all 8 possible arguments in the subsequent slots. We don't
// know how many we need, so just store them all.
str x0, [ x17 ], #8
str x1, [ x17 ], #8
str x2, [ x17 ], #8
str x3, [ x17 ], #8
str x4, [ x17 ], #8
str x5, [ x17 ], #8
str x6, [ x17 ], #8
str x7, [ x17 ], #8
// Set x0 to the m5 op semi-hosting call number.
mov x0, #0x100
// Set x1 to the address of the argument blob.
ldr x1, =m5_semi_argument_block
// Trigger the semihosting call with the gem5 specific immediate.
hlt #0x5d57
ret
.data
.globl m5_semi_argument_block
m5_semi_argument_block:
.quad 0 // function
.quad 0 // argument 0
.quad 0 // argument 1
.quad 0 // argument 2
.quad 0 // argument 3
.quad 0 // argument 4
.quad 0 // argument 5
.quad 0 // argument 6
.quad 0 // argument 7
|
chensx00/gem5_custom | 2,470 | util/m5/src/abi/arm64/m5op.S | /*
* Copyright (c) 2010-2013, 2016-2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gem5/asm/generic/m5ops.h>
.macro m5op_func, name, func
.globl \name
\name:
.long 0xff000110 | (\func << 16)
ret
.endm
.text
#define M5OP(name, func) m5op_func name, func;
M5OP_FOREACH
#undef M5OP
|
CHERI-Alliance/CHERI-Microkit | 6,915 | loader/src/riscv/crt0.S | /*
* Copyright 2024, UNSW
*
* SPDX-License-Identifier: BSD-2-Clause
*/
.extern main
.extern __global_pointer$
#ifndef FIRST_HART_ID
#error "Loader must be defined with FIRST_HART_ID"
#endif
.section ".text.start"
#define STACK_SIZE 4096
/* SBI commands for the HSM extension */
#define SBI_HSM_BASE_EID 0x48534DULL
#define SBI_HSM_BASE_HART_START_FID 0
#define SBI_HSM_BASE_HART_STOP_FID 1
/* SBI commands for the base extension */
#define SBI_EXT_BASE_EID 0x10
#define SBI_EXT_BASE_PROBE_EXT_FID 3
/* Unfortunately, the latest version of the SBI (v1.0.0) specification does not
* specify the starting state of supervisor software.
* OpenSBI gives the following:
* a0: current hart ID
* a1: address of the DTB (given by previous booting stage)
* However, these parameters are not enforced by the specification. Therefore, if you were
* to use a different SBI implementation or your own, it might not be compatible with
* this loader.
* The hart ID is required as seL4 expects to be booted on CONFIG_FIRST_HART_ID.
* The address of the DTB is passed to seL4 which is then passed to the initial
* task (via the BootInfo frame).
*
* On RISC-V, only M-Mode can access the CSR mhartid to get the actual hart ID;
* the SBI running there is responsible for passing this ID up. In S-Mode there
* is no way to ever query it again, so we have to preserve what we get passed
* here. This is a RISC-V design decision; more background can be found at
* https://github.com/riscv/riscv-sbi-doc/issues/25.
*
* It seems that OpenSBI starts us at a random hart and keeps all other harts
* suspended or spinning. However, even on non-SMP configurations there might
* be an expectation that we are running on FIRST_HART_ID. If the current
* hart turns out to be a different one, we have to switch harts somehow. The
* SBI Hart State Management (HSM) extension exists for this, but it might not
* be implemented. In this case, there is nothing we can do here in the assembly
* startup code, but C boot code might still have platform specific proprietary
* ways to switch harts.
*/
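/* For orientation, the SBI calls made below follow the standard calling
 * convention: extension ID in a7, function ID in a6, arguments in a0..a5,
 * error code returned in a0 and a value in a1. A hedged C sketch of the
 * probe performed first (the names sbi_call/sbiret are illustrative, not
 * from this code):
 *
 *   struct sbiret { long error; long value; };
 *   struct sbiret r = sbi_call(SBI_EXT_BASE_EID, SBI_EXT_BASE_PROBE_EXT_FID,
 *                              SBI_HSM_BASE_EID);
 *   // HSM is usable only if r.error == 0 and r.value != 0
 */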
.global _start
_start:
.option push
.option norelax
1:auipc gp, %pcrel_hi(__global_pointer$)
addi gp, gp, %pcrel_lo(1b)
.option pop
/* save the parameters passed */
mv s0, a0 /* preserve a0 (hart id) in s0 */
mv s2, a1 /* preserve a1 (dtb) in s2 */
/* Attach the stack to sp before calling any C functions */
la sp, (_stack + STACK_SIZE)
/* Check if the Hart State Management (HSM) extension exists, so it can be
* used to switch harts if we are not running on hart FIRST_HART_ID.
* The SBI returns SBI_SUCCESS (0) in a0 if the call could be processed or an
* error code if not. On SBI_SUCCESS the value in a1 is 0 if the extension is
* not available or an extension-specific non-zero value if it is available.
*/
li a7, SBI_EXT_BASE_EID
li a6, SBI_EXT_BASE_PROBE_EXT_FID
li a0, SBI_HSM_BASE_EID
ecall /* call SBI to probe for HSM extension */
mv a2, a0 /* move SBI call generic return code to a2 as we need a0 */
mv a3, a1 /* move SBI call error return code to a3 as we need a1 */
mv a0, s0 /* restore a0 to hold hart ID passed by the boot loader */
mv a1, s2 /* restore a1 to hold dtb address passed by the boot loader */
bnez a2, _start1 /* goto _start1 if SBI did not return SBI_SUCCESS (0) */
beqz a3, _start1 /* goto _start1 if HSM extension is missing */
/* Check if we are running on the hart we expect on, FIRST_HART_ID */
li s1, FIRST_HART_ID
beq a0, s1, _start1 /* goto _start1 if we are on FIRST_HART_ID */
/* Use HSM extension to start hart FIRST_HART_ID. */
hsm_switch_hart:
li a7, SBI_HSM_BASE_EID
li a6, SBI_HSM_BASE_HART_START_FID
li a0, FIRST_HART_ID
mv a2, s2 /* dtb address to be passed in a1 when new hart starts is 3rd parameter */
la a1, _start1 /* where to start the hart */
ecall /* call SBI to start hart FIRST_HART_ID */
/* Since we are not the designated primary hart, continue the boot process as
* secondary hart
*/
mv a0, s0 /* restore a0 to hold hart ID passed by OpenSBI */
j spin_hart /* Spin any hart that isn't FIRST_HART_ID since we only support single-core right now. */
relocate:
/* Save the return address */
addi sp, sp, -0x8
sd ra, (sp)
/* Check if we are already in the right place */
li a0, LINK_ADDRESS
la a1, _start
beq a0, a1, image_ok
/* Log that we are being relocated */
jal relocation_log
/* Restore a0 and a1 */
li a0, LINK_ADDRESS
la a1, _start
/* Sanity check: We don't want to overwrite ourselves! We assume that
* everything between _start (src_start) and _bss_end (src_end) is important (i.e.
* something that might be run while relocating) but allow overlap for
* things after _bss_end i.e. the loader_data.
*/
la a2, _bss_end
/* The loader_data is directly after _bss_end, with the first
* value being the loader_data struct. The second field of this
* struct is the size of the loader_data region, so we add
* this to _bss_end to get the real end of the image
*/
ld a3, 8(a2)
add a2, a2, a3
sub a2, a2, a1
la a3, _bss_end
sub a5, a3, a1
add a4, a0, a5
/* At this point:
* a0: dst_start (LINK_ADDRESS)
* a1: src_start (_start)
* a2: image_size
* a3: src_end (_bss_end)
* a4: dst_end (LINK_ADDRESS + (_bss_end - _start))
*/
/* check: if (dst_end >= src_start && dst_end < src_end) { abort } */
blt a4, a1, 1f
bge a4, a3, 1f
j cant_reloc
1:
/* check: if (dst_start >= src_start && dst_start < src_end) { abort } */
blt a0, a1, 2f
bge a0, a3, 2f
cant_reloc:
j relocation_failed
2:
/* a0 = desired image base */
/* a1 = current image base */
/* a2 = image size */
jal memmove
j 1f
image_ok:
li a0, 1
1:
ld ra, (sp)
addi sp, sp, 0x8
ret
_start1: /* a0 must hold current hart ID passed by bootloader */
/* a1 must hold dtb address passed by bootloader */
.option push
.option norelax
1:auipc gp, %pcrel_hi(__global_pointer$)
addi gp, gp, %pcrel_lo(1b)
.option pop
la sp, (_stack + STACK_SIZE)
/* save the parameters passed */
mv s0, a0 /* preserve a0 (hart id) in s0 */
mv s2, a1 /* preserve a1 (dtb) in s2 */
/* May not have been loaded in the right location.
* Try and move ourselves so we're in the right place
*/
jal relocate
/* Check if we were relocated. If not, just jump straight to main */
li a1, 1
beq a0, a1, 1f
/* Restore arguments and go back to _start of the relocated image */
mv s3, a0
mv a0, s0 /* restore a0 to hold hart ID passed by the boot loader */
mv a1, s2 /* restore a1 to hold dtb address passed by the boot loader */
jr s3
1:
la s0, main
jr s0
.text
.global secondary_harts
secondary_harts:
.option push
.option norelax
1:auipc gp, %pcrel_hi(__global_pointer$)
addi gp, gp, %pcrel_lo(1b)
.option pop
spin_hart:
wfi
j spin_hart
|
CHERI-Alliance/CHERI-Microkit | 2,883 | loader/src/aarch64/crt0.S | /*
* Copyright 2021, Breakaway Consulting Pty. Ltd.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
.extern main
.section ".text.start"
.global _start;
.type _start, %function;
_start:
mrs x0, mpidr_el1
and x0, x0,#0xFF // Check processor id
cbz x0, master // Hang for all non-primary CPU
proc_hang:
wfe
b proc_hang
master:
adrp x1, _stack
add x1, x1, #0xff0
mov sp, x1
/* May not have been loaded in the right location.
* Try and move ourselves so we're in the right place
*/
bl relocate
cmp x0, #1
beq 1f
/* Otherwise, jump to the start of the new loader */
br x0
1:
b main
relocate:
stp x29, x30, [sp, #-16]!
mov x29, sp
/* Check if the image is already at the correct location */
ldr x0, =LINK_ADDRESS
adr x1, _start
cmp x0, x1
beq image_ok
/* Log that we are being relocated */
bl relocation_log
/* Restore x0 and x1 */
ldr x0, =LINK_ADDRESS
adr x1, _start
/* Sanity check: We don't want to overwrite ourselves! We assume that
* everything between _start (src_start) and _bss_end (src_end) is important (i.e.
* something that might be run while relocating) but allow overlap for
* things after _bss_end i.e. the loader_data.
*/
adrp x2, _bss_end
add x2, x2, #:lo12:_bss_end
/* The loader_data is directly after _bss_end, with the first
* value being the loader_data struct. The second field of
* this struct is the size of the loader_data region, so we
* add this to _bss_end to get the real end of the image
*/
ldr x3, [x2, #+8]
add x2, x2, x3
sub x2, x2, x1
adrp x3, _bss_end
add x3, x3, #:lo12:_bss_end
sub x5, x3, x1
add x4, x0, x5 /* dst_end */
/* At this point:
* x0: dst_code_start (LINK_ADDRESS)
* x1: src_code_start (_start)
* x2: image_size
* x3: src_code_end (_bss_end)
* x4: dst_code_end (LINK_ADDRESS + (_bss_end - _start))
*/
/* check: if (dst_end >= src_start && dst_end < src_end) { abort } */
cmp x4, x1
blt 1f
cmp x4, x3
bge 1f
b cant_reloc
1:
/* check: if (dst_start >= src_start && dst_start < src_end) { abort } */
cmp x0, x1
blt 2f
cmp x0, x3
bge 2f
cant_reloc:
b relocation_failed
2:
/* x0 = desired image base */
/* x1 = current image base */
/* x2 = image size */
bl memmove
/* x0 = dest, save it to a callee-saved register while we invalidate icache */
mov x19, x0
bl flush_dcache
bl invalidate_icache
mov x0, x19
b 1f
image_ok:
/* Already in the right place, keep booting */
mov x0, #1
1:
ldp x29, x30, [sp], #16
ret
|
CHERI-Alliance/CHERI-Microkit | 11,380 | loader/src/aarch64/util64.S | /*
* Copyright 2021, Breakaway Consulting Pty. Ltd.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#define PSR_F_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
#define PSR_MODE_EL0t 0x00000000
#define PSR_MODE_EL1t 0x00000004
#define PSR_MODE_EL1h 0x00000005
#define PSR_MODE_EL2t 0x00000008
#define PSR_MODE_EL2h 0x00000009
#define PSR_MODE_SVC_32 0x00000013
#define SCR_RW_BIT 0x00000400
#define SCR_SMD_BIT 0x00000080
#define SCR_RES_BITS 0x00000030
#define SCR_NS_BIT 0x00000001
#define MT_DEVICE_nGnRnE 0
#define MT_DEVICE_nGnRE 1
#define MT_DEVICE_GRE 2
#define MT_NORMAL_NC 3
#define MT_NORMAL 4
#define MAIR(_attr, _mt) ((_attr) << ((_mt) * 8))
#define TCR_T0SZ(x) ((64 - (x)))
#define TCR_T1SZ(x) ((64 - (x)) << 16)
#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
#define TCR_IRGN0_WBWC (1 << 8)
#define TCR_IRGN_NC ((0 << 8) | (0 << 24))
#define TCR_IRGN_WBWA ((1 << 8) | (1 << 24))
#define TCR_IRGN_WT ((2 << 8) | (2 << 24))
#define TCR_IRGN_WBnWA ((3 << 8) | (3 << 24))
#define TCR_IRGN_MASK ((3 << 8) | (3 << 24))
#define TCR_ORGN0_WBWC (1 << 10)
#define TCR_ORGN_NC ((0 << 10) | (0 << 26))
#define TCR_ORGN_WBWA ((1 << 10) | (1 << 26))
#define TCR_ORGN_WT ((2 << 10) | (2 << 26))
#define TCR_ORGN_WBnWA ((3 << 10) | (3 << 26))
#define TCR_ORGN_MASK ((3 << 10) | (3 << 26))
#define TCR_SH0_ISH (3 << 12)
#define TCR_SHARED ((3 << 12) | (3 << 28))
#define TCR_TG0_4K (0 << 14)
#define TCR_TG0_64K (1 << 14)
#define TCR_TG1_4K (2 << 30)
#define TCR_TG1_64K (3 << 30)
#define TCR_PS_4G (0 << 16)
#define TCR_PS_64G (1 << 16)
#define TCR_PS_1T (2 << 16)
#define TCR_PS_4T (3 << 16)
#define TCR_PS_16T (4 << 16)
#define TCR_PS_256T (5 << 16)
#ifndef PHYSICAL_ADDRESS_BITS
#error "Missing PHYSICAL_ADDRESS_BITS"
#endif
#if PHYSICAL_ADDRESS_BITS == 40
#define TCR_PS TCR_PS_1T
#elif PHYSICAL_ADDRESS_BITS == 44
#define TCR_PS TCR_PS_16T
#else
#error "Unsupported PHYSICAL_ADDRESS_BITS value"
#endif
#define TCR_EL2_RES1 ((1 << 23) | (1 << 31))
#define TCR_ASID16 (1 << 36)
/* Assembler Macros */
.macro dcache op
dsb sy
mrs x0, clidr_el1
and x3, x0, #0x7000000
lsr x3, x3, #23
cbz x3, finished_\op
mov x10, #0
loop1_\op:
add x2, x10, x10, lsr #1
lsr x1, x0, x2
and x1, x1, #7
cmp x1, #2
b.lt skip_\op
msr csselr_el1, x10
isb
mrs x1, ccsidr_el1
and x2, x1, #7
add x2, x2, #4
mov x4, #0x3ff
and x4, x4, x1, lsr #3
clz w5, w4
mov x7, #0x7fff
and x7, x7, x1, lsr #13
loop2_\op:
mov x9, x4
loop3_\op:
lsl x6, x9, x5
orr x11, x10, x6
lsl x6, x7, x2
orr x11, x11, x6
dc \op, x11
subs x9, x9, #1
b.ge loop3_\op
subs x7, x7, #1
b.ge loop2_\op
skip_\op:
add x10, x10, #2
cmp x3, x10
b.gt loop1_\op
finished_\op:
mov x10, #0
msr csselr_el1, x10
dsb sy
isb
.endm
/*
* Disable the MMU.
*
* Arguments:
* system control register for the appropriate exception level
* temporary register
*
* This clears bits 0, 2 and 12 in the control register
* which map to M (MMU disable), C (cache disable) and I (icache disable)
* bits.
*/
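/*
 * Concretely, the three bits cleared below are SCTLR_ELx.M (bit 0),
 * SCTLR_ELx.C (bit 2) and SCTLR_ELx.I (bit 12), i.e. a combined mask of
 * (1 << 0) | (1 << 2) | (1 << 12) == 0x1005.
 */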
.macro disable_mmu sctlr tmp
mrs \tmp, \sctlr
bic \tmp, \tmp, #(1 << 0)
bic \tmp, \tmp, #(1 << 2)
bic \tmp, \tmp, #(1 << 12)
msr \sctlr, \tmp
isb
.endm
/*
* Enable the MMU.
*
* Arguments:
* system control register for the appropriate exception level
* temporary register
*
* This sets bits 0, 2 and 12 in the control register
* which map to M (MMU enable), C (cache enable) and I (icache enable)
* bits.
*/
.macro enable_mmu sctlr tmp
mrs \tmp, \sctlr
orr \tmp, \tmp, #(1 << 0)
orr \tmp, \tmp, #(1 << 2)
orr \tmp, \tmp, #(1 << 12)
msr \sctlr, \tmp
isb
.endm
/* Standard function decorators. */
#define BEGIN_FUNC(_name) \
.global _name ; \
.type _name, %function ; \
_name:
#define END_FUNC(_name) \
.size _name, .-_name
/* Invalidate the D-cache */
BEGIN_FUNC(invalidate_dcache)
dcache isw
ret
END_FUNC(invalidate_dcache)
/* Flush the D-cache */
BEGIN_FUNC(flush_dcache)
dcache cisw
ret
END_FUNC(flush_dcache)
/* Invalidate the I-cache */
BEGIN_FUNC(invalidate_icache)
ic iallu
dsb nsh
isb
ret
END_FUNC(invalidate_icache)
/*
* Switch from to running in EL1 (assumes correctly running in EL2).
*/
BEGIN_FUNC(switch_to_el1)
/* push frame pointer and link register to the stack */
stp x29, x30, [sp, #-16]!
mov x29, sp
bl flush_dcache
/* Disable EL2 MMU (stage1) */
disable_mmu sctlr_el2, x9
bl invalidate_icache
/* Set execution state for EL1 to AArch64 -- disable virtualization */
mov x9, #(1 << 31)
msr hcr_el2, x9
/* Disable traps to EL2 */
/* FIXME: This enables the 'TZ' bit, which seems to be against the design */
mov x9, #0x33ff
msr cptr_el2, x9
msr hstr_el2, xzr
/* Since stage 2 addressing is disabled, clear the base register */
msr vttbr_el2, xzr
/* Disable EL1 MMU (stage1) */
disable_mmu sctlr_el1, x9
/* Set SPSR for EL2
* I => interrupts are masked
* F => fast interrupts are masked
* A => SError interrupts are masked
* D => debug exceptions are masked
* MODE_EL1h => return to EL1 on eret (using SP_EL1, not SP_EL0)
*/
mov x9, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL1h)
msr spsr_el2, x9
/* The same stack is reused */
ldp x29, x30, [sp], #16
mov x10, sp
msr sp_el1, x10
/* set ELR so that it's possible to perform ERET */
msr elr_el2, x30
eret
END_FUNC(switch_to_el1)
BEGIN_FUNC(switch_to_el2)
/*
* RW => run as AArch64
* SMD => disable secure monitor calls
* RES => reserved set as 1
* NS => run as non-secure
*/
mov x9, #(SCR_RW_BIT | SCR_SMD_BIT | SCR_RES_BITS | SCR_NS_BIT)
msr scr_el3, x9
/* Set SPSR for EL3
*/
mov x9, #(PSR_MODE_EL2h)
msr spsr_el3, x9
/* Reuse the stack */
mov x10, sp
msr sp_el2, x10
/* set ELR so RET returns to caller */
msr elr_el3, x30
eret
END_FUNC(switch_to_el2)
BEGIN_FUNC(el1_mmu_enable)
stp x29, x30, [sp, #-16]!
mov x29, sp
bl flush_dcache
/* Ensure I-cache, D-cache and mmu are disabled for EL1/Stage1 */
disable_mmu sctlr_el1, x8
/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively are discarded.
*/
bl invalidate_icache
/*
* DEVICE_nGnRnE 000 00000000
* DEVICE_nGnRE 001 00000100
* DEVICE_GRE 010 00001100
* NORMAL_NC 011 01000100
* NORMAL 100 11111111
*/
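/*
 * With MAIR(attr, mt) = attr << (mt * 8) as defined above, the constant
 * assembled below works out to
 *   0x00 | (0x04 << 8) | (0x0c << 16) | (0x44 << 24) | (0xffull << 32)
 *     == 0x000000ff440c0400
 */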
ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
MAIR(0x04, MT_DEVICE_nGnRE) | \
MAIR(0x0c, MT_DEVICE_GRE) | \
MAIR(0x44, MT_NORMAL_NC) | \
MAIR(0xff, MT_NORMAL)
msr mair_el1, x5
ldr x10, =TCR_TxSZ(48) | TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_TG0_4K | TCR_TG1_4K | TCR_ASID16 /*| TCR_SHARED*/
mrs x9, ID_AA64MMFR0_EL1
bfi x10, x9, #32, #3
msr tcr_el1, x10
/* Setup page tables */
adrp x8, boot_lvl0_lower
msr ttbr0_el1, x8
adrp x8, boot_lvl0_upper
msr ttbr1_el1, x8
isb
/* invalidate all TLB entries for EL1 */
tlbi vmalle1is
dsb ish
isb
enable_mmu sctlr_el1, x8
/* set up a vector table so that if the low-level kernel
* initialization code fails, we have some chance of finding
* out and printing reasonable diagnostics.
*/
adrp x8, arm_vector_table
msr vbar_el1, x8
ldp x29, x30, [sp], #16
ret
END_FUNC(el1_mmu_enable)
BEGIN_FUNC(el2_mmu_enable)
stp x29, x30, [sp, #-16]!
mov x29, sp
/* Disable caches */
bl flush_dcache
/* Ensure I-cache, D-cache and mmu are disabled for EL2/Stage1 */
disable_mmu sctlr_el2, x8
/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively are discarded.
*/
bl invalidate_icache
/*
* DEVICE_nGnRnE 000 00000000
* DEVICE_nGnRE 001 00000100
* DEVICE_GRE 010 00001100
* NORMAL_NC 011 01000100
* NORMAL 100 11111111
*/
ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
MAIR(0x04, MT_DEVICE_nGnRE) | \
MAIR(0x0c, MT_DEVICE_GRE) | \
MAIR(0x44, MT_NORMAL_NC) | \
MAIR(0xff, MT_NORMAL)
msr mair_el2, x5
ldr x8, =TCR_T0SZ(48) | TCR_IRGN0_WBWC | TCR_ORGN0_WBWC | TCR_SH0_ISH | TCR_TG0_4K | TCR_PS | TCR_EL2_RES1
msr tcr_el2, x8
isb
/* Setup page tables */
adrp x8, boot_lvl0_lower
msr ttbr0_el2, x8
isb
/* invalidate all TLB entries for EL2 */
tlbi alle2is
dsb ish
isb
enable_mmu sctlr_el2, x8
/* Invalidate instruction cache */
ic ialluis
dsb ish
isb
/* invalidate all TLB entries for EL2 */
tlbi alle2is
dsb ish
isb
ldp x29, x30, [sp], #16
ret
END_FUNC(el2_mmu_enable)
.extern exception_handler
.extern exception_register_state
.macro ventry id
.align 7
/* push some temp registers on the stack */
stp x2, x3, [sp, #-16]
adrp x2, exception_register_state
stp x0, x1, [x2]
mov x0, x2
ldp x2, x3, [sp, #-16]
stp x2, x3, [x0, #16 * 1]
stp x4, x5, [x0, #16 * 2]
stp x6, x7, [x0, #16 * 3]
stp x8, x9, [x0, #16 * 4]
stp x10, x11, [x0, #16 * 5]
stp x12, x13, [x0, #16 * 6]
stp x14, x15, [x0, #16 * 7]
stp x16, x17, [x0, #16 * 8]
stp x18, x19, [x0, #16 * 9]
stp x20, x21, [x0, #16 * 10]
stp x22, x23, [x0, #16 * 11]
stp x24, x25, [x0, #16 * 12]
stp x26, x27, [x0, #16 * 13]
stp x28, x29, [x0, #16 * 14]
mov x0, \id
mrs x1, ESR_EL1
mrs x2, FAR_EL1
b exception_handler
.endm
.align 12
BEGIN_FUNC(arm_vector_table)
ventry #0 // Synchronous EL1t
ventry #1 // IRQ EL1t
ventry #2 // FIQ EL1t
ventry #3 // SError EL1t
ventry #4 // Synchronous EL1h
ventry #5 // IRQ EL1h
ventry #6 // FIQ EL1h
ventry #7 // SError EL1h
ventry #8 // Synchronous 64-bit EL0
ventry #9 // IRQ 64-bit EL0
ventry #10 // FIQ 64-bit EL0
ventry #11 // SError 64-bit EL0
ventry #12 // Synchronous 32-bit EL0
ventry #13 // IRQ 32-bit EL0
ventry #14 // FIQ 32-bit EL0
ventry #15 // SError 32-bit EL0
END_FUNC(arm_vector_table)
|
CHERI-Alliance/CHERI-Microkit | 358 | monitor/src/riscv/crt0.s | /*
* Copyright 2024, UNSW
*
* SPDX-License-Identifier: BSD-2-Clause
*/
.extern main
.extern __global_pointer$
.section ".text.start"
.global _start;
.type _start, %function;
_start:
.option push
.option norelax
1: auipc gp, %pcrel_hi(__global_pointer$)
addi gp, gp, %pcrel_lo(1b)
.option pop
la s1, (_stack + 0xff0)
mv sp, s1
j main
|
CHERI-Alliance/CHERI-Microkit | 242 | monitor/src/aarch64/crt0.s | /*
* Copyright 2021, Breakaway Consulting Pty. Ltd.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
.extern main
.section ".text.start"
.global _start;
.type _start, %function;
_start:
ldr x1, =_stack + 0xff0
mov sp, x1
b main
|
CHERI-Alliance/CHERI-Microkit | 1,277 | libmicrokit/src/riscv/crt0.S | /*
* Copyright 2024, UNSW
*
* SPDX-License-Identifier: BSD-2-Clause
*/
.extern main
.extern __global_pointer$
#if defined(__CHERI_PURE_CAPABILITY__)
.extern _start_purecap
#endif
.section ".text.start"
.global _start;
.type _start, %function;
_start:
.option push
.option norelax
1:
#if defined(__CHERI_PURE_CAPABILITY__)
/* ca0 and ca1 should hold code and data caps and passed to us on start */
call _start_purecap
/* Make sure after we initialise the captable that
* we don't leak any powerful capabilities to the C user
* e.g. ca0/ca1 that were used to construct the captable.
* Writing integer registers will clear the capability tags.
*/
call _reset_regs
call main
#else
1: auipc gp, %pcrel_hi(__global_pointer$)
addi gp, gp, %pcrel_lo(1b)
.option pop
j main
#endif
.globl _reset_regs
_reset_regs:
/* flush the instruction cache */
fence.i
/* Reset all registers except ra and sp */
li tp, 0
li gp, 0
li t0, 0
li t1, 0
li t2, 0
li s0, 0
li s1, 0
li a0, 0
li a1, 0
li a2, 0
li a3, 0
li a4, 0
li a5, 0
li a6, 0
li a7, 0
li s2, 0
li s3, 0
li s4, 0
li s5, 0
li s6, 0
li s7, 0
li s8, 0
li s9, 0
li s10, 0
li s11, 0
li t3, 0
li t4, 0
li t5, 0
li t6, 0
ret
|
CHERI-Alliance/CHERI-Microkit | 199 | libmicrokit/src/aarch64/crt0.s | /*
* Copyright 2021, Breakaway Consulting Pty. Ltd.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
.extern main
.section ".text.start"
.global _start;
.type _start, %function;
_start:
b main
|
cheranratnam87/NLP_Text_Blob | 776 | Python-3.12.4/Python/asm_trampoline.S | .text
.globl _Py_trampoline_func_start
# The following assembly is equivalent to:
# PyObject *
# trampoline(PyThreadState *ts, _PyInterpreterFrame *f,
# int throwflag, py_evaluator evaluator)
# {
# return evaluator(ts, f, throwflag);
# }
_Py_trampoline_func_start:
#ifdef __x86_64__
sub $8, %rsp
call *%rcx
add $8, %rsp
ret
#endif // __x86_64__
#if defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
// ARM64 little endian, 64bit ABI
// generate with aarch64-linux-gnu-gcc 12.1
stp x29, x30, [sp, -16]!
mov x29, sp
blr x3
ldp x29, x30, [sp], 16
ret
#endif
.globl _Py_trampoline_func_end
_Py_trampoline_func_end:
.section .note.GNU-stack,"",@progbits
|
ChihyoA/HeCBenchmark_kernel_fission | 1,351 | openmp/mummergpu/experiments/tests.regs.s | CONTROL,16,17
Q,15,18
QR,15,18
QRT,15,17
QRTm,15,18
QRTmn,15,18
QRTmr,16,18
QRTmrn,16,18
QRTmrt,17,20
QRTmrtn,17,20
QRTmt,16,20
QRTmtn,16,20
QRTn,15,17
QRTr,16,17
QRTrn,16,17
QRTrt,16,19
QRTrtn,16,19
QRTt,15,19
QRTtn,15,19
QRm,15,18
QRmn,15,18
QRmr,16,18
QRmrn,16,18
QRmrt,18,20
QRmrtn,18,20
QRmt,18,20
QRmtn,18,20
QRn,15,18
QRr,16,18
QRrn,16,18
QRrt,16,18
QRrtn,16,18
QRt,15,18
QRtn,15,18
QT,15,17
QTm,15,18
QTmn,15,18
QTmr,16,18
QTmrn,16,18
QTmrt,16,20
QTmrtn,16,20
QTmt,16,20
QTmtn,16,20
QTn,15,17
QTr,16,17
QTrn,16,17
QTrt,16,19
QTrtn,16,19
QTt,15,19
QTtn,15,19
Qm,15,18
Qmn,15,18
Qmr,16,18
Qmrn,16,18
Qmrt,16,20
Qmrtn,16,20
Qmt,15,20
Qmtn,15,20
Qn,15,18
Qr,16,18
Qrn,16,18
Qrt,16,18
Qrtn,16,18
Qt,15,18
Qtn,15,18
R,15,17
RT,15,16
RTm,15,18
RTmn,15,18
RTmr,16,18
RTmrn,16,18
RTmrt,16,20
RTmrtn,16,20
RTmt,16,20
RTmtn,16,20
RTn,15,16
RTr,16,16
RTrn,16,16
RTrt,16,19
RTrtn,16,19
RTt,15,19
RTtn,15,19
Rm,15,16
Rmn,15,16
Rmr,16,16
Rmrn,16,16
Rmrt,16,18
Rmrtn,16,18
Rmt,15,18
Rmtn,15,18
Rn,15,17
Rr,16,17
Rrn,16,17
Rrt,16,17
Rrtn,16,17
Rt,15,17
Rtn,15,17
T,15,16
Tm,15,18
Tmn,15,18
Tmr,16,18
Tmrn,16,18
Tmrt,16,20
Tmrtn,16,20
Tmt,15,20
Tmtn,15,20
Tn,15,16
Tr,16,16
Trn,16,16
Trt,16,19
Trtn,16,19
Tt,15,19
Ttn,15,19
m,15,16
mn,15,16
mr,16,16
mrn,16,16
mrt,16,18
mrtn,16,18
mt,15,18
mtn,15,18
n,16,17
r,16,17
rn,16,17
rt,16,17
rtn,16,17
t,16,17
tn,16,17
|
ChihyoA/HeCBenchmark_kernel_fission | 1,351 | cuda/mummergpu/experiments/tests.regs.s | CONTROL,16,17
Q,15,18
QR,15,18
QRT,15,17
QRTm,15,18
QRTmn,15,18
QRTmr,16,18
QRTmrn,16,18
QRTmrt,17,20
QRTmrtn,17,20
QRTmt,16,20
QRTmtn,16,20
QRTn,15,17
QRTr,16,17
QRTrn,16,17
QRTrt,16,19
QRTrtn,16,19
QRTt,15,19
QRTtn,15,19
QRm,15,18
QRmn,15,18
QRmr,16,18
QRmrn,16,18
QRmrt,18,20
QRmrtn,18,20
QRmt,18,20
QRmtn,18,20
QRn,15,18
QRr,16,18
QRrn,16,18
QRrt,16,18
QRrtn,16,18
QRt,15,18
QRtn,15,18
QT,15,17
QTm,15,18
QTmn,15,18
QTmr,16,18
QTmrn,16,18
QTmrt,16,20
QTmrtn,16,20
QTmt,16,20
QTmtn,16,20
QTn,15,17
QTr,16,17
QTrn,16,17
QTrt,16,19
QTrtn,16,19
QTt,15,19
QTtn,15,19
Qm,15,18
Qmn,15,18
Qmr,16,18
Qmrn,16,18
Qmrt,16,20
Qmrtn,16,20
Qmt,15,20
Qmtn,15,20
Qn,15,18
Qr,16,18
Qrn,16,18
Qrt,16,18
Qrtn,16,18
Qt,15,18
Qtn,15,18
R,15,17
RT,15,16
RTm,15,18
RTmn,15,18
RTmr,16,18
RTmrn,16,18
RTmrt,16,20
RTmrtn,16,20
RTmt,16,20
RTmtn,16,20
RTn,15,16
RTr,16,16
RTrn,16,16
RTrt,16,19
RTrtn,16,19
RTt,15,19
RTtn,15,19
Rm,15,16
Rmn,15,16
Rmr,16,16
Rmrn,16,16
Rmrt,16,18
Rmrtn,16,18
Rmt,15,18
Rmtn,15,18
Rn,15,17
Rr,16,17
Rrn,16,17
Rrt,16,17
Rrtn,16,17
Rt,15,17
Rtn,15,17
T,15,16
Tm,15,18
Tmn,15,18
Tmr,16,18
Tmrn,16,18
Tmrt,16,20
Tmrtn,16,20
Tmt,15,20
Tmtn,15,20
Tn,15,16
Tr,16,16
Trn,16,16
Trt,16,19
Trtn,16,19
Tt,15,19
Ttn,15,19
m,15,16
mn,15,16
mr,16,16
mrn,16,16
mrt,16,18
mrtn,16,18
mt,15,18
mtn,15,18
n,16,17
r,16,17
rn,16,17
rt,16,17
rtn,16,17
t,16,17
tn,16,17
|
Chigozieworldwide/Multi | 15,150 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zstd-sys-2.0.15+zstd.1.5.7/zstd/lib/decompress/huf_decompress_amd64.S | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "../common/portability_macros.h"
#if defined(__ELF__) && defined(__GNUC__)
/* Stack marking
* ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
*/
.section .note.GNU-stack,"",%progbits
#if defined(__aarch64__)
/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64.
* See: https://github.com/facebook/zstd/issues/3841
* See: https://gcc.godbolt.org/z/sqr5T4ffK
* See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/
* See: https://reviews.llvm.org/D62609
*/
.pushsection .note.gnu.property, "a"
.p2align 3
.long 4 /* size of the name - "GNU\0" */
.long 0x10 /* size of descriptor */
.long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */
.asciz "GNU"
.long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4 /* pr_datasz - 4 bytes */
.long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */
.p2align 3 /* pr_padding - bring everything to 8 byte alignment */
.popsection
#endif
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
/* Calling convention:
*
* %rdi (or %rcx on Windows) contains the first argument: HUF_DecompressAsmArgs*.
* %rbp isn't maintained (no frame pointer).
* %rsp contains the stack pointer that grows down.
* No red-zone is assumed, only addresses >= %rsp are used.
* All register contents are preserved.
*/
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.text
/* Sets up register mappings for clarity.
* op[], bits[], dtable & ip[0] each get their own register.
* ip[1,2,3] & olimit alias var[].
* %rax is a scratch register.
*/
#define op0 rsi
#define op1 rbx
#define op2 rcx
#define op3 rdi
#define ip0 r8
#define ip1 r9
#define ip2 r10
#define ip3 r11
#define bits0 rbp
#define bits1 rdx
#define bits2 r12
#define bits3 r13
#define dtable r14
#define olimit r15
/* var[] aliases ip[1,2,3] & olimit
* ip[1,2,3] are saved every iteration.
* olimit is only used in compute_olimit.
*/
#define var0 r15
#define var1 r9
#define var2 r10
#define var3 r11
/* 32-bit var registers */
#define vard0 r15d
#define vard1 r9d
#define vard2 r10d
#define vard3 r11d
/* Calls X(N) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM(X) \
X(0); \
X(1); \
X(2); \
X(3)
/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
X(0, idx); \
X(1, idx); \
X(2, idx); \
X(3, idx)
/* Define both _HUF_* & HUF_* symbols because MacOS
* C symbols are prefixed with '_' & Linux symbols aren't.
*/
_HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
movq %rcx, %rax
#else
movq %rdi, %rax
#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push 104(%rax) /* ilowest */
push 112(%rax) /* oend */
push %olimit /* olimit space */
subq $24, %rsp
.L_4X1_compute_olimit:
/* Computes how many iterations we can do safely
* %r15, %rax may be clobbered
* rbx, rdx must be saved
* op3 & ip0 mustn't be clobbered
*/
movq %rbx, 0(%rsp)
movq %rdx, 8(%rsp)
movq 32(%rsp), %rax /* rax = oend */
subq %op3, %rax /* rax = oend - op3 */
/* r15 = (oend - op3) / 5 */
movabsq $-3689348814741910323, %rdx
mulq %rdx
movq %rdx, %r15
shrq $2, %r15
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %rbx /* rbx = ip0 - ilowest */
/* rdx = (ip0 - ilowest) / 7 */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %rbx
shrq %rbx
addq %rbx, %rdx
shrq $2, %rdx
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* r15 = r15 * 5 */
leaq (%r15, %r15, 4), %r15
/* olimit = op3 + r15 */
addq %op3, %olimit
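/* A rough C equivalent of the computation above (a sketch, not upstream code):
 *   iters  = MIN((oend - op3) / 5, (ip0 - ilowest) / 7);
 *   olimit = op3 + iters * 5;
 * The movabsq constants are the multiply-by-reciprocal encodings of the
 * divisions by 5 and 7 (high 64 bits of the 128-bit product, then shifts).
 */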
movq 8(%rsp), %rdx
movq 0(%rsp), %rbx
/* If (op3 + 20 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X1_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X1_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X1_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X1_exit
/* Reads top 11 bits from bits[n]
* Loads dt[bits[n]] into var[n]
*/
#define GET_NEXT_DELT(n) \
movq $53, %var##n; \
shrxq %var##n, %bits##n, %var##n; \
movzwl (%dtable,%var##n,2),%vard##n
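/* Roughly, in C (a sketch, assuming 16-bit DTable entries as the movzwl implies):
 *   var[n] = dtable[bits[n] >> 53];   // the top 11 bits pick the entry
 */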
/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
* Moves var[n] to %rax
* bits[n] <<= var[n] & 63
* op[n][idx] = %rax >> 8
* %ah is a way to access bits [8, 16) of %rax
*/
#define DECODE_FROM_DELT(n, idx) \
movq %var##n, %rax; \
shlxq %var##n, %bits##n, %bits##n; \
movb %ah, idx(%op##n)
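/* Roughly, in C (a sketch): with entry = var[n] from GET_NEXT_DELT,
 *   bits[n] <<= entry & 63;            // shlx uses only the low 6 bits of the count
 *   op[n][idx] = (entry >> 8) & 0xff;  // %ah is byte 1 of the entry
 */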
/* Assumes GET_NEXT_DELT has been called.
* Calls DECODE_FROM_DELT then GET_NEXT_DELT
*/
#define DECODE_AND_GET_NEXT(n, idx) \
DECODE_FROM_DELT(n, idx); \
GET_NEXT_DELT(n) \
/* // ctz & nbBytes is stored in bits[n]
* // nbBits is stored in %rax
* ctz = CTZ[bits[n]]
* nbBits = ctz & 7
* nbBytes = ctz >> 3
* op[n] += 5
* ip[n] -= nbBytes
* // Note: x86-64 is little-endian ==> no bswap
* bits[n] = MEM_readST(ip[n]) | 1
* bits[n] <<= nbBits
*/
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
andq $7, %rax; \
shrq $3, %bits##n; \
leaq 5(%op##n), %op##n; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlx %rax, %bits##n, %bits##n
/* Store clobbered variables on the stack */
movq %olimit, 24(%rsp)
movq %ip1, 0(%rsp)
movq %ip2, 8(%rsp)
movq %ip3, 16(%rsp)
/* Call GET_NEXT_DELT for each stream */
FOR_EACH_STREAM(GET_NEXT_DELT)
.p2align 6
.L_4X1_loop_body:
/* Decode 5 symbols in each of the 4 streams (20 total)
* Must have called GET_NEXT_DELT for each stream
*/
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)
/* Load ip[1,2,3] from stack (var[] aliases them)
* ip[] is needed for RELOAD_BITS
* Each will be stored back to the stack after RELOAD
*/
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Reload each stream & fetch the next table entry
* to prepare for the next iteration
*/
RELOAD_BITS(0)
GET_NEXT_DELT(0)
RELOAD_BITS(1)
movq %ip1, 0(%rsp)
GET_NEXT_DELT(1)
RELOAD_BITS(2)
movq %ip2, 8(%rsp)
GET_NEXT_DELT(2)
RELOAD_BITS(3)
movq %ip3, 16(%rsp)
GET_NEXT_DELT(3)
/* If op3 < olimit: continue the loop */
cmp %op3, 24(%rsp)
ja .L_4X1_loop_body
/* Reload ip[1,2,3] from stack */
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Re-compute olimit */
jmp .L_4X1_compute_olimit
#undef GET_NEXT_DELT
#undef DECODE_FROM_DELT
#undef DECODE
#undef RELOAD_BITS
.L_4X1_exit:
addq $24, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* olimit */
pop %rax /* oend */
pop %rax /* ilowest */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
_HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
movq %rcx, %rax
#else
movq %rdi, %rax
#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push %rax /* olimit */
push 104(%rax) /* ilowest */
movq 112(%rax), %rax
push %rax /* oend3 */
movq %op3, %rax
push %rax /* oend2 */
movq %op2, %rax
push %rax /* oend1 */
movq %op1, %rax
push %rax /* oend0 */
/* Scratch space */
subq $8, %rsp
.L_4X2_compute_olimit:
/* Computes how many iterations we can do safely
* %r15, %rax may be clobbered
* rdx must be saved
* op[1,2,3,4] & ip0 mustn't be clobbered
*/
movq %rdx, 0(%rsp)
/* We can consume up to 7 input bytes each iteration. */
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %r15 /* r15 = ip0 - ilowest */
/* rdx = rax / 7 */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %r15
shrq %r15
addq %r15, %rdx
shrq $2, %rdx
/* r15 = (ip0 - ilowest) / 7 */
movq %rdx, %r15
/* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */
movq 8(%rsp), %rax /* rax = oend0 */
subq %op0, %rax /* rax = oend0 - op0 */
movq 16(%rsp), %rdx /* rdx = oend1 */
subq %op1, %rdx /* rdx = oend1 - op1 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 24(%rsp), %rax /* rax = oend2 */
subq %op2, %rax /* rax = oend2 - op2 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 32(%rsp), %rax /* rax = oend3 */
subq %op3, %rax /* rax = oend3 - op3 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movabsq $-3689348814741910323, %rax
mulq %rdx
shrq $3, %rdx /* rdx = rdx / 10 */
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* olimit = op3 + 5 * r15 */
movq %r15, %rax
leaq (%op3, %rax, 4), %olimit
addq %rax, %olimit
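/* A rough C equivalent of the block above (a sketch, not upstream code):
 *   iters  = MIN((ip0 - ilowest) / 7,
 *                MIN(oend0-op0, oend1-op1, oend2-op2, oend3-op3) / 10);
 *   olimit = op3 + iters * 5;
 * again using multiply-by-reciprocal constants for the divisions.
 */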
movq 0(%rsp), %rdx
/* If (op3 + 10 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X2_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X2_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X2_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X2_exit
#define DECODE(n, idx) \
movq %bits##n, %rax; \
shrq $53, %rax; \
movzwl 0(%dtable,%rax,4),%r8d; \
movzbl 2(%dtable,%rax,4),%r15d; \
movzbl 3(%dtable,%rax,4),%eax; \
movw %r8w, (%op##n); \
shlxq %r15, %bits##n, %bits##n; \
addq %rax, %op##n
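/* Roughly, in C (a sketch, assuming 4-byte DTable entries laid out as
 * { u16 sequence; u8 nbBits; u8 length; } per the byte offsets used above):
 *   e = dtable[bits[n] >> 53];
 *   memcpy(op[n], &e.sequence, 2); bits[n] <<= e.nbBits; op[n] += e.length;
 */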
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
shrq $3, %bits##n; \
andq $7, %rax; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlxq %rax, %bits##n, %bits##n
movq %olimit, 48(%rsp)
.p2align 6
.L_4X2_loop_body:
/* We clobber r8, so store it on the stack */
movq %r8, 0(%rsp)
/* Decode 5 symbols from each of the 4 streams (20 symbols total). */
FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)
/* Reload r8 */
movq 0(%rsp), %r8
FOR_EACH_STREAM(RELOAD_BITS)
cmp %op3, 48(%rsp)
ja .L_4X2_loop_body
jmp .L_4X2_compute_olimit
#undef DECODE
#undef RELOAD_BITS
.L_4X2_exit:
addq $8, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* oend0 */
pop %rax /* oend1 */
pop %rax /* oend2 */
pop %rax /* oend3 */
pop %rax /* ilowest */
pop %rax /* olimit */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
#endif
|
Chigozieworldwide/Multi | 1,571 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/riscv.s | #include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li x10, STACK_DIRECTION_DESCENDING
jr x1
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
add x10, x2, x0
jr x1
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */
.cfi_startproc
add x2, x12, x0
jr x11
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */
.cfi_startproc
sw x1, -12(x13)
sw x2, -16(x13)
addi x2, x13, -16
.cfi_def_cfa x2, 16
.cfi_offset x1, -12
.cfi_offset x2, -16
jalr x1, x12, 0
lw x1, 4(x2)
.cfi_restore x1
lw x2, 0(x2)
.cfi_restore x2
jr x1
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 2,235 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/x86_64.s | #include "psm.h"
/* NOTE: sysv64 calling convention is used on all x86_64 targets, including Windows! */
.text
#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
#define GLOBL(fnname) .globl _##fnname
#define TYPE(fnname)
#define FUNCTION(fnname) _##fnname
#define SIZE(fnname,endlabel)
#else
#define GLOBL(fnname) .globl fnname
#define TYPE(fnname) .type fnname,@function
#define FUNCTION(fnname) fnname
#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
#endif
GLOBL(rust_psm_stack_direction)
.p2align 4
TYPE(rust_psm_stack_direction)
FUNCTION(rust_psm_stack_direction):
/* extern "sysv64" fn() -> u8 (%al) */
.cfi_startproc
movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86_64
retq
.rust_psm_stack_direction_end:
SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
.cfi_endproc
GLOBL(rust_psm_stack_pointer)
.p2align 4
TYPE(rust_psm_stack_pointer)
FUNCTION(rust_psm_stack_pointer):
/* extern "sysv64" fn() -> *mut u8 (%rax) */
.cfi_startproc
leaq 8(%rsp), %rax
retq
.rust_psm_stack_pointer_end:
SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
.cfi_endproc
GLOBL(rust_psm_replace_stack)
.p2align 4
TYPE(rust_psm_replace_stack)
FUNCTION(rust_psm_replace_stack):
/* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8) */
.cfi_startproc
/*
All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi.
The 8-byte offset is necessary to account for the "return" pointer that would otherwise be
placed onto the stack by a regular call
*/
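/*
 * Conceptually (a sketch of intent, not upstream documentation):
 *     rsp = new_stack - 8;  goto callback(arg);
 * so the callee observes the same stack alignment it would after a `call`.
 */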
leaq -8(%rdx), %rsp
jmpq *%rsi
.rust_psm_replace_stack_end:
SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
.cfi_endproc
GLOBL(rust_psm_on_stack)
.p2align 4
TYPE(rust_psm_on_stack)
FUNCTION(rust_psm_on_stack):
/* extern "sysv64" fn(%rdi: usize, %rsi: usize, %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8) */
.cfi_startproc
pushq %rbp
.cfi_def_cfa %rsp, 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rcx, %rsp
callq *%rdx
movq %rbp, %rsp
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.rust_psm_on_stack_end:
SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
.cfi_endproc
|
Chigozieworldwide/Multi | 1,569 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/riscv64.s | #include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li x10, STACK_DIRECTION_DESCENDING
jr x1
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
add x10, x2, x0
jr x1
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */
.cfi_startproc
add x2, x12, x0
jr x11
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */
.cfi_startproc
sd x1, -8(x13)
sd x2, -16(x13)
addi x2, x13, -16
.cfi_def_cfa x2, 16
.cfi_offset x1, -8
.cfi_offset x2, -16
jalr x1, x12, 0
ld x1, 8(x2)
.cfi_restore x1
ld x2, 0(x2)
.cfi_restore x2
jr x1
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 2,080 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/zseries_linux.s | /* Implementation of stack switching routines for zSeries LINUX ABI.
This ABI is used by the s390x-unknown-linux-gnu target.
Documents used:
* LINUX for zSeries: ELF Application Binary Interface Supplement (1st ed., 2001) (LNUX-1107-01)
* z/Architecture: Principles of Operation (4th ed., 2004) (SA22-7832-03)
*/
#include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 4
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
lghi %r2, STACK_DIRECTION_DESCENDING
br %r14
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 4
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
la %r2, 0(%r15)
br %r14
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 4
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(r2: usize, r3: extern "C" fn(usize), r4: *mut u8) */
.cfi_startproc
/* FIXME: backtrace does not terminate cleanly for some reason */
lay %r15, -160(%r4)
/* FIXME: this is `basr` instead of `br` purely to remove the backtrace link to the caller */
basr %r14, %r3
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 4
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(r2: usize, r3: usize, r4: extern "C" fn(usize, usize), r5: *mut u8) */
.cfi_startproc
stmg %r14, %r15, -16(%r5)
lay %r15, -176(%r5)
.cfi_def_cfa %r15, 176
.cfi_offset %r14, -16
.cfi_offset %r15, -8
basr %r14, %r4
lmg %r14, %r15, 160(%r15)
.cfi_restore %r14
.cfi_restore %r15
br %r14
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 2,264 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/x86_windows_gnu.s | /* FIXME: this works locally but not on appveyor??!? */
/* NOTE: fastcall calling convention used on all x86 targets */
.text
.def @rust_psm_stack_direction@0
.scl 2
.type 32
.endef
.globl @rust_psm_stack_direction@0
.p2align 4
@rust_psm_stack_direction@0:
/* extern "fastcall" fn() -> u8 (%al) */
.cfi_startproc
movb $2, %al # always descending on x86
retl
.cfi_endproc
.def @rust_psm_stack_pointer@0
.scl 2
.type 32
.endef
.globl @rust_psm_stack_pointer@0
.p2align 4
@rust_psm_stack_pointer@0:
/* extern "fastcall" fn() -> *mut u8 (%rax) */
.cfi_startproc
leal 4(%esp), %eax
retl
.cfi_endproc
.def @rust_psm_replace_stack@16
.scl 2
.type 32
.endef
.globl @rust_psm_replace_stack@16
.p2align 4
@rust_psm_replace_stack@16:
/* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */
.cfi_startproc
/*
All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx
Note that the callee expects the stack to be offset by 4 bytes (normally, a return address
would be stored there) from the required stack alignment on entry. To offset the stack in such a
way we use the `calll` instruction; it would also be possible to use plain `jmpl`, but that
would require adjusting the stack manually, which cannot be easily done, because the stack
pointer argument is already stored in memory.
*/
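/*
 * (assumption, based on the moves below) %fs:0x04 and %fs:0x08 are the 32-bit
 * TIB StackBase / StackLimit fields; 4(%esp) supplies the new stack top and
 * 8(%esp) the new limit, and updating both keeps Windows SEH willing to
 * dispatch handlers that live on the replacement stack.
 */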
movl 8(%esp), %eax
mov %eax, %fs:0x08
movl 4(%esp), %esp
mov %esp, %fs:0x04
calll *%edx
ud2
.cfi_endproc
.def @rust_psm_on_stack@16
.scl 2
.type 32
.endef
.globl @rust_psm_on_stack@16
.p2align 4
@rust_psm_on_stack@16:
/* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */
.cfi_startproc
pushl %ebp
.cfi_def_cfa %esp, 8
.cfi_offset %ebp, -8
pushl %fs:0x04
.cfi_def_cfa %esp, 12
pushl %fs:0x08
.cfi_def_cfa %esp, 16
movl %esp, %ebp
.cfi_def_cfa_register %ebp
movl 24(%ebp), %eax
movl %eax, %fs:0x08
movl 20(%ebp), %esp
movl %esp, %fs:0x04
calll *16(%ebp)
movl %ebp, %esp
popl %fs:0x08
.cfi_def_cfa %esp, 12
popl %fs:0x04
.cfi_def_cfa %esp, 8
popl %ebp
.cfi_def_cfa %esp, 4
retl $12
.cfi_endproc
|
Chigozieworldwide/Multi | 1,963 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc32.s | #include "psm.h"
/* FIXME: this probably does not cover all ABIs? Tested with sysv only, possibly works for AIX as
well?
*/
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li 3, STACK_DIRECTION_DESCENDING
blr
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
mr 3, 1
blr
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
.cfi_startproc
/* NOTE: perhaps add a debug-assertion for stack alignment? */
addi 5, 5, -16
mr 1, 5
mtctr 4
bctr
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
.cfi_startproc
mflr 0
stw 0, -24(6)
sub 6, 6, 1
addi 6, 6, -32
stwux 1, 1, 6
.cfi_def_cfa r1, 32
.cfi_offset r1, -32
.cfi_offset lr, -24
mtctr 5
bctrl
lwz 0, 8(1)
mtlr 0
.cfi_restore lr
/* FIXME: after this instruction backtrace breaks until control returns to the caller
That being said compiler-generated code has the same issue, so I guess that is fine for now?
*/
lwz 1, 0(1)
.cfi_restore r1
blr
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 2,218 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/x86_64_windows_gnu.s | .text
.def rust_psm_stack_direction
.scl 2
.type 32
.endef
.globl rust_psm_stack_direction
.p2align 4
rust_psm_stack_direction:
/* extern "sysv64" fn() -> u8 (%al) */
.cfi_startproc
movb $2, %al # always descending on x86_64
retq
.cfi_endproc
.def rust_psm_stack_pointer
.scl 2
.type 32
.endef
.globl rust_psm_stack_pointer
.p2align 4
rust_psm_stack_pointer:
/* extern "sysv64" fn() -> *mut u8 (%rax) */
.cfi_startproc
leaq 8(%rsp), %rax
retq
.cfi_endproc
.def rust_psm_replace_stack
.scl 2
.type 32
.endef
.globl rust_psm_replace_stack
.p2align 4
rust_psm_replace_stack:
/* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8, %rcx: *mut u8) */
.cfi_startproc
/*
All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi.
The 8-byte offset is necessary to account for the "return" pointer that would otherwise be
placed onto the stack by a regular call
*/
movq %gs:0x08, %rdx
movq %gs:0x10, %rcx
leaq -8(%rdx), %rsp
jmpq *%rsi
.cfi_endproc
.def rust_psm_on_stack
.scl 2
.type 32
.endef
.globl rust_psm_on_stack
.p2align 4
rust_psm_on_stack:
/*
extern "sysv64" fn(%rdi: usize, %rsi: usize,
%rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8, %r8: *mut u8)
NB: on Windows, for SEH to work at all, the stack pointers in the TIB (thread information block) need to be
fixed up. Otherwise, it seems that the exception mechanism on Windows will not bother looking for
exception handlers at *all* if they happen to fall outside the area specified in the TIB.
This necessitates an API difference from the usual 4-argument signature used elsewhere.
FIXME: this needs a catch-all exception handler that aborts in case somebody unwinds into here.
*/
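/*
 * (assumption) %gs:0x08 / %gs:0x10 are the 64-bit TIB StackBase and StackLimit;
 * they are saved on entry, repointed at the new stack for the duration of the
 * call, and restored on exit so SEH keeps working.
 */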
.cfi_startproc
pushq %rbp
.cfi_def_cfa %rsp, 16
.cfi_offset %rbp, -16
pushq %gs:0x08
.cfi_def_cfa %rsp, 24
pushq %gs:0x10
.cfi_def_cfa %rsp, 32
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rcx, %gs:0x08
movq %r8, %gs:0x10
movq %rcx, %rsp
callq *%rdx
movq %rbp, %rsp
popq %gs:0x10
.cfi_def_cfa %rsp, 24
popq %gs:0x08
.cfi_def_cfa %rsp, 16
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.cfi_endproc
|
Chigozieworldwide/Multi | 2,299 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/arm_aapcs.s | #include "psm.h"
.text
.syntax unified
#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
#define GLOBL(fnname) .globl _##fnname
#define THUMBTYPE(fnname) .thumb_func _##fnname
#define FUNCTION(fnname) _##fnname
#define THUMBFN .code 16
#define SIZE(fnname,endlabel)
#define FNSTART
#define CANTUNWIND
#define FNEND
#else
#define GLOBL(fnname) .globl fnname
#define THUMBTYPE(fnname) .type fnname,%function
#define FUNCTION(fnname) fnname
#define THUMBFN .code 16
#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
#define FNSTART .fnstart
#define CANTUNWIND .cantunwind
#define FNEND .fnend
#endif
GLOBL(rust_psm_stack_direction)
.p2align 2
THUMBTYPE(rust_psm_stack_direction)
THUMBFN
FUNCTION(rust_psm_stack_direction):
/* extern "C" fn() -> u8 */
FNSTART
.cfi_startproc
/* movs to support Thumb-1 */
movs r0, #STACK_DIRECTION_DESCENDING
bx lr
.rust_psm_stack_direction_end:
SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
.cfi_endproc
CANTUNWIND
FNEND
GLOBL(rust_psm_stack_pointer)
.p2align 2
THUMBTYPE(rust_psm_stack_pointer)
THUMBFN
FUNCTION(rust_psm_stack_pointer):
/* extern "C" fn() -> *mut u8 */
FNSTART
.cfi_startproc
mov r0, sp
bx lr
.rust_psm_stack_pointer_end:
SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
.cfi_endproc
CANTUNWIND
FNEND
GLOBL(rust_psm_replace_stack)
.p2align 2
THUMBTYPE(rust_psm_replace_stack)
THUMBFN
FUNCTION(rust_psm_replace_stack):
/* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */
FNSTART
.cfi_startproc
/* All we gotta do is set the stack pointer to r2 & tail-call the callback in r1 */
mov sp, r2
bx r1
.rust_psm_replace_stack_end:
SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
.cfi_endproc
CANTUNWIND
FNEND
GLOBL(rust_psm_on_stack)
.p2align 2
THUMBTYPE(rust_psm_on_stack)
THUMBFN
FUNCTION(rust_psm_on_stack):
/* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */
FNSTART
.cfi_startproc
push {r4, lr}
.cfi_def_cfa_offset 8
mov r4, sp
.cfi_def_cfa_register r4
.cfi_offset lr, -4
.cfi_offset r4, -8
mov sp, r3
blx r2
mov sp, r4
.cfi_restore sp
pop {r4, pc}
.rust_psm_on_stack_end:
SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
.cfi_endproc
CANTUNWIND
FNEND
|
Chigozieworldwide/Multi | 1,938 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/sparc64.s | #include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
jmpl %o7 + 8, %g0
mov STACK_DIRECTION_DESCENDING, %o0
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
jmpl %o7 + 8, %g0
mov %o6, %o0
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */
.cfi_startproc
.cfi_def_cfa 0, 0
.cfi_return_column 0
jmpl %o1, %g0
/* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */
add %o2, -0x7ff, %o6
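/* (assumption) this answers the "WEIRD" note above: 64-bit SPARC defines a
 * stack bias of 2047 bytes, so %sp/%fp point 0x7ff below the real frame and
 * every frame adjustment carries the odd-looking -0x7ff term. */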
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */
.cfi_startproc
/* The fact that locals and saved register windows are offset by 2kB is
a very nasty property of the SPARC architecture and ABI. In this case it forces us to slice off
2kB of the stack space outright for no good reason other than adapting to a botched design.
*/
save %o3, -0x87f, %o6
.cfi_def_cfa_register %fp
.cfi_window_save
.cfi_register %r15, %r31
mov %i1, %o1
jmpl %i2, %o7
mov %i0, %o0
ret
restore
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 1,722 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/sparc_sysv.s | #include "psm.h"
/* FIXME: this ABI has definitely not been verified at all */
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
jmpl %o7 + 8, %g0
mov STACK_DIRECTION_DESCENDING, %o0
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
jmpl %o7 + 8, %g0
mov %o6, %o0
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */
.cfi_startproc
.cfi_def_cfa 0, 0
.cfi_return_column 0
jmpl %o1, %g0
/* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */
add %o2, -0x3ff, %o6
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */
.cfi_startproc
save %o3, -0x43f, %o6
.cfi_def_cfa_register %fp
.cfi_window_save
.cfi_register %r15, %r31
mov %i1, %o1
jmpl %i2, %o7
mov %i0, %o0
ret
restore
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 3,609 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc64_aix.s | .csect .text[PR],2
.file "powerpc64_aix.s"
.globl rust_psm_stack_direction[DS]
.globl .rust_psm_stack_direction
.align 4
.csect rust_psm_stack_direction[DS],3
.vbyte 8, .rust_psm_stack_direction
.vbyte 8, TOC[TC0]
.vbyte 8, 0
.csect .text[PR],2
.rust_psm_stack_direction:
# extern "C" fn() -> u8
li 3, 2
blr
L..rust_psm_stack_direction_end:
# Following bytes form the traceback table on AIX.
# For specification, see https://www.ibm.com/docs/en/aix/7.2?topic=processor-traceback-tables.
# For implementation, see https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp,
# `PPCAIXAsmPrinter::emitTracebackTable`.
.vbyte 4, 0x00000000 # Traceback table begin, for unwinder to search the table.
.byte 0x00 # Version = 0
.byte 0x09 # Language = CPlusPlus, since rust is using C++-like LSDA.
.byte 0x20 # -IsGlobalLinkage, -IsOutOfLineEpilogOrPrologue
# +HasTraceBackTableOffset, -IsInternalProcedure
# -HasControlledStorage, -IsTOCless
# -IsFloatingPointPresent
# -IsFloatingPointOperationLogOrAbortEnabled
.byte 0x40 # -IsInterruptHandler, +IsFunctionNamePresent, -IsAllocaUsed
# OnConditionDirective = 0, -IsCRSaved, -IsLRSaved
.byte 0x80 # +IsBackChainStored, -IsFixup, NumOfFPRsSaved = 0
.byte 0x00 # -HasExtensionTable, -HasVectorInfo, NumOfGPRsSaved = 0
.byte 0x00 # NumberOfFixedParms = 0
.byte 0x01 # NumberOfFPParms = 0, +HasParmsOnStack
.vbyte 4, L..rust_psm_stack_direction_end-.rust_psm_stack_direction #Function size
.vbyte 2, 0x0018 # Function name len = 24
.byte "rust_psm_stack_direction" # Function Name
.globl rust_psm_stack_pointer[DS]
.globl .rust_psm_stack_pointer
.align 4
.csect rust_psm_stack_pointer[DS],3
.vbyte 8, .rust_psm_stack_pointer
.vbyte 8, TOC[TC0]
.vbyte 8, 0
.csect .text[PR],2
.rust_psm_stack_pointer:
# extern "C" fn() -> *mut u8
mr 3, 1
blr
L..rust_psm_stack_pointer_end:
.vbyte 4, 0x00000000
.byte 0x00
.byte 0x09
.byte 0x20
.byte 0x40
.byte 0x80
.byte 0x00
.byte 0x00
.byte 0x01
.vbyte 4, L..rust_psm_stack_pointer_end-.rust_psm_stack_pointer
.vbyte 2, 0x0016
.byte "rust_psm_stack_pointer"
.globl rust_psm_replace_stack[DS]
.globl .rust_psm_replace_stack
.align 4
.csect rust_psm_replace_stack[DS],3
.vbyte 8, .rust_psm_replace_stack
.vbyte 8, TOC[TC0]
.vbyte 8, 0
.csect .text[PR],2
.rust_psm_replace_stack:
# extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8)
# Load the function pointer and toc pointer from TOC and make the call.
ld 2, 8(4)
ld 4, 0(4)
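# (assumption) under the AIX ABI a "function pointer" is a descriptor of three
# doublewords: entry address, TOC pointer, environment. Hence the entry is
# loaded from offset 0 and the callee's TOC (r2) from offset 8.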
addi 5, 5, -48
mr 1, 5
mtctr 4
bctr
L..rust_psm_replace_stack_end:
.vbyte 4, 0x00000000
.byte 0x00
.byte 0x09
.byte 0x20
.byte 0x40
.byte 0x80
.byte 0x00
.byte 0x03
.byte 0x01
.vbyte 4, 0x00000000 # Parameter type = i, i, i
.vbyte 4, L..rust_psm_replace_stack_end-.rust_psm_replace_stack
.vbyte 2, 0x0016
.byte "rust_psm_replace_stack"
.globl rust_psm_on_stack[DS]
.globl .rust_psm_on_stack
.align 4
.csect rust_psm_on_stack[DS],3
.vbyte 8, .rust_psm_on_stack
.vbyte 8, TOC[TC0]
.vbyte 8, 0
.csect .text[PR],2
.rust_psm_on_stack:
# extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8)
mflr 0
std 2, -72(6)
std 0, -8(6)
sub 6, 6, 1
addi 6, 6, -112
stdux 1, 1, 6
ld 2, 8(5)
ld 5, 0(5)
mtctr 5
bctrl
ld 2, 40(1)
ld 0, 104(1)
mtlr 0
ld 1, 0(1)
blr
L..rust_psm_on_stack_end:
.vbyte 4, 0x00000000
.byte 0x00
.byte 0x09
.byte 0x20
.byte 0x41
.byte 0x80
.byte 0x00
.byte 0x04
.byte 0x01
.vbyte 4, 0x00000000 # Parameter type = i, i, i, i
.vbyte 4, L..rust_psm_on_stack_end-.rust_psm_on_stack
.vbyte 2, 0x0011
.byte "rust_psm_on_stack"
.toc
|
Chigozieworldwide/Multi | 2,557 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/x86.s | #include "psm.h"
/* NOTE: fastcall calling convention used on all x86 targets */
.text
#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
#define GLOBL(fnname) .globl _##fnname
#define TYPE(fnname)
#define FUNCTION(fnname) _##fnname
#define SIZE(fnname,endlabel)
#else
#define GLOBL(fnname) .globl fnname
#define TYPE(fnname) .type fnname,@function
#define FUNCTION(fnname) fnname
#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
#endif
GLOBL(rust_psm_stack_direction)
.p2align 4
TYPE(rust_psm_stack_direction)
FUNCTION(rust_psm_stack_direction):
/* extern "fastcall" fn() -> u8 (%al) */
.cfi_startproc
movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86
retl
.rust_psm_stack_direction_end:
SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
.cfi_endproc
GLOBL(rust_psm_stack_pointer)
.p2align 4
TYPE(rust_psm_stack_pointer)
FUNCTION(rust_psm_stack_pointer):
/* extern "fastcall" fn() -> *mut u8 (%rax) */
.cfi_startproc
leal 4(%esp), %eax
retl
.rust_psm_stack_pointer_end:
SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
.cfi_endproc
GLOBL(rust_psm_replace_stack)
.p2align 4
TYPE(rust_psm_replace_stack)
FUNCTION(rust_psm_replace_stack):
/* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */
.cfi_startproc
/*
All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx
Note that the callee expects the stack to be offset by 4 bytes (normally, a return address
would be stored there) from the required stack alignment on entry. To offset the stack in such a
way we use the `calll` instruction; it would also be possible to use plain `jmpl`, but that
would require adjusting the stack manually, which cannot be easily done, because the stack
pointer argument is already stored in memory.
*/
movl 4(%esp), %esp
calll *%edx
ud2
.rust_psm_replace_stack_end:
SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
.cfi_endproc
GLOBL(rust_psm_on_stack)
.p2align 4
TYPE(rust_psm_on_stack)
FUNCTION(rust_psm_on_stack):
/* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */
.cfi_startproc
pushl %ebp
.cfi_def_cfa %esp, 8
.cfi_offset %ebp, -8
movl %esp, %ebp
.cfi_def_cfa_register %ebp
movl 12(%ebp), %esp
calll *8(%ebp)
movl %ebp, %esp
popl %ebp
.cfi_def_cfa %esp, 4
retl $8
.rust_psm_on_stack_end:
SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
.cfi_endproc
|
Chigozieworldwide/Multi | 2,149 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/mips_eabi.s | /*
Not only does MIPS have 20 different ABIs... nobody tells anybody what specific variant of which ABI is
used where.
This is an "EABI" implementation based on the following page:
http://www.cygwin.com/ml/binutils/2003-06/msg00436.html
*/
#include "psm.h"
.set noreorder /* we’ll manage the delay slots on our own, thanks! */
.text
.abicalls
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
.ent rust_psm_stack_direction
/* extern "C" fn() -> u8 */
rust_psm_stack_direction:
.cfi_startproc
jr $31
addiu $2, $zero, STACK_DIRECTION_DESCENDING
.end rust_psm_stack_direction
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
.ent rust_psm_stack_pointer
/* extern "C" fn() -> *mut u8 */
rust_psm_stack_pointer:
.cfi_startproc
jr $31
move $2, $29
.end rust_psm_stack_pointer
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
.ent rust_psm_replace_stack
/* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */
rust_psm_replace_stack:
.cfi_startproc
move $25, $5
jr $5
move $29, $6
.end rust_psm_replace_stack
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
.ent rust_psm_on_stack
/* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */
rust_psm_on_stack:
.cfi_startproc
sw $29, -4($7)
sw $31, -8($7)
.cfi_def_cfa 7, 0
.cfi_offset 31, -8
.cfi_offset 29, -4
move $25, $6
jalr $31, $6
addiu $29, $7, -8
.cfi_def_cfa 29, 8
lw $31, 0($29)
.cfi_restore 31
lw $29, 4($29)
.cfi_restore 29
jr $31
nop
.end rust_psm_on_stack
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 1,568 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/loongarch64.s | #include "psm.h"
.text
.globl rust_psm_stack_direction
.align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li.w $r4, STACK_DIRECTION_DESCENDING
jr $r1
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
move $r4, $r3
jr $r1
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */
.cfi_startproc
move $r3, $r6
jr $r5
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize, usize), r7: *mut u8) */
.cfi_startproc
st.d $r1, $r7, -8
st.d $r3, $r7, -16
addi.d $r3, $r7, -16
.cfi_def_cfa 3, 16
.cfi_offset 1, -8
.cfi_offset 3, -16
jirl $r1, $r6, 0
ld.d $r1, $r3, 8
.cfi_restore 1
ld.d $r3, $r3, 0
.cfi_restore 3
jr $r1
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 2,137 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/aarch_aapcs64.s | #include "psm.h"
.text
#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
#define GLOBL(fnname) .globl _##fnname
#define TYPE(fnname)
#define FUNCTION(fnname) _##fnname
#define SIZE(fnname,endlabel)
#elif CFG_TARGET_OS_windows
#define GLOBL(fnname) .globl fnname
#define TYPE(fnname)
#define FUNCTION(fnname) fnname
#define SIZE(fnname,endlabel)
#else
#define GLOBL(fnname) .globl fnname
#define TYPE(fnname) .type fnname,@function
#define FUNCTION(fnname) fnname
#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
#endif
GLOBL(rust_psm_stack_direction)
.p2align 2
TYPE(rust_psm_stack_direction)
FUNCTION(rust_psm_stack_direction):
/* extern "C" fn() -> u8 */
.cfi_startproc
orr w0, wzr, #STACK_DIRECTION_DESCENDING
ret
.rust_psm_stack_direction_end:
SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
.cfi_endproc
GLOBL(rust_psm_stack_pointer)
.p2align 2
TYPE(rust_psm_stack_pointer)
FUNCTION(rust_psm_stack_pointer):
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
mov x0, sp
ret
.rust_psm_stack_pointer_end:
SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
.cfi_endproc
GLOBL(rust_psm_replace_stack)
.p2align 2
TYPE(rust_psm_replace_stack)
FUNCTION(rust_psm_replace_stack):
/* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */
.cfi_startproc
/* All we gotta do is set the stack pointer to x2 & tail-call the callback in x1 */
mov sp, x2
br x1
.rust_psm_replace_stack_end:
SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
.cfi_endproc
GLOBL(rust_psm_on_stack)
.p2align 2
TYPE(rust_psm_on_stack)
FUNCTION(rust_psm_on_stack):
/* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */
.cfi_startproc
stp x29, x30, [sp, #-16]!
.cfi_def_cfa sp, 16
mov x29, sp
.cfi_def_cfa x29, 16
.cfi_offset x29, -16
.cfi_offset x30, -8
mov sp, x3
blr x2
mov sp, x29
.cfi_def_cfa sp, 16
ldp x29, x30, [sp], #16
.cfi_def_cfa sp, 0
.cfi_restore x29
.cfi_restore x30
ret
.rust_psm_on_stack_end:
SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
.cfi_endproc
|
Chigozieworldwide/Multi | 2,144 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/mips64_eabi.s | /*
Not only does MIPS have 20 different ABIs... nobody tells anybody what specific variant of which ABI is
used where.
This is an "EABI" implementation based on the following page:
http://www.cygwin.com/ml/binutils/2003-06/msg00436.html
*/
#include "psm.h"
.set noreorder /* we’ll manage the delay slots on our own, thanks! */
.text
.globl rust_psm_stack_direction
.p2align 3
.type rust_psm_stack_direction,@function
.ent rust_psm_stack_direction
/* extern "C" fn() -> u8 */
rust_psm_stack_direction:
.cfi_startproc
jr $31
addiu $2, $zero, STACK_DIRECTION_DESCENDING
.end rust_psm_stack_direction
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 3
.type rust_psm_stack_pointer,@function
.ent rust_psm_stack_pointer
/* extern "C" fn() -> *mut u8 */
rust_psm_stack_pointer:
.cfi_startproc
jr $31
move $2, $29
.end rust_psm_stack_pointer
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 3
.type rust_psm_replace_stack,@function
.ent rust_psm_replace_stack
/* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */
rust_psm_replace_stack:
.cfi_startproc
move $25, $5
jr $5
move $29, $6
.end rust_psm_replace_stack
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 3
.type rust_psm_on_stack,@function
.ent rust_psm_on_stack
/* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */
rust_psm_on_stack:
.cfi_startproc
sd $29, -8($7)
sd $31, -16($7)
.cfi_def_cfa 7, 0
.cfi_offset 31, -16
.cfi_offset 29, -8
move $25, $6
jalr $31, $6
daddiu $29, $7, -16
.cfi_def_cfa 29, 16
ld $31, 0($29)
.cfi_restore 31
ld $29, 8($29)
.cfi_restore 29
jr $31
nop
.end rust_psm_on_stack
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 1,694 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/wasm32.s | #include "psm.h"
# Note that this function is not compiled when this package is uploaded to
# crates.io, this source is only here as a reference for how the corresponding
# wasm32.o was generated. This file can be compiled with:
#
# cpp psm/src/arch/wasm32.s | llvm-mc -o psm/src/arch/wasm32.o --arch=wasm32 -filetype=obj
#
# where you'll want to ensure that `llvm-mc` is from a relatively recent
# version of LLVM.
.globaltype __stack_pointer, i32
.globl rust_psm_stack_direction
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
.functype rust_psm_stack_direction () -> (i32)
i32.const STACK_DIRECTION_DESCENDING
end_function
.globl rust_psm_stack_pointer
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
.functype rust_psm_stack_pointer () -> (i32)
global.get __stack_pointer
end_function
.globl rust_psm_on_stack
.type rust_psm_on_stack,@function
rust_psm_on_stack:
.functype rust_psm_on_stack (i32, i32, i32, i32) -> ()
# get our new stack argument, then save the old stack
# pointer into that local
local.get 3
global.get __stack_pointer
local.set 3
global.set __stack_pointer
# Call our indirect function specified
local.get 0
local.get 1
local.get 2
call_indirect (i32, i32) -> ()
# restore the stack pointer before returning
local.get 3
global.set __stack_pointer
end_function
.globl rust_psm_replace_stack
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
.functype rust_psm_replace_stack (i32, i32, i32) -> ()
local.get 2
global.set __stack_pointer
local.get 0
local.get 1
call_indirect (i32) -> ()
unreachable
end_function
|
Chigozieworldwide/Multi | 2,541 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc64.s | /* Implementation of the AIX-like PowerPC ABI. Seems to be used by the big-endian PowerPC targets.
The following references were used during the implementation of this code:
https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_rntime_stack.htm
https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_reg_use_conv.htm
https://www.ibm.com/developerworks/library/l-powasm4/index.html
*/
#include "psm.h"
.text
.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li 3, STACK_DIRECTION_DESCENDING
blr
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
mr 3, 1
blr
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
.cfi_startproc
ld 2, 8(4)
ld 4, 0(4)
/* do not allocate the whole 112-byte frame, we know it won't be used */
addi 5, 5, -48
mr 1, 5
mtctr 4
bctr
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
.cfi_startproc
mflr 0
std 2, -72(6)
std 0, -8(6)
sub 6, 6, 1
addi 6, 6, -112
stdux 1, 1, 6
.cfi_def_cfa r1, 112
.cfi_offset r1, -112
.cfi_offset r2, -72
.cfi_offset lr, -8
/* load the function pointer from TOC and make the call */
ld 2, 8(5)
ld 5, 0(5)
mtctr 5
bctrl
ld 2, 40(1)
.cfi_restore r2
ld 0, 104(1)
mtlr 0
.cfi_restore lr
/* FIXME: after this instruction backtrace breaks until control returns to the caller.
That being said compiler-generated code has the same issue, so I guess that is fine for now?
*/
ld 1, 0(1)
.cfi_restore r1
blr
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 2,045 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc64_openpower.s | /* Implementation of stack switching routines for OpenPOWER 64-bit ELF ABI
The specification can be found at
http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/ch_preface.html
This ABI is usually used by the ppc64le targets.
*/
#include "psm.h"
.text
.abiversion 2
.globl rust_psm_stack_direction
.p2align 4
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
.cfi_startproc
li 3, STACK_DIRECTION_DESCENDING
blr
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 4
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
.cfi_startproc
mr 3, 1
blr
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 4
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
.cfi_startproc
addi 5, 5, -32
mtctr 4
mr 12, 4
mr 1, 5
bctr
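/* (assumption) r12 is loaded with the target address above because the ELFv2
 * ABI expects the callee's global entry point in r12 so it can derive its own
 * TOC pointer. */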
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 4
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
.cfi_startproc
mflr 0
std 0, -8(6)
std 2, -24(6)
sub 6, 6, 1
addi 6, 6, -48
stdux 1, 1, 6
.cfi_def_cfa r1, 48
.cfi_offset r1, -48
.cfi_offset r2, -24
.cfi_offset lr, -8
mr 12, 5
mtctr 5
bctrl
ld 2, 24(1)
.cfi_restore r2
ld 0, 40(1)
mtlr 0
.cfi_restore lr
/* FIXME: after this instruction backtrace breaks until control returns to the caller */
ld 1, 0(1)
blr
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
Chigozieworldwide/Multi | 30,650 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/armv8-mont-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.text
.globl bn_mul_mont_nohw
.def bn_mul_mont_nohw
.type 32
.endef
.align 5
bn_mul_mont_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) As for the removal of the first multiplication and addition
// instructions: the outcome of the first addition is
// guaranteed to be zero, which leaves two computationally
// significant outcomes: it either carries or it does not.
// So when does it carry? Is there an alternative way to
// deduce it? If you follow the operations, you can observe
// that the condition for a carry is quite simple: x6 being
// non-zero. So the carry can be calculated by adding -1 to
// x6, which is what the next instruction does.
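// (Illustrative example, not part of the generated code: with 4-bit
// words, if x6 = 0x6 then lo(np[0]*m1) = -0x6 mod 16 = 0xA, and the
// discarded sum 0x6 + 0xA = 0x10 is zero with a carry out; if x6 = 0
// there is no carry at all. "subs xzr,x6,#1" sets the carry flag
// exactly when x6 >= 1, reproducing that carry without the multiply.)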
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,L1st_skip
L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,L1st
L1st_skip:
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // upmost overflow bit
stp x12,x13,[x22]
Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
cbz x21,Linner_skip
Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,Linner
Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // upmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,Louter
// Final step. We see if the result is larger than the modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction, so we subtract the modulus, see if it borrowed,
// and conditionally copy the original value.
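// (Illustrative note: the net effect is a branchless
// rp[] = (t < n) ? t : t - n. The Lsub loop stores the unconditional
// difference, the final sbcs captures whether it borrowed, and the
// csel instructions in Lcond_copy either keep the difference or
// restore the original tp[] words, wiping tp as they go.)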
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl bn_sqr8x_mont
.def bn_sqr8x_mont
.type 32
.endef
.align 5
bn_sqr8x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b Lsqr8x_zero_start
Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
sub x14,x3,x5 // rewound ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_mul
.align 4
Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b Lsqr8x_outer_loop
.align 4
Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
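// (Illustrative note: the accumulator currently holds the cross
// products sum_{i<j} a[i]*a[j]*B^(i+j); since a^2 = 2*(that sum) +
// sum_i a[i]^2*B^(2*i), the shift-and-add loop below doubles it limb
// by limb with lsl #1/extr #63 while folding in the a[i]*a[i] terms.)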
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
sub x16,x3,x5 // rewound np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_tail
.align 4
Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne Lsqr8x_reduction
// Final step. We see if the result is larger than the modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction, so we subtract the modulus, see if it borrowed,
// and conditionally copy the original value.
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b Lsqr8x_done
.align 4
Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-x26,x28 hold result, x6-x13 hold modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-x13 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl bn_mul4x_mont
.def bn_mul4x_mont
.type 32
.endef
.align 5
bn_mul4x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub x26,sp,x5,lsl#3
lsl x5,x5,#3
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
add x10,x2,x5
add x27,x1,x5
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
mov x28,#0
mov x26,sp
Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_reduction
cbz x10,Lmul4x4_post_condition
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // a[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_tail
sub x11,x27,x5 // rewound x1
cbz x10,Lmul4x_proceed
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_1st_tail
.align 5
Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25 // lo(n[0..3]*t[0]*n0)
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_reduction
adc x0,x0,xzr
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next a[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_tail
sub x11,x3,x5 // rewound np?
adc x0,x0,xzr
cbz x10,Loop_mul4x_break
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_tail
.align 4
Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b Loop_mul4x_reduction
.align 4
Lmul4x_post:
// Final step. We see if the result is larger than the modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction, so we subtract the modulus, see if it borrowed,
// and conditionally copy the original value.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b Lmul4x_done
.align 4
Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-x22,x0 hold result, x14-x17 hold modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-x9 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Chigozieworldwide/Multi | 28,780 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/chacha-armv4-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
#if defined(__thumb2__) || defined(__clang__)
#define ldrhsb ldrbhs
#endif
.align 5
.Lsigma:
.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral
.Lone:
.long 1,0,0,0
.globl ChaCha20_ctr32_nohw
.hidden ChaCha20_ctr32_nohw
.type ChaCha20_ctr32_nohw,%function
.align 5
ChaCha20_ctr32_nohw:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0,r1,r2,r4-r11,lr}
adr r14,.Lsigma
ldmia r12,{r4,r5,r6,r7} @ load counter and nonce
sub sp,sp,#4*(16) @ off-load area
stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce
ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key
ldmia r14,{r0,r1,r2,r3} @ load sigma
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key
stmdb sp!,{r0,r1,r2,r3} @ copy sigma
str r10,[sp,#4*(16+10)] @ off-load "rx"
str r11,[sp,#4*(16+11)] @ off-load "rx"
b .Loop_outer_enter
.align 4
.Loop_outer:
ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material
str r11,[sp,#4*(32+2)] @ save len
str r12, [sp,#4*(32+1)] @ save inp
str r14, [sp,#4*(32+0)] @ save out
.Loop_outer_enter:
ldr r11, [sp,#4*(15)]
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
ldr r10, [sp,#4*(13)]
ldr r14,[sp,#4*(14)]
str r11, [sp,#4*(16+15)]
mov r11,#10
b .Loop
.align 4
.Loop:
subs r11,r11,#1
add r0,r0,r4
mov r12,r12,ror#16
add r1,r1,r5
mov r10,r10,ror#16
eor r12,r12,r0,ror#16
eor r10,r10,r1,ror#16
add r8,r8,r12
mov r4,r4,ror#20
add r9,r9,r10
mov r5,r5,ror#20
eor r4,r4,r8,ror#20
eor r5,r5,r9,ror#20
add r0,r0,r4
mov r12,r12,ror#24
add r1,r1,r5
mov r10,r10,ror#24
eor r12,r12,r0,ror#24
eor r10,r10,r1,ror#24
add r8,r8,r12
mov r4,r4,ror#25
add r9,r9,r10
mov r5,r5,ror#25
str r10,[sp,#4*(16+13)]
ldr r10,[sp,#4*(16+15)]
eor r4,r4,r8,ror#25
eor r5,r5,r9,ror#25
str r8,[sp,#4*(16+8)]
ldr r8,[sp,#4*(16+10)]
add r2,r2,r6
mov r14,r14,ror#16
str r9,[sp,#4*(16+9)]
ldr r9,[sp,#4*(16+11)]
add r3,r3,r7
mov r10,r10,ror#16
eor r14,r14,r2,ror#16
eor r10,r10,r3,ror#16
add r8,r8,r14
mov r6,r6,ror#20
add r9,r9,r10
mov r7,r7,ror#20
eor r6,r6,r8,ror#20
eor r7,r7,r9,ror#20
add r2,r2,r6
mov r14,r14,ror#24
add r3,r3,r7
mov r10,r10,ror#24
eor r14,r14,r2,ror#24
eor r10,r10,r3,ror#24
add r8,r8,r14
mov r6,r6,ror#25
add r9,r9,r10
mov r7,r7,ror#25
eor r6,r6,r8,ror#25
eor r7,r7,r9,ror#25
add r0,r0,r5
mov r10,r10,ror#16
add r1,r1,r6
mov r12,r12,ror#16
eor r10,r10,r0,ror#16
eor r12,r12,r1,ror#16
add r8,r8,r10
mov r5,r5,ror#20
add r9,r9,r12
mov r6,r6,ror#20
eor r5,r5,r8,ror#20
eor r6,r6,r9,ror#20
add r0,r0,r5
mov r10,r10,ror#24
add r1,r1,r6
mov r12,r12,ror#24
eor r10,r10,r0,ror#24
eor r12,r12,r1,ror#24
add r8,r8,r10
mov r5,r5,ror#25
str r10,[sp,#4*(16+15)]
ldr r10,[sp,#4*(16+13)]
add r9,r9,r12
mov r6,r6,ror#25
eor r5,r5,r8,ror#25
eor r6,r6,r9,ror#25
str r8,[sp,#4*(16+10)]
ldr r8,[sp,#4*(16+8)]
add r2,r2,r7
mov r10,r10,ror#16
str r9,[sp,#4*(16+11)]
ldr r9,[sp,#4*(16+9)]
add r3,r3,r4
mov r14,r14,ror#16
eor r10,r10,r2,ror#16
eor r14,r14,r3,ror#16
add r8,r8,r10
mov r7,r7,ror#20
add r9,r9,r14
mov r4,r4,ror#20
eor r7,r7,r8,ror#20
eor r4,r4,r9,ror#20
add r2,r2,r7
mov r10,r10,ror#24
add r3,r3,r4
mov r14,r14,ror#24
eor r10,r10,r2,ror#24
eor r14,r14,r3,ror#24
add r8,r8,r10
mov r7,r7,ror#25
add r9,r9,r14
mov r4,r4,ror#25
eor r7,r7,r8,ror#25
eor r4,r4,r9,ror#25
bne .Loop
ldr r11,[sp,#4*(32+2)] @ load len
str r8, [sp,#4*(16+8)] @ modulo-scheduled store
str r9, [sp,#4*(16+9)]
str r12,[sp,#4*(16+12)]
str r10, [sp,#4*(16+13)]
str r14,[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ rx and second half at sp+4*(16+8)
cmp r11,#64 @ done yet?
#ifdef __thumb2__
itete lo
#endif
addlo r12,sp,#4*(0) @ shortcut or ...
ldrhs r12,[sp,#4*(32+1)] @ ... load inp
addlo r14,sp,#4*(0) @ shortcut or ...
ldrhs r14,[sp,#4*(32+0)] @ ... load out
ldr r8,[sp,#4*(0)] @ load key material
ldr r9,[sp,#4*(1)]
#if __ARM_ARCH>=6 || !defined(__ARMEB__)
# if __ARM_ARCH<7
orr r10,r12,r14
tst r10,#3 @ are input and output aligned?
ldr r10,[sp,#4*(2)]
bne .Lunaligned
cmp r11,#64 @ restore flags
# else
ldr r10,[sp,#4*(2)]
# endif
ldr r11,[sp,#4*(3)]
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r2,r2,r10
add r3,r3,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r0,r0,r8 @ xor with input
eorhs r1,r1,r9
add r8,sp,#4*(4)
str r0,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r2,r2,r10
eorhs r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r1,[r14,#-12]
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r6,r6,r10
add r7,r7,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r4,r4,r8
eorhs r5,r5,r9
add r8,sp,#4*(8)
str r4,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r6,r6,r10
eorhs r7,r7,r11
str r5,[r14,#-12]
ldmia r8,{r8,r9,r10,r11} @ load key material
str r6,[r14,#-8]
add r0,sp,#4*(16+8)
str r7,[r14,#-4]
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
# ifdef __thumb2__
itt hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it
strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it
add r2,r2,r10
add r3,r3,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r0,r0,r8
eorhs r1,r1,r9
add r8,sp,#4*(12)
str r0,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r2,r2,r10
eorhs r3,r3,r11
str r1,[r14,#-12]
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
# ifdef __thumb2__
itt hi
# endif
addhi r8,r8,#1 @ next counter value
strhi r8,[sp,#4*(12)] @ save next counter value
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r6,r6,r10
add r7,r7,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r4,r4,r8
eorhs r5,r5,r9
# ifdef __thumb2__
it ne
# endif
ldrne r8,[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
itt hs
# endif
eorhs r6,r6,r10
eorhs r7,r7,r11
str r4,[r14],#16 @ store output
str r5,[r14,#-12]
# ifdef __thumb2__
it hs
# endif
subhs r11,r8,#64 @ len-=64
str r6,[r14,#-8]
str r7,[r14,#-4]
bhi .Loop_outer
beq .Ldone
# if __ARM_ARCH<7
b .Ltail
.align 4
.Lunaligned:@ unaligned endian-neutral path
cmp r11,#64 @ restore flags
# endif
#endif
#if __ARM_ARCH<7
ldr r11,[sp,#4*(3)]
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
add r2,r2,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r3,r3,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r0,r8,r0 @ xor with input (or zero)
eor r1,r9,r1
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r2,r10,r2
strb r0,[r14],#16 @ store output
eor r3,r11,r3
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r1,[r14,#-12]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-8]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r3,[r14,#-4]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-15]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r1,[r14,#-11]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-7]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r3,[r14,#-3]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-14]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r1,[r14,#-10]
strb r2,[r14,#-6]
eor r0,r8,r0,lsr#8
strb r3,[r14,#-2]
eor r1,r9,r1,lsr#8
strb r0,[r14,#-13]
eor r2,r10,r2,lsr#8
strb r1,[r14,#-9]
eor r3,r11,r3,lsr#8
strb r2,[r14,#-5]
strb r3,[r14,#-1]
add r8,sp,#4*(4+0)
ldmia r8,{r8,r9,r10,r11} @ load key material
add r0,sp,#4*(16+8)
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
add r6,r6,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r7,r7,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r4,r8,r4 @ xor with input (or zero)
eor r5,r9,r5
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r6,r10,r6
strb r4,[r14],#16 @ store output
eor r7,r11,r7
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r5,[r14,#-12]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-8]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r7,[r14,#-4]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-15]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r5,[r14,#-11]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-7]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r7,[r14,#-3]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-14]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r5,[r14,#-10]
strb r6,[r14,#-6]
eor r4,r8,r4,lsr#8
strb r7,[r14,#-2]
eor r5,r9,r5,lsr#8
strb r4,[r14,#-13]
eor r6,r10,r6,lsr#8
strb r5,[r14,#-9]
eor r7,r11,r7,lsr#8
strb r6,[r14,#-5]
strb r7,[r14,#-1]
add r8,sp,#4*(4+4)
ldmia r8,{r8,r9,r10,r11} @ load key material
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
# ifdef __thumb2__
itt hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx"
strhi r11,[sp,#4*(16+11)] @ copy "rx"
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
add r2,r2,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r3,r3,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r0,r8,r0 @ xor with input (or zero)
eor r1,r9,r1
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r2,r10,r2
strb r0,[r14],#16 @ store output
eor r3,r11,r3
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r1,[r14,#-12]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-8]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r3,[r14,#-4]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-15]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r1,[r14,#-11]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-7]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r3,[r14,#-3]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-14]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r1,[r14,#-10]
strb r2,[r14,#-6]
eor r0,r8,r0,lsr#8
strb r3,[r14,#-2]
eor r1,r9,r1,lsr#8
strb r0,[r14,#-13]
eor r2,r10,r2,lsr#8
strb r1,[r14,#-9]
eor r3,r11,r3,lsr#8
strb r2,[r14,#-5]
strb r3,[r14,#-1]
add r8,sp,#4*(4+8)
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
# ifdef __thumb2__
itt hi
# endif
addhi r8,r8,#1 @ next counter value
strhi r8,[sp,#4*(12)] @ save next counter value
add r5,r5,r9
add r6,r6,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r7,r7,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r4,r8,r4 @ xor with input (or zero)
eor r5,r9,r5
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r6,r10,r6
strb r4,[r14],#16 @ store output
eor r7,r11,r7
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r5,[r14,#-12]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-8]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r7,[r14,#-4]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-15]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r5,[r14,#-11]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-7]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r7,[r14,#-3]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-14]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r5,[r14,#-10]
strb r6,[r14,#-6]
eor r4,r8,r4,lsr#8
strb r7,[r14,#-2]
eor r5,r9,r5,lsr#8
strb r4,[r14,#-13]
eor r6,r10,r6,lsr#8
strb r5,[r14,#-9]
eor r7,r11,r7,lsr#8
strb r6,[r14,#-5]
strb r7,[r14,#-1]
# ifdef __thumb2__
it ne
# endif
ldrne r8,[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
it hs
# endif
subhs r11,r8,#64 @ len-=64
bhi .Loop_outer
beq .Ldone
#endif
.Ltail:
ldr r12,[sp,#4*(32+1)] @ load inp
add r9,sp,#4*(0)
ldr r14,[sp,#4*(32+0)] @ load out
.Loop_tail:
ldrb r10,[r9],#1 @ read buffer on stack
ldrb r11,[r12],#1 @ read input
subs r8,r8,#1
eor r11,r11,r10
strb r11,[r14],#1 @ store output
bne .Loop_tail
.Ldone:
add sp,sp,#4*(32+3)
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
.size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl ChaCha20_ctr32_neon
.hidden ChaCha20_ctr32_neon
.type ChaCha20_ctr32_neon,%function
.align 5
ChaCha20_ctr32_neon:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0,r1,r2,r4-r11,lr}
adr r14,.Lsigma
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so
stmdb sp!,{r0,r1,r2,r3}
vld1.32 {q1,q2},[r3] @ load key
ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key
sub sp,sp,#4*(16+16)
vld1.32 {q3},[r12] @ load counter and nonce
add r12,sp,#4*8
ldmia r14,{r0,r1,r2,r3} @ load sigma
vld1.32 {q0},[r14]! @ load sigma
vld1.32 {q12},[r14] @ one
vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce
vst1.32 {q0,q1},[sp] @ copy sigma|1/2key
str r10,[sp,#4*(16+10)] @ off-load "rx"
str r11,[sp,#4*(16+11)] @ off-load "rx"
vshl.i32 d26,d24,#1 @ two
vstr d24,[sp,#4*(16+0)]
vshl.i32 d28,d24,#2 @ four
vstr d26,[sp,#4*(16+2)]
vmov q4,q0
vstr d28,[sp,#4*(16+4)]
vmov q8,q0
vmov q5,q1
vmov q9,q1
b .Loop_neon_enter
.align 4
.Loop_neon_outer:
ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material
cmp r11,#64*2 @ if len<=64*2
bls .Lbreak_neon @ switch to integer-only
vmov q4,q0
str r11,[sp,#4*(32+2)] @ save len
vmov q8,q0
str r12, [sp,#4*(32+1)] @ save inp
vmov q5,q1
str r14, [sp,#4*(32+0)] @ save out
vmov q9,q1
.Loop_neon_enter:
ldr r11, [sp,#4*(15)]
vadd.i32 q7,q3,q12 @ counter+1
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
vmov q6,q2
ldr r10, [sp,#4*(13)]
vmov q10,q2
ldr r14,[sp,#4*(14)]
vadd.i32 q11,q7,q12 @ counter+2
str r11, [sp,#4*(16+15)]
mov r11,#10
add r12,r12,#3 @ counter+3
b .Loop_neon
.align 4
.Loop_neon:
subs r11,r11,#1
vadd.i32 q0,q0,q1
add r0,r0,r4
vadd.i32 q4,q4,q5
mov r12,r12,ror#16
vadd.i32 q8,q8,q9
add r1,r1,r5
veor q3,q3,q0
mov r10,r10,ror#16
veor q7,q7,q4
eor r12,r12,r0,ror#16
veor q11,q11,q8
eor r10,r10,r1,ror#16
vrev32.16 q3,q3
add r8,r8,r12
vrev32.16 q7,q7
mov r4,r4,ror#20
vrev32.16 q11,q11
add r9,r9,r10
vadd.i32 q2,q2,q3
mov r5,r5,ror#20
vadd.i32 q6,q6,q7
eor r4,r4,r8,ror#20
vadd.i32 q10,q10,q11
eor r5,r5,r9,ror#20
veor q12,q1,q2
add r0,r0,r4
veor q13,q5,q6
mov r12,r12,ror#24
veor q14,q9,q10
add r1,r1,r5
vshr.u32 q1,q12,#20
mov r10,r10,ror#24
vshr.u32 q5,q13,#20
eor r12,r12,r0,ror#24
vshr.u32 q9,q14,#20
eor r10,r10,r1,ror#24
vsli.32 q1,q12,#12
add r8,r8,r12
vsli.32 q5,q13,#12
mov r4,r4,ror#25
vsli.32 q9,q14,#12
add r9,r9,r10
vadd.i32 q0,q0,q1
mov r5,r5,ror#25
vadd.i32 q4,q4,q5
str r10,[sp,#4*(16+13)]
vadd.i32 q8,q8,q9
ldr r10,[sp,#4*(16+15)]
veor q12,q3,q0
eor r4,r4,r8,ror#25
veor q13,q7,q4
eor r5,r5,r9,ror#25
veor q14,q11,q8
str r8,[sp,#4*(16+8)]
vshr.u32 q3,q12,#24
ldr r8,[sp,#4*(16+10)]
vshr.u32 q7,q13,#24
add r2,r2,r6
vshr.u32 q11,q14,#24
mov r14,r14,ror#16
vsli.32 q3,q12,#8
str r9,[sp,#4*(16+9)]
vsli.32 q7,q13,#8
ldr r9,[sp,#4*(16+11)]
vsli.32 q11,q14,#8
add r3,r3,r7
vadd.i32 q2,q2,q3
mov r10,r10,ror#16
vadd.i32 q6,q6,q7
eor r14,r14,r2,ror#16
vadd.i32 q10,q10,q11
eor r10,r10,r3,ror#16
veor q12,q1,q2
add r8,r8,r14
veor q13,q5,q6
mov r6,r6,ror#20
veor q14,q9,q10
add r9,r9,r10
vshr.u32 q1,q12,#25
mov r7,r7,ror#20
vshr.u32 q5,q13,#25
eor r6,r6,r8,ror#20
vshr.u32 q9,q14,#25
eor r7,r7,r9,ror#20
vsli.32 q1,q12,#7
add r2,r2,r6
vsli.32 q5,q13,#7
mov r14,r14,ror#24
vsli.32 q9,q14,#7
add r3,r3,r7
vext.8 q2,q2,q2,#8
mov r10,r10,ror#24
vext.8 q6,q6,q6,#8
eor r14,r14,r2,ror#24
vext.8 q10,q10,q10,#8
eor r10,r10,r3,ror#24
vext.8 q1,q1,q1,#4
add r8,r8,r14
vext.8 q5,q5,q5,#4
mov r6,r6,ror#25
vext.8 q9,q9,q9,#4
add r9,r9,r10
vext.8 q3,q3,q3,#12
mov r7,r7,ror#25
vext.8 q7,q7,q7,#12
eor r6,r6,r8,ror#25
vext.8 q11,q11,q11,#12
eor r7,r7,r9,ror#25
vadd.i32 q0,q0,q1
add r0,r0,r5
vadd.i32 q4,q4,q5
mov r10,r10,ror#16
vadd.i32 q8,q8,q9
add r1,r1,r6
veor q3,q3,q0
mov r12,r12,ror#16
veor q7,q7,q4
eor r10,r10,r0,ror#16
veor q11,q11,q8
eor r12,r12,r1,ror#16
vrev32.16 q3,q3
add r8,r8,r10
vrev32.16 q7,q7
mov r5,r5,ror#20
vrev32.16 q11,q11
add r9,r9,r12
vadd.i32 q2,q2,q3
mov r6,r6,ror#20
vadd.i32 q6,q6,q7
eor r5,r5,r8,ror#20
vadd.i32 q10,q10,q11
eor r6,r6,r9,ror#20
veor q12,q1,q2
add r0,r0,r5
veor q13,q5,q6
mov r10,r10,ror#24
veor q14,q9,q10
add r1,r1,r6
vshr.u32 q1,q12,#20
mov r12,r12,ror#24
vshr.u32 q5,q13,#20
eor r10,r10,r0,ror#24
vshr.u32 q9,q14,#20
eor r12,r12,r1,ror#24
vsli.32 q1,q12,#12
add r8,r8,r10
vsli.32 q5,q13,#12
mov r5,r5,ror#25
vsli.32 q9,q14,#12
str r10,[sp,#4*(16+15)]
vadd.i32 q0,q0,q1
ldr r10,[sp,#4*(16+13)]
vadd.i32 q4,q4,q5
add r9,r9,r12
vadd.i32 q8,q8,q9
mov r6,r6,ror#25
veor q12,q3,q0
eor r5,r5,r8,ror#25
veor q13,q7,q4
eor r6,r6,r9,ror#25
veor q14,q11,q8
str r8,[sp,#4*(16+10)]
vshr.u32 q3,q12,#24
ldr r8,[sp,#4*(16+8)]
vshr.u32 q7,q13,#24
add r2,r2,r7
vshr.u32 q11,q14,#24
mov r10,r10,ror#16
vsli.32 q3,q12,#8
str r9,[sp,#4*(16+11)]
vsli.32 q7,q13,#8
ldr r9,[sp,#4*(16+9)]
vsli.32 q11,q14,#8
add r3,r3,r4
vadd.i32 q2,q2,q3
mov r14,r14,ror#16
vadd.i32 q6,q6,q7
eor r10,r10,r2,ror#16
vadd.i32 q10,q10,q11
eor r14,r14,r3,ror#16
veor q12,q1,q2
add r8,r8,r10
veor q13,q5,q6
mov r7,r7,ror#20
veor q14,q9,q10
add r9,r9,r14
vshr.u32 q1,q12,#25
mov r4,r4,ror#20
vshr.u32 q5,q13,#25
eor r7,r7,r8,ror#20
vshr.u32 q9,q14,#25
eor r4,r4,r9,ror#20
vsli.32 q1,q12,#7
add r2,r2,r7
vsli.32 q5,q13,#7
mov r10,r10,ror#24
vsli.32 q9,q14,#7
add r3,r3,r4
vext.8 q2,q2,q2,#8
mov r14,r14,ror#24
vext.8 q6,q6,q6,#8
eor r10,r10,r2,ror#24
vext.8 q10,q10,q10,#8
eor r14,r14,r3,ror#24
vext.8 q1,q1,q1,#12
add r8,r8,r10
vext.8 q5,q5,q5,#12
mov r7,r7,ror#25
vext.8 q9,q9,q9,#12
add r9,r9,r14
vext.8 q3,q3,q3,#4
mov r4,r4,ror#25
vext.8 q7,q7,q7,#4
eor r7,r7,r8,ror#25
vext.8 q11,q11,q11,#4
eor r4,r4,r9,ror#25
bne .Loop_neon
add r11,sp,#32
vld1.32 {q12,q13},[sp] @ load key material
vld1.32 {q14,q15},[r11]
ldr r11,[sp,#4*(32+2)] @ load len
str r8, [sp,#4*(16+8)] @ modulo-scheduled store
str r9, [sp,#4*(16+9)]
str r12,[sp,#4*(16+12)]
str r10, [sp,#4*(16+13)]
str r14,[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ rx and second half at sp+4*(16+8)
ldr r12,[sp,#4*(32+1)] @ load inp
ldr r14,[sp,#4*(32+0)] @ load out
vadd.i32 q0,q0,q12 @ accumulate key material
vadd.i32 q4,q4,q12
vadd.i32 q8,q8,q12
vldr d24,[sp,#4*(16+0)] @ one
vadd.i32 q1,q1,q13
vadd.i32 q5,q5,q13
vadd.i32 q9,q9,q13
vldr d26,[sp,#4*(16+2)] @ two
vadd.i32 q2,q2,q14
vadd.i32 q6,q6,q14
vadd.i32 q10,q10,q14
vadd.i32 d14,d14,d24 @ counter+1
vadd.i32 d22,d22,d26 @ counter+2
vadd.i32 q3,q3,q15
vadd.i32 q7,q7,q15
vadd.i32 q11,q11,q15
cmp r11,#64*4
blo .Ltail_neon
vld1.8 {q12,q13},[r12]! @ load input
mov r11,sp
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12 @ xor with input
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
vst1.8 {q0,q1},[r14]! @ store output
veor q5,q5,q13
vld1.8 {q12,q13},[r12]!
veor q6,q6,q14
vst1.8 {q2,q3},[r14]!
veor q7,q7,q15
vld1.8 {q14,q15},[r12]!
veor q8,q8,q12
vld1.32 {q0,q1},[r11]! @ load for next iteration
veor d25,d25,d25
vldr d24,[sp,#4*(16+4)] @ four
veor q9,q9,q13
vld1.32 {q2,q3},[r11]
veor q10,q10,q14
vst1.8 {q4,q5},[r14]!
veor q11,q11,q15
vst1.8 {q6,q7},[r14]!
vadd.i32 d6,d6,d24 @ next counter value
vldr d24,[sp,#4*(16+0)] @ one
ldmia sp,{r8,r9,r10,r11} @ load key material
add r0,r0,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
vst1.8 {q8,q9},[r14]!
add r1,r1,r9
ldr r9,[r12,#-12]
vst1.8 {q10,q11},[r14]!
add r2,r2,r10
ldr r10,[r12,#-8]
add r3,r3,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
eor r0,r0,r8 @ xor with input
add r8,sp,#4*(4)
eor r1,r1,r9
str r0,[r14],#16 @ store output
eor r2,r2,r10
str r1,[r14,#-12]
eor r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
add r5,r5,r9
ldr r9,[r12,#-12]
add r6,r6,r10
ldr r10,[r12,#-8]
add r7,r7,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
eor r4,r4,r8
add r8,sp,#4*(8)
eor r5,r5,r9
str r4,[r14],#16 @ store output
eor r6,r6,r10
str r5,[r14,#-12]
eor r7,r7,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r6,[r14,#-8]
add r0,sp,#4*(16+8)
str r7,[r14,#-4]
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
add r1,r1,r9
ldr r9,[r12,#-12]
# ifdef __thumb2__
it hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it
add r2,r2,r10
ldr r10,[r12,#-8]
# ifdef __thumb2__
it hi
# endif
strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it
add r3,r3,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
eor r0,r0,r8
add r8,sp,#4*(12)
eor r1,r1,r9
str r0,[r14],#16 @ store output
eor r2,r2,r10
str r1,[r14,#-12]
eor r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r8,r8,#4 @ next counter value
add r5,r5,r9
str r8,[sp,#4*(12)] @ save next counter value
ldr r8,[r12],#16 @ load input
add r6,r6,r10
add r4,r4,#3 @ counter+3
ldr r9,[r12,#-12]
add r7,r7,r11
ldr r10,[r12,#-8]
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
eor r4,r4,r8
# ifdef __thumb2__
it hi
# endif
ldrhi r8,[sp,#4*(32+2)] @ re-load len
eor r5,r5,r9
eor r6,r6,r10
str r4,[r14],#16 @ store output
eor r7,r7,r11
str r5,[r14,#-12]
sub r11,r8,#64*4 @ len-=64*4
str r6,[r14,#-8]
str r7,[r14,#-4]
bhi .Loop_neon_outer
b .Ldone_neon
.align 4
.Lbreak_neon:
@ harmonize NEON and integer-only stack frames: load data
@ from NEON frame, but save to integer-only one; distance
@ between the two is 4*(32+4+16-32)=4*(20).
str r11, [sp,#4*(20+32+2)] @ save len
add r11,sp,#4*(32+4)
str r12, [sp,#4*(20+32+1)] @ save inp
str r14, [sp,#4*(20+32+0)] @ save out
ldr r12,[sp,#4*(16+10)]
ldr r14,[sp,#4*(16+11)]
vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement
str r12,[sp,#4*(20+16+10)] @ copy "rx"
str r14,[sp,#4*(20+16+11)] @ copy "rx"
ldr r11, [sp,#4*(15)]
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
ldr r10, [sp,#4*(13)]
ldr r14,[sp,#4*(14)]
str r11, [sp,#4*(20+16+15)]
add r11,sp,#4*(20)
vst1.32 {q0,q1},[r11]! @ copy key
add sp,sp,#4*(20) @ switch frame
vst1.32 {q2,q3},[r11]
mov r11,#10
b .Loop @ go integer-only
.align 4
.Ltail_neon:
cmp r11,#64*3
bhs .L192_or_more_neon
cmp r11,#64*2
bhs .L128_or_more_neon
cmp r11,#64*1
bhs .L64_or_more_neon
add r8,sp,#4*(8)
vst1.8 {q0,q1},[sp]
add r10,sp,#4*(0)
vst1.8 {q2,q3},[r8]
b .Loop_tail_neon
.align 4
.L64_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
veor q2,q2,q14
veor q3,q3,q15
vst1.8 {q0,q1},[r14]!
vst1.8 {q2,q3},[r14]!
beq .Ldone_neon
add r8,sp,#4*(8)
vst1.8 {q4,q5},[sp]
add r10,sp,#4*(0)
vst1.8 {q6,q7},[r8]
sub r11,r11,#64*1 @ len-=64*1
b .Loop_tail_neon
.align 4
.L128_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
veor q5,q5,q13
vst1.8 {q0,q1},[r14]!
veor q6,q6,q14
vst1.8 {q2,q3},[r14]!
veor q7,q7,q15
vst1.8 {q4,q5},[r14]!
vst1.8 {q6,q7},[r14]!
beq .Ldone_neon
add r8,sp,#4*(8)
vst1.8 {q8,q9},[sp]
add r10,sp,#4*(0)
vst1.8 {q10,q11},[r8]
sub r11,r11,#64*2 @ len-=64*2
b .Loop_tail_neon
.align 4
.L192_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
veor q5,q5,q13
vld1.8 {q12,q13},[r12]!
veor q6,q6,q14
vst1.8 {q0,q1},[r14]!
veor q7,q7,q15
vld1.8 {q14,q15},[r12]!
veor q8,q8,q12
vst1.8 {q2,q3},[r14]!
veor q9,q9,q13
vst1.8 {q4,q5},[r14]!
veor q10,q10,q14
vst1.8 {q6,q7},[r14]!
veor q11,q11,q15
vst1.8 {q8,q9},[r14]!
vst1.8 {q10,q11},[r14]!
beq .Ldone_neon
ldmia sp,{r8,r9,r10,r11} @ load key material
add r0,r0,r8 @ accumulate key material
add r8,sp,#4*(4)
add r1,r1,r9
add r2,r2,r10
add r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
add r8,sp,#4*(8)
add r5,r5,r9
add r6,r6,r10
add r7,r7,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7}
add r0,sp,#4*(16+8)
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
add r8,sp,#4*(12)
add r1,r1,r9
add r2,r2,r10
add r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
add r8,sp,#4*(8)
add r5,r5,r9
add r4,r4,#3 @ counter+3
add r6,r6,r10
add r7,r7,r11
ldr r11,[sp,#4*(32+2)] @ re-load len
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7}
add r10,sp,#4*(0)
sub r11,r11,#64*3 @ len-=64*3
.Loop_tail_neon:
ldrb r8,[r10],#1 @ read buffer on stack
ldrb r9,[r12],#1 @ read input
subs r11,r11,#1
eor r8,r8,r9
strb r8,[r14],#1 @ store output
bne .Loop_tail_neon
.Ldone_neon:
add sp,sp,#4*(32+4)
vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15}
add sp,sp,#4*(16+3)
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
.size ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
Chigozieworldwide/Multi | 49,024 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha512-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
//              SHA256-hw      SHA256(*)       SHA512
// Apple A7     1.97           10.5 (+33%)     6.73 (-1%(**))
// Cortex-A53   2.38           15.5 (+115%)    10.0 (+150%(***))
// Cortex-A57   2.31           11.6 (+86%)     7.51 (+260%(***))
// Denver       2.01           10.5 (+26%)     6.70 (+8%)
// X-Gene                      20.0 (+100%)    12.8 (+300%(***))
// Mongoose     2.36           13.0 (+50%)     8.36 (+33%)
// Kryo         1.92           17.4 (+30%)     11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***)	Super-impressive coefficients over gcc-generated code are an
//	indication of some compiler "pathology"; most notably, code
//	generated with -mgeneral-regs-only is significantly faster
//	and the gap is only 40-90%.
#ifndef __KERNEL__
#endif
.text
.globl _sha512_block_data_order_nohw
.private_extern _sha512_block_data_order_nohw
.align 6
_sha512_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,LK512@PAGE
add x30,x30,LK512@PAGEOFF
stp x0,x2,[x29,#96]
Loop:
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
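The round comments in _sha512_block_data_order_nohw above (Sigma1(e), Sigma0(a), Ch(e,f,g), Maj(a,b,c), sigma0(X[i+1]), sigma1(X[i+14])) refer to the standard SHA-512 round functions from FIPS 180-4; the ror/lsr constants in the scalar code (14/18/41, 28/34/39, 1/8 with lsr 7, 19/61 with lsr 6) are exactly their rotation and shift amounts. A reference sketch in Rust, for orientation only:

// Reference definitions of the functions named in the round comments above.
fn big_sigma0(x: u64) -> u64 { x.rotate_right(28) ^ x.rotate_right(34) ^ x.rotate_right(39) }
fn big_sigma1(x: u64) -> u64 { x.rotate_right(14) ^ x.rotate_right(18) ^ x.rotate_right(41) }
fn small_sigma0(x: u64) -> u64 { x.rotate_right(1) ^ x.rotate_right(8) ^ (x >> 7) }
fn small_sigma1(x: u64) -> u64 { x.rotate_right(19) ^ x.rotate_right(61) ^ (x >> 6) }
fn ch(e: u64, f: u64, g: u64) -> u64 { (e & f) ^ (!e & g) }
fn maj(a: u64, b: u64, c: u64) -> u64 { (a & b) ^ (a & c) ^ (b & c) }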
.section __TEXT,__const
.align 6
LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl _sha512_block_data_order_hw
.private_extern _sha512_block_data_order_hw
.align 6
_sha512_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,LK512@PAGE
add x3,x3,LK512@PAGEOFF
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b Loop_hw
.align 4
Loop_hw:
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
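Each Loop_hw iteration above consumes one 128-byte block using the ARMv8 SHA-512 extension; the sha512h, sha512h2, sha512su0 and sha512su1 instructions are emitted as raw .long words so that older assemblers still accept the file. The su0/su1 pair advances the message schedule two 64-bit words at a time. A scalar Rust sketch of that recurrence (for orientation only; the vector code is organized differently):

// Scalar form of the SHA-512 message-schedule recurrence (FIPS 180-4):
// w[t] = sigma1(w[t-2]) + w[t-7] + sigma0(w[t-15]) + w[t-16] (mod 2^64).
fn expand_schedule(w: &mut [u64; 80]) {
    for t in 16..80 {
        let s0 = w[t - 15].rotate_right(1) ^ w[t - 15].rotate_right(8) ^ (w[t - 15] >> 7);
        let s1 = w[t - 2].rotate_right(19) ^ w[t - 2].rotate_right(61) ^ (w[t - 2] >> 6);
        w[t] = w[t - 16]
            .wrapping_add(s0)
            .wrapping_add(w[t - 7])
            .wrapping_add(s1);
    }
}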
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
Chigozieworldwide/Multi | 21,653 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghash-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _gcm_init_clmul
.private_extern _gcm_init_clmul
.p2align 4
_gcm_init_clmul:
_CET_ENDBR
L$_init_clmul:
movdqu (%rsi),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand L$0x1c2_polynomial(%rip),%xmm5
pxor %xmm5,%xmm2
pshufd $78,%xmm2,%xmm6
movdqa %xmm2,%xmm0
pxor %xmm2,%xmm6
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,0(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%rdi)
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm5
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm5,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm5,%xmm3
movdqu %xmm5,48(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,64(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,80(%rdi)
ret
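gcm_init_clmul above precomputes powers of the hash key H (plus their Karatsuba halves) so that the GHASH routines below can multiply in GF(2^128) with carry-less multiplies (the .byte 102,15,58,68,... pclmulqdq sequences) and reduce against L$0x1c2_polynomial. For orientation, here is a bit-at-a-time Rust sketch of the underlying field multiplication in the GCM specification's bit order; it describes the math only, not the reflected/Karatsuba layout the assembly uses:

// Bitwise GF(2^128) multiply following the GCM specification; R encodes the
// reduction polynomial x^128 + x^7 + x^2 + x + 1 in the spec's bit order.
fn gf128_mul(x: u128, y: u128) -> u128 {
    const R: u128 = 0xE1u128 << 120;
    let mut z = 0u128;
    let mut v = x;
    for i in 0..128 {
        if (y >> (127 - i)) & 1 == 1 {
            z ^= v;
        }
        let lsb = v & 1;
        v >>= 1;
        if lsb == 1 {
            v ^= R;
        }
    }
    z
}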
.globl _gcm_ghash_clmul
.private_extern _gcm_ghash_clmul
.p2align 5
_gcm_ghash_clmul:
_CET_ENDBR
L$_ghash_clmul:
movdqa L$bswap_mask(%rip),%xmm10
movdqu (%rdi),%xmm0
movdqu (%rsi),%xmm2
movdqu 32(%rsi),%xmm7
.byte 102,65,15,56,0,194
subq $0x10,%rcx
jz L$odd_tail
movdqu 16(%rsi),%xmm6
cmpq $0x30,%rcx
jb L$skip4x
subq $0x30,%rcx
movq $0xA040608020C0E000,%rax
movdqu 48(%rsi),%xmm14
movdqu 64(%rsi),%xmm15
movdqu 48(%rdx),%xmm3
movdqu 32(%rdx),%xmm11
.byte 102,65,15,56,0,218
.byte 102,69,15,56,0,218
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm11,%xmm12
.byte 102,68,15,58,68,222,0
.byte 102,68,15,58,68,238,17
.byte 102,68,15,58,68,231,16
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
xorps %xmm12,%xmm4
movdqu 16(%rdx),%xmm11
movdqu 0(%rdx),%xmm8
.byte 102,69,15,56,0,218
.byte 102,69,15,56,0,194
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm8,%xmm0
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,69,15,58,68,238,17
.byte 102,68,15,58,68,231,0
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jc L$tail4x
jmp L$mod4_loop
.p2align 5
L$mod4_loop:
.byte 102,65,15,58,68,199,0
xorps %xmm12,%xmm4
movdqu 48(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,65,15,58,68,207,17
xorps %xmm3,%xmm0
movdqu 32(%rdx),%xmm3
movdqa %xmm11,%xmm13
.byte 102,68,15,58,68,199,16
pshufd $78,%xmm11,%xmm12
xorps %xmm5,%xmm1
pxor %xmm11,%xmm12
.byte 102,65,15,56,0,218
movups 32(%rsi),%xmm7
xorps %xmm4,%xmm8
.byte 102,68,15,58,68,218,0
pshufd $78,%xmm3,%xmm4
pxor %xmm0,%xmm8
movdqa %xmm3,%xmm5
pxor %xmm1,%xmm8
pxor %xmm3,%xmm4
movdqa %xmm8,%xmm9
.byte 102,68,15,58,68,234,17
pslldq $8,%xmm8
psrldq $8,%xmm9
pxor %xmm8,%xmm0
movdqa L$7_mask(%rip),%xmm8
pxor %xmm9,%xmm1
.byte 102,76,15,110,200
pand %xmm0,%xmm8
.byte 102,69,15,56,0,200
pxor %xmm0,%xmm9
.byte 102,68,15,58,68,231,0
psllq $57,%xmm9
movdqa %xmm9,%xmm8
pslldq $8,%xmm9
.byte 102,15,58,68,222,0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
movdqu 0(%rdx),%xmm8
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,238,17
xorps %xmm11,%xmm3
movdqu 16(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,15,58,68,231,16
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
.byte 102,69,15,56,0,194
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
movdqa %xmm11,%xmm13
pxor %xmm12,%xmm4
pshufd $78,%xmm11,%xmm12
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm1
.byte 102,69,15,58,68,238,17
xorps %xmm11,%xmm3
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,68,15,58,68,231,0
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jnc L$mod4_loop
L$tail4x:
.byte 102,65,15,58,68,199,0
.byte 102,65,15,58,68,207,17
.byte 102,68,15,58,68,199,16
xorps %xmm12,%xmm4
xorps %xmm3,%xmm0
xorps %xmm5,%xmm1
pxor %xmm0,%xmm1
pxor %xmm4,%xmm8
pxor %xmm1,%xmm8
pxor %xmm0,%xmm1
movdqa %xmm8,%xmm9
psrldq $8,%xmm8
pslldq $8,%xmm9
pxor %xmm8,%xmm1
pxor %xmm9,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
addq $0x40,%rcx
jz L$done
movdqu 32(%rsi),%xmm7
subq $0x10,%rcx
jz L$odd_tail
L$skip4x:
movdqu (%rdx),%xmm8
movdqu 16(%rdx),%xmm3
.byte 102,69,15,56,0,194
.byte 102,65,15,56,0,218
pxor %xmm8,%xmm0
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
leaq 32(%rdx),%rdx
nop
subq $0x20,%rcx
jbe L$even_tail
nop
jmp L$mod_loop
.p2align 5
L$mod_loop:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
movdqu (%rdx),%xmm9
pxor %xmm0,%xmm8
.byte 102,69,15,56,0,202
movdqu 16(%rdx),%xmm3
pxor %xmm1,%xmm8
pxor %xmm9,%xmm1
pxor %xmm8,%xmm4
.byte 102,65,15,56,0,218
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm3,%xmm5
movdqa %xmm0,%xmm9
movdqa %xmm0,%xmm8
psllq $5,%xmm0
pxor %xmm0,%xmm8
.byte 102,15,58,68,218,0
psllq $1,%xmm0
pxor %xmm8,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm8
pslldq $8,%xmm0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pshufd $78,%xmm5,%xmm4
pxor %xmm8,%xmm1
pxor %xmm5,%xmm4
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,234,17
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
pxor %xmm9,%xmm0
leaq 32(%rdx),%rdx
psrlq $1,%xmm0
.byte 102,15,58,68,231,0
pxor %xmm1,%xmm0
subq $0x20,%rcx
ja L$mod_loop
L$even_tail:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
pxor %xmm0,%xmm8
pxor %xmm1,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testq %rcx,%rcx
jnz L$done
L$odd_tail:
movdqu (%rdx),%xmm8
.byte 102,69,15,56,0,194
pxor %xmm8,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,223,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
L$done:
.byte 102,65,15,56,0,194
movdqu %xmm0,(%rdi)
ret
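With that multiplication in hand, GHASH itself is a fold over 16-byte blocks: the accumulator is XORed with each block and multiplied by H, which is what gcm_ghash_clmul above computes (its main loop batches four blocks per iteration, 0x40 bytes at a time). A sketch reusing the gf128_mul above, illustrative only:

// GHASH update over full 16-byte blocks: Xi = (Xi ^ block) * H in GF(2^128).
fn ghash_update(xi: &mut u128, h: u128, blocks: &[[u8; 16]]) {
    for block in blocks {
        *xi = gf128_mul(*xi ^ u128::from_be_bytes(*block), h);
    }
}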
.globl _gcm_init_avx
.private_extern _gcm_init_avx
.p2align 5
_gcm_init_avx:
_CET_ENDBR
vzeroupper
vmovdqu (%rsi),%xmm2
vpshufd $78,%xmm2,%xmm2
vpshufd $255,%xmm2,%xmm4
vpsrlq $63,%xmm2,%xmm3
vpsllq $1,%xmm2,%xmm2
vpxor %xmm5,%xmm5,%xmm5
vpcmpgtd %xmm4,%xmm5,%xmm5
vpslldq $8,%xmm3,%xmm3
vpor %xmm3,%xmm2,%xmm2
vpand L$0x1c2_polynomial(%rip),%xmm5,%xmm5
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm2,%xmm2,%xmm6
vmovdqa %xmm2,%xmm0
vpxor %xmm2,%xmm6,%xmm6
movq $4,%r10
jmp L$init_start_avx
.p2align 5
L$init_loop_avx:
vpalignr $8,%xmm3,%xmm4,%xmm5
vmovdqu %xmm5,-16(%rdi)
vpunpckhqdq %xmm0,%xmm0,%xmm3
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3
vpxor %xmm0,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm3,%xmm1,%xmm1
vpsllq $57,%xmm0,%xmm3
vpsllq $62,%xmm0,%xmm4
vpxor %xmm3,%xmm4,%xmm4
vpsllq $63,%xmm0,%xmm3
vpxor %xmm3,%xmm4,%xmm4
vpslldq $8,%xmm4,%xmm3
vpsrldq $8,%xmm4,%xmm4
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm4,%xmm1,%xmm1
vpsrlq $1,%xmm0,%xmm4
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $5,%xmm4,%xmm4
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $1,%xmm0,%xmm0
vpxor %xmm1,%xmm0,%xmm0
L$init_start_avx:
vmovdqa %xmm0,%xmm5
vpunpckhqdq %xmm0,%xmm0,%xmm3
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3
vpxor %xmm0,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm3,%xmm1,%xmm1
vpsllq $57,%xmm0,%xmm3
vpsllq $62,%xmm0,%xmm4
vpxor %xmm3,%xmm4,%xmm4
vpsllq $63,%xmm0,%xmm3
vpxor %xmm3,%xmm4,%xmm4
vpslldq $8,%xmm4,%xmm3
vpsrldq $8,%xmm4,%xmm4
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm4,%xmm1,%xmm1
vpsrlq $1,%xmm0,%xmm4
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $5,%xmm4,%xmm4
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $1,%xmm0,%xmm0
vpxor %xmm1,%xmm0,%xmm0
vpshufd $78,%xmm5,%xmm3
vpshufd $78,%xmm0,%xmm4
vpxor %xmm5,%xmm3,%xmm3
vmovdqu %xmm5,0(%rdi)
vpxor %xmm0,%xmm4,%xmm4
vmovdqu %xmm0,16(%rdi)
leaq 48(%rdi),%rdi
subq $1,%r10
jnz L$init_loop_avx
vpalignr $8,%xmm4,%xmm3,%xmm5
vmovdqu %xmm5,-16(%rdi)
vzeroupper
ret
.globl _gcm_ghash_avx
.private_extern _gcm_ghash_avx
.p2align 5
_gcm_ghash_avx:
_CET_ENDBR
vzeroupper
vmovdqu (%rdi),%xmm10
leaq L$0x1c2_polynomial(%rip),%r10
leaq 64(%rsi),%rsi
vmovdqu L$bswap_mask(%rip),%xmm13
vpshufb %xmm13,%xmm10,%xmm10
cmpq $0x80,%rcx
jb L$short_avx
subq $0x80,%rcx
vmovdqu 112(%rdx),%xmm14
vmovdqu 0-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm14
vmovdqu 32-64(%rsi),%xmm7
vpunpckhqdq %xmm14,%xmm14,%xmm9
vmovdqu 96(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm14,%xmm9,%xmm9
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 16-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 80(%rdx),%xmm14
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 48-64(%rsi),%xmm6
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 64(%rdx),%xmm15
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 80-64(%rsi),%xmm7
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vmovdqu 48(%rdx),%xmm14
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm4,%xmm1,%xmm1
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 96-64(%rsi),%xmm6
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 128-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 32(%rdx),%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vmovdqu 16(%rdx),%xmm14
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm4,%xmm1,%xmm1
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 144-64(%rsi),%xmm6
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 176-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu (%rdx),%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 160-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2
leaq 128(%rdx),%rdx
cmpq $0x80,%rcx
jb L$tail_avx
vpxor %xmm10,%xmm15,%xmm15
subq $0x80,%rcx
jmp L$oop8x_avx
.p2align 5
L$oop8x_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 112(%rdx),%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpxor %xmm15,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11
vmovdqu 0-64(%rsi),%xmm6
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12
vmovdqu 32-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 96(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm3,%xmm10,%xmm10
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vxorps %xmm4,%xmm11,%xmm11
vmovdqu 16-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm5,%xmm12,%xmm12
vxorps %xmm15,%xmm8,%xmm8
vmovdqu 80(%rdx),%xmm14
vpxor %xmm10,%xmm12,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm11,%xmm12,%xmm12
vpslldq $8,%xmm12,%xmm9
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vpsrldq $8,%xmm12,%xmm12
vpxor %xmm9,%xmm10,%xmm10
vmovdqu 48-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm14
vxorps %xmm12,%xmm11,%xmm11
vpxor %xmm1,%xmm4,%xmm4
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 80-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 64(%rdx),%xmm15
vpalignr $8,%xmm10,%xmm10,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vxorps %xmm15,%xmm8,%xmm8
vpxor %xmm5,%xmm2,%xmm2
vmovdqu 48(%rdx),%xmm14
vpclmulqdq $0x10,(%r10),%xmm10,%xmm10
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 96-64(%rsi),%xmm6
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 128-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 32(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vpxor %xmm5,%xmm2,%xmm2
vxorps %xmm12,%xmm10,%xmm10
vmovdqu 16(%rdx),%xmm14
vpalignr $8,%xmm10,%xmm10,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 144-64(%rsi),%xmm6
vpclmulqdq $0x10,(%r10),%xmm10,%xmm10
vxorps %xmm11,%xmm12,%xmm12
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 176-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu (%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 160-64(%rsi),%xmm6
vpxor %xmm12,%xmm15,%xmm15
vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2
vpxor %xmm10,%xmm15,%xmm15
leaq 128(%rdx),%rdx
subq $0x80,%rcx
jnc L$oop8x_avx
addq $0x80,%rcx
jmp L$tail_no_xor_avx
.p2align 5
L$short_avx:
vmovdqu -16(%rdx,%rcx,1),%xmm14
leaq (%rdx,%rcx,1),%rdx
vmovdqu 0-64(%rsi),%xmm6
vmovdqu 32-64(%rsi),%xmm7
vpshufb %xmm13,%xmm14,%xmm15
vmovdqa %xmm0,%xmm3
vmovdqa %xmm1,%xmm4
vmovdqa %xmm2,%xmm5
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -32(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 16-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -48(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 48-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu 80-64(%rsi),%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -64(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -80(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 96-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu 128-64(%rsi),%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -96(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -112(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 144-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovq 184-64(%rsi),%xmm7
subq $0x10,%rcx
jmp L$tail_avx
.p2align 5
L$tail_avx:
vpxor %xmm10,%xmm15,%xmm15
L$tail_no_xor_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu (%r10),%xmm12
vpxor %xmm0,%xmm3,%xmm10
vpxor %xmm1,%xmm4,%xmm11
vpxor %xmm2,%xmm5,%xmm5
vpxor %xmm10,%xmm5,%xmm5
vpxor %xmm11,%xmm5,%xmm5
vpslldq $8,%xmm5,%xmm9
vpsrldq $8,%xmm5,%xmm5
vpxor %xmm9,%xmm10,%xmm10
vpxor %xmm5,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9
vpalignr $8,%xmm10,%xmm10,%xmm10
vpxor %xmm9,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9
vpalignr $8,%xmm10,%xmm10,%xmm10
vpxor %xmm11,%xmm10,%xmm10
vpxor %xmm9,%xmm10,%xmm10
cmpq $0,%rcx
jne L$short_avx
vpshufb %xmm13,%xmm10,%xmm10
vmovdqu %xmm10,(%rdi)
vzeroupper
ret
.section __DATA,__const
.p2align 6
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
L$0x1c2_polynomial:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
L$7_mask:
.long 7,0,7,0
.p2align 6
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6
.text
#endif
|
Chigozieworldwide/Multi | 40,421 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/chacha-armv8-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.section .rodata
.align 5
.Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
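// The two quads above are the ChaCha constant "expand 32-byte k" stored as
// little-endian 32-bit words ("expa","nd 3","2-by","te k"); descriptive note added.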
.Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
.globl ChaCha20_ctr32_nohw
.hidden ChaCha20_ctr32_nohw
.type ChaCha20_ctr32_nohw,%function
.align 5
ChaCha20_ctr32_nohw:
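// Scalar (general-register) ChaCha20 block function. Judging by the load/store
// comments below: x0 = output, x1 = input, x2 = length in bytes, x3 = 256-bit key,
// x4 = 16-byte counter block (block counter plus nonce).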
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
.Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#64
.Loop:
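// One iteration = one ChaCha double round (four column quarter-rounds followed by
// four diagonal quarter-rounds); x4 counts down from 10, giving the full 20 rounds.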
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,.Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo .Ltail
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi .Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.align 4
.Ltail:
add x2,x2,#64
.Less_than_64:
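// Final partial block: the packed 64-byte keystream block is stored to the stack
// buffer, XORed with the remaining input one byte at a time in .Loop_tail, and the
// stack copy is then wiped with zeros.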
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
.Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,.Loop_tail
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw
.globl ChaCha20_ctr32_neon
.hidden ChaCha20_ctr32_neon
.type ChaCha20_ctr32_neon,%function
.align 5
ChaCha20_ctr32_neon:
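// NEON path: inputs of 512 bytes or more branch to the wide .L512_or_more_neon code
// below; otherwise each outer iteration produces 256 bytes (4 blocks), one block in
// the general registers and three in NEON registers (v0-v3, v4-v7, v16-v19).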
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs .L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
.Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
.Loop_neon:
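// Scalar and NEON rounds are interleaved: the w-register instructions advance the
// general-register block while the v-register instructions advance the three NEON
// blocks in parallel; x4 again counts 10 double rounds (20 rounds).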
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,.Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo .Ltail_neon
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi .Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo .Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq .Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo .Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq .Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo .Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq .Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b .Last_neon
.Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b .Last_neon
.Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b .Last_neon
.align 4
.Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
.Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,.Loop_tail_neon
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
.Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon
.type ChaCha20_512_neon,%function
.align 5
ChaCha20_512_neon:
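// Wide path for large inputs: each outer iteration generates 512 bytes (8 blocks),
// six blocks in NEON registers (v0-v23) and two in the general registers. The NEON
// double rounds are split between .Loop_upper_neon and .Loop_lower_neon so that each
// half runs alongside one complete scalar block.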
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
.L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not typo
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not typo
.Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
.Loop_upper_neon:
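// First half: x4 counts 5 iterations, each containing two scalar double rounds and
// one NEON double round, completing the first general-register block and the first
// 10 rounds of the six NEON blocks.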
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,.Loop_upper_neon
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
.Loop_lower_neon:
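// Second half: same structure as .Loop_upper_neon; finishes the remaining 10 rounds
// of the NEON blocks while computing the second general-register block with the
// incremented counter.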
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,.Loop_lower_neon
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs .Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq .Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs .Loop_outer_neon
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b .Loop_outer
.Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_512_neon,.-ChaCha20_512_neon
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
Chigozieworldwide/Multi | 49,242 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha512-armv8-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
//                  SHA256-hw   SHA256(*)     SHA512
// Apple A7         1.97        10.5 (+33%)   6.73 (-1%(**))
// Cortex-A53       2.38        15.5 (+115%)  10.0 (+150%(***))
// Cortex-A57       2.31        11.6 (+86%)   7.51 (+260%(***))
// Denver           2.01        10.5 (+26%)   6.70 (+8%)
// X-Gene                       20.0 (+100%)  12.8 (+300%(***))
// Mongoose         2.36        13.0 (+50%)   8.36 (+33%)
// Kryo             1.92        17.4 (+30%)   11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
#endif
.text
.globl sha512_block_data_order_nohw
.hidden sha512_block_data_order_nohw
.type sha512_block_data_order_nohw,%function
.align 6
sha512_block_data_order_nohw:
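// Scalar SHA-512 compression. Judging by the loads below: x0 points to the eight
// 64-bit state words, x1 to the input, and x2 gives the number of 128-byte blocks
// (see "end of input" below, computed as x1 + x2*128).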
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,.LK512
add x30,x30,:lo12:.LK512
stp x0,x2,[x29,#96]
.Loop:
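// One 128-byte block per iteration; round constants are streamed from .LK512 via
// x30 ("*K++" below), and the 32-byte stack area holds spilled message-schedule words.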
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
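// Rounds 16..79 (.Loop_16_xx): the message schedule is extended on the fly,
// roughly X[i] = sigma1(X[i-2]) + X[i-7] + sigma0(X[i-15]) + X[i-16], while one
// compression round is performed per schedule word. The loop ends when the
// zero terminator at the end of .LK512 is read into the K register.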
.Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,.Loop_16_xx
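// The zero word terminating .LK512 has been reached, so all 80 rounds are done.
// Reload the context pointer, input pointer and end-of-input limit from the
// frame, rewind the constant pointer (80 constants + terminator = 648 bytes),
// fold the working variables back into the hash state, and loop while input remains.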
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne .Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw
.section .rodata
.align 6
.type .LK512,%object
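// SHA-512 round constants K[0..79], followed by a zero word that the scalar
// code above uses as its loop terminator.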
.LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.size .LK512,.-.LK512
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl sha512_block_data_order_hw
.hidden sha512_block_data_order_hw
.type sha512_block_data_order_hw,%function
.align 6
sha512_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
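// Hardware path: the rounds below use the Armv8 SHA-512 extension instructions
// (sha512h, sha512h2, sha512su0, sha512su1), emitted as raw .inst words,
// presumably so the file still assembles with toolchains lacking those mnemonics.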
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,.LK512
add x3,x3,:lo12:.LK512
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b .Loop_hw
.align 4
.Loop_hw:
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
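// x2 counts 128-byte blocks. On the final iteration (x2 just reached zero),
// x1 is rewound by 128 so the "load next input" at the bottom of the loop
// re-reads the current block instead of reading past the end of the data.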
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,.Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
.size sha512_block_data_order_hw,.-sha512_block_data_order_hw
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
Chigozieworldwide/Multi | 74,317 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/chacha20_poly1305_armv8-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.section .rodata
.align 7
.Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.Linc:
.long 1,2,3,4
.Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.text
.type .Lpoly_hash_ad_internal,%function
.align 6
.Lpoly_hash_ad_internal:
.cfi_startproc
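// Absorbs the additional data at x3 (x4 bytes) into the Poly1305 accumulator
// held in x8/x9/x10, using the clamped key r in x16/x17; x15 holds the
// constant 1 used to set the high padding bit of each 16-byte block.
// A trailing block shorter than 16 bytes is zero-padded via v20 below.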
cbnz x4, .Lpoly_hash_intro
ret
.Lpoly_hash_intro:
cmp x4, #16
b.lt .Lpoly_hash_ad_tail
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
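// The multiply/reduce sequence below computes acc = acc * r modulo 2^130 - 5:
// the full product is formed in t0..t3, then the bits above 2^130 are folded
// back in as h + 4*h (i.e. multiplied by 5, since 2^130 = 5 mod p), which
// leaves acc2 only a couple of bits long.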
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b .Lpoly_hash_ad_internal
.Lpoly_hash_ad_tail:
cbz x4, .Lpoly_hash_ad_ret
eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD
sub x4, x4, #1
.Lpoly_hash_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, x4]
mov v20.b[0], w11
subs x4, x4, #1
b.ge .Lpoly_hash_tail_16_compose
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lpoly_hash_ad_ret:
ret
.cfi_endproc
.size .Lpoly_hash_ad_internal, .-.Lpoly_hash_ad_internal
/////////////////////////////////
//
// void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data);
//
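// With the AArch64 calling convention the code below treats x0 as the output
// pointer, x1 as the input pointer, x2 as the input length, x3/x4 as the AAD
// pointer/length and x5 as the key/nonce block (seal_data); the extra_in
// length appears to be read from [x5, #56].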
.globl chacha20_poly1305_seal
.hidden chacha20_poly1305_seal
.type chacha20_poly1305_seal,%function
.align 6
chacha20_poly1305_seal:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
ldr x12, [x5, #56] // The total cipher text length includes extra_in_len
add x12, x12, x2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x12
cmp x2, #128
b.le .Lseal_128 // Optimization for smaller buffers
// Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext,
// and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically,
// the fifth block (A4-D4) horizontally.
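// The four "vertical" blocks are loaded with ld4r so each vN holds the same
// state word for all four blocks (one block per lane, differing only in the
// counter added from .Linc); the fifth block lives in v4/v9/v14/v19 in the
// usual row layout. The zip1/zip2 transposes after the rounds convert the
// vertical blocks back to contiguous form for XORing with the plaintext.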
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
sub x5, x5, #32
mov x6, #10
.align 5
.Lseal_init_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.hi .Lseal_init_rounds
add v15.4s, v15.4s, v25.4s
mov x11, #4
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
and v4.16b, v4.16b, v27.16b
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
mov x16, v4.d[0] // Move the R key to GPRs
mov x17, v4.d[1]
mov v27.16b, v9.16b // Store the S key
bl .Lpoly_hash_ad_internal
mov x3, x0
cmp x2, #256
b.le .Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #256
mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds
mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256
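// Concretely: each pass through .Lseal_main_loop_rounds performs one ChaCha20
// double-round and absorbs one 16-byte Poly1305 block; each time the inner
// counter x6 expires, one extra block is absorbed per outer pass counted by x7.
// With x6=4/x7=6 that is 10 double-rounds and 16 blocks (256 bytes); the steady
// state below uses x6=0/x7=10, i.e. 20 blocks (320 bytes) per main-loop trip.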
.Lseal_main_loop:
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
sub x5, x5, #32
.align 5
.Lseal_main_loop_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.ge .Lseal_main_loop_rounds
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
subs x7, x7, #1
b.gt .Lseal_main_loop_rounds
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
cmp x2, #320
b.le .Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #320
mov x6, #0
mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration
b .Lseal_main_loop
.Lseal_tail:
// This part of the function handles the storage and authentication of the last [0,320) bytes
// We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data.
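// The tail is drained in three stages: whole 64-byte blocks are encrypted and
// hashed here, .Lseal_tail_64 then handles 16-byte blocks, and .Lseal_tail_16
// pads any final partial block (together with extra_in bytes, if present)
// into one last Poly1305 block.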
cmp x2, #64
b.lt .Lseal_tail_64
// Store and authenticate 64B blocks per iteration
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
// Shift the state left by 64 bytes for the next iteration of the loop
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
mov v1.16b, v2.16b
mov v6.16b, v7.16b
mov v11.16b, v12.16b
mov v16.16b, v17.16b
mov v2.16b, v3.16b
mov v7.16b, v8.16b
mov v12.16b, v13.16b
mov v17.16b, v18.16b
mov v3.16b, v4.16b
mov v8.16b, v9.16b
mov v13.16b, v14.16b
mov v18.16b, v19.16b
b .Lseal_tail
.Lseal_tail_64:
ldp x3, x4, [x5, #48] // extra_in_ptr and extra_in_len
// Here we handle the last [0,64) bytes of plaintext
cmp x2, #16
b.lt .Lseal_tail_16
// Each iteration encrypts and authenticates a 16B block
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b}, [x0], #16
sub x2, x2, #16
// Shift the state left by 16 bytes for the next iteration of the loop
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
b .Lseal_tail_64
.Lseal_tail_16:
// Here we handle the last [0,16) bytes of ciphertext that require a padded block
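// The remaining plaintext bytes are gathered into v20 (with extra_in bytes
// loaded above them when available), the keystream in v0 is masked via v21 so
// it only covers the plaintext byte positions, and the resulting ciphertext
// bytes are stored one at a time and hashed, concatenated with the extra_in
// bytes, as a single padded Poly1305 block.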
cbz x2, .Lseal_hash_extra
eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes
not v22.16b, v20.16b
mov x6, x2
add x1, x1, x2
cbz x4, .Lseal_tail_16_compose // No extra data to pad with, zero padding
mov x7, #16 // We need to load some extra_in first for padding
sub x7, x7, x2
cmp x4, x7
csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register
mov x12, x7
add x3, x3, x7
sub x4, x4, x7
.Lseal_tail16_compose_extra_in:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x7, x7, #1
b.gt .Lseal_tail16_compose_extra_in
add x3, x3, x12
.Lseal_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x1, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt .Lseal_tail_16_compose
and v0.16b, v0.16b, v21.16b
eor v20.16b, v20.16b, v0.16b
mov v21.16b, v20.16b
.Lseal_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt .Lseal_tail_16_store
// Hash in the final ct block concatenated with extra_in
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lseal_hash_extra:
cbz x4, .Lseal_finalize
.Lseal_hash_extra_loop:
cmp x4, #16
b.lt .Lseal_hash_extra_tail
ld1 {v20.16b}, [x3], #16
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b .Lseal_hash_extra_loop
.Lseal_hash_extra_tail:
cbz x4, .Lseal_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext
add x3, x3, x4
.Lseal_hash_extra_load:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x4, x4, #1
b.gt .Lseal_hash_extra_load
// Hash in the final padded extra_in block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lseal_finalize:
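// Hash the 16-byte length block (AD length || ciphertext length, stored in
// v31 at function entry) to finish the Poly1305 computation.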
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
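// acc is fully reduced by conditionally subtracting p = 2^130 - 5 (limbs
// 0xff..fb, 0xff..ff, 3); the csel keeps the subtracted value only when no
// borrow occurred. The tag is then (acc + s) mod 2^128, with s taken from v27.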
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
.Lseal_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
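// For small inputs only three ChaCha20 blocks are generated: the block with
// counter 0 supplies the Poly1305 r/s key pair, and the blocks with counters
// 1 and 2 provide up to 128 bytes of keystream.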
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
.Lseal_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi .Lseal_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
// Only the first 32 bytes of the third block (counter = 0) are needed,
// so skip updating v12 and v17.
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl .Lpoly_hash_ad_internal
b .Lseal_tail
.cfi_endproc
.size chacha20_poly1305_seal,.-chacha20_poly1305_seal
/////////////////////////////////
//
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data);
//
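// Per the AAPCS64 the arguments arrive as: x0 = pt (output), x1 = ct (input),
// x2 = len_in, x3 = ad, x4 = len_ad, x5 = aead_data; the key and nonce are
// loaded from aead_data and the computed tag is written back to it.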
.globl chacha20_poly1305_open
.hidden chacha20_poly1305_open
.type chacha20_poly1305_open,%function
.align 6
chacha20_poly1305_open:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x2
cmp x2, #128
b.le .Lopen_128 // Optimization for smaller buffers
// Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys
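// As in the RFC 7539 Poly1305 key generation, the first 16 bytes of that
// block (clamped with the mask in v27) become r and the next 16 bytes become s.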
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
mov x6, #10
.align 5
.Lopen_init_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.hi .Lopen_init_rounds
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
and v0.16b, v0.16b, v27.16b
mov x16, v0.d[0] // Move the R key to GPRs
mov x17, v0.d[1]
mov v27.16b, v5.16b // Store the S key
bl .Lpoly_hash_ad_internal
.Lopen_ad_done:
mov x3, x1
// Each iteration of the loop hashes 320 bytes and prepares 320 bytes of keystream
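// Poly1305 runs over the ciphertext, which is already available when
// decrypting, so hashing (via the x3 pointer) is interleaved with the
// ChaCha20 rounds that generate the keystream for those same bytes.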
.Lopen_main_loop:
cmp x2, #192
b.lt .Lopen_tail
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
sub x5, x5, #32
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12
sub x4, x4, #10
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full
cbz x7, .Lopen_main_loop_rounds_short
.align 5
.Lopen_main_loop_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lopen_main_loop_rounds_short:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x7, x7, #1
b.gt .Lopen_main_loop_rounds
subs x6, x6, #1
b.ge .Lopen_main_loop_rounds_short
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
// We can always safely store 192 bytes
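// The loop is only entered with at least 192 bytes remaining, so the first
// three 64-byte blocks are written unconditionally; the fourth and fifth are
// guarded by the length checks below.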
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #192
mov v0.16b, v3.16b
mov v5.16b, v8.16b
mov v10.16b, v13.16b
mov v15.16b, v18.16b
cmp x2, #64
b.lt .Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v4.16b
mov v5.16b, v9.16b
mov v10.16b, v14.16b
mov v15.16b, v19.16b
cmp x2, #64
b.lt .Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b .Lopen_main_loop
.Lopen_tail:
cbz x2, .Lopen_finalize
lsr x4, x2, #4 // How many whole blocks we have to hash
cmp x2, #64
b.le .Lopen_tail_64
cmp x2, #128
b.le .Lopen_tail_128
.Lopen_tail_192:
// We need three more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
mov v17.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v21.16b, v21.16b, v21.16b
ins v23.s[0], v25.s[0]
ins v21.d[0], x15
add v22.4s, v23.4s, v21.4s
add v21.4s, v22.4s, v21.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing
sub x4, x4, x7
cbz x7, .Lopen_tail_192_rounds_no_hash
.Lopen_tail_192_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lopen_tail_192_rounds_no_hash:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x7, x7, #1
b.gt .Lopen_tail_192_rounds
subs x6, x6, #1
b.ge .Lopen_tail_192_rounds_no_hash
// We hashed 160 bytes at most; we may still have 32 bytes left to hash
.Lopen_tail_192_hash:
cbz x4, .Lopen_tail_192_hash_done
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b .Lopen_tail_192_hash
.Lopen_tail_192_hash_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v12.4s, v12.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #128
b .Lopen_tail_64_store
.Lopen_tail_128:
// We need two more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v22.16b, v22.16b, v22.16b
ins v23.s[0], v25.s[0]
ins v22.d[0], x15
add v22.4s, v22.4s, v23.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
mov x6, #10
sub x6, x6, x4
.Lopen_tail_128_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #4
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #12
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #4
subs x6, x6, #1
b.gt .Lopen_tail_128_rounds
cbz x4, .Lopen_tail_128_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b .Lopen_tail_128_rounds
.Lopen_tail_128_rounds_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b .Lopen_tail_64_store
.Lopen_tail_64:
// We just need a single block
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
ins v23.s[0], v25.s[0]
add v15.4s, v15.4s, v23.4s
mov x6, #10
sub x6, x6, x4
.Lopen_tail_64_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.gt .Lopen_tail_64_rounds
cbz x4, .Lopen_tail_64_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b .Lopen_tail_64_rounds
.Lopen_tail_64_rounds_done:
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v15.4s, v15.4s, v23.4s
.Lopen_tail_64_store:
cmp x2, #16
b.lt .Lopen_tail_16
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
st1 {v20.16b}, [x0], #16
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
sub x2, x2, #16
b .Lopen_tail_64_store
.Lopen_tail_16:
// Here we handle the last [0,16) bytes that require a padded block
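// The leftover ciphertext bytes are gathered into v20 (with v21 built as a
// matching byte mask), hashed as the final padded Poly1305 block, then XORed
// with the keystream in v0 and written out byte-by-byte.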
cbz x2, .Lopen_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask
not v22.16b, v20.16b
add x7, x1, x2
mov x6, x2
.Lopen_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x7, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt .Lopen_tail_16_compose
and v20.16b, v20.16b, v21.16b
// Hash in the final padded block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
.Lopen_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt .Lopen_tail_16_store
.Lopen_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
.Lopen_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
.Lopen_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi .Lopen_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl .Lpoly_hash_ad_internal
.Lopen_128_store:
cmp x2, #64
b.lt .Lopen_128_store_64
ld1 {v20.16b - v23.16b}, [x1], #64
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
.Lopen_128_store_64:
lsr x4, x2, #4
mov x3, x1
.Lopen_128_hash_64:
cbz x4, .Lopen_tail_64_store
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b .Lopen_128_hash_64
.cfi_endproc
.size chacha20_poly1305_open,.-chacha20_poly1305_open
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
Chigozieworldwide/Multi | 4,266 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghashv8-armx-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,%function
.align 4
gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
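// Each 128x128-bit carry-less product below uses Karatsuba: pmull on the low
// halves, pmull2 on the high halves, and one pmull on the XOR of the halves,
// followed by a two-phase reduction with the 0xc2... constant in v19.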
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.size gcm_init_clmul,.-gcm_init_clmul
.globl gcm_gmult_clmul
.hidden gcm_gmult_clmul
.type gcm_gmult_clmul,%function
.align 4
gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
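// Computes Xi = Xi * H in GF(2^128): one Karatsuba carry-less multiply using
// the twisted H and the precomputed Karatsuba term from Htable, then a
// two-phase fold back to 128 bits using the 0xc2... constant in v19.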
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_gmult_clmul,.-gcm_gmult_clmul
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
Chigozieworldwide/Multi | 18,647 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/x86_64-mont-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _bn_mul_mont_nohw
.private_extern _bn_mul_mont_nohw
.p2align 4
_bn_mul_mont_nohw:
_CET_ENDBR
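// Word-serial Montgomery multiplication. Per the SysV AMD64 ABI: rdi = result,
// rsi = a, rdx = b, rcx = modulus n, r8 points to n0 (the Montgomery constant
// -n^-1 mod 2^64), r9d = number of 64-bit limbs. The product is accumulated in
// a scratch area reserved on the stack below.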
movl %r9d,%r9d
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
negq %r9
movq %rsp,%r11
leaq -16(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
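// Walk the newly reserved stack region one page at a time so the guard page
// is touched in order (stack probing) before the scratch area is used.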
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul_page_walk
jmp L$mul_page_walk_done
.p2align 4
L$mul_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul_page_walk
L$mul_page_walk_done:
movq %rax,8(%rsp,%r9,8)
L$mul_body:
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%r13
leaq 1(%r15),%r15
jmp L$1st_enter
.p2align 4
L$1st:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r13
movq %r10,%r11
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
L$1st_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 1(%r15),%r15
movq %rdx,%r10
mulq %rbp
cmpq %r9,%r15
jne L$1st
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
movq %r10,%r11
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
jmp L$outer
.p2align 4
L$outer:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq %r8,%rbp
movq (%rsp),%r10
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq 8(%rsp),%r10
movq %rdx,%r13
leaq 1(%r15),%r15
jmp L$inner_enter
.p2align 4
L$inner:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
L$inner_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
leaq 1(%r15),%r15
mulq %rbp
cmpq %r9,%r15
jne L$inner
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
cmpq %r9,%r14
jb L$outer
xorq %r14,%r14
movq (%rsp),%rax
movq %r9,%r15
.p2align 4
L$sub: sbbq (%rcx,%r14,8),%rax
movq %rax,(%rdi,%r14,8)
movq 8(%rsp,%r14,8),%rax
leaq 1(%r14),%r14
decq %r15
jnz L$sub
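// %rax holds the top word of the result; once the final borrow is
// subtracted from it below, it becomes an all-ones mask exactly when the
// result is smaller than the modulus. %rbx gets the complementary mask and
// L$copy uses the pair to select either the original value or the
// difference for every output word without branching, overwriting the
// scratch area on the stack as it goes.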
sbbq $0,%rax
movq $-1,%rbx
xorq %rax,%rbx
xorq %r14,%r14
movq %r9,%r15
L$copy:
movq (%rdi,%r14,8),%rcx
movq (%rsp,%r14,8),%rdx
andq %rbx,%rcx
andq %rax,%rdx
movq %r9,(%rsp,%r14,8)
orq %rcx,%rdx
movq %rdx,(%rdi,%r14,8)
leaq 1(%r14),%r14
subq $1,%r15
jnz L$copy
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul_epilogue:
ret
.globl _bn_mul4x_mont
.private_extern _bn_mul4x_mont
.p2align 4
_bn_mul4x_mont:
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
negq %r9
movq %rsp,%r11
leaq -32(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul4x_page_walk
jmp L$mul4x_page_walk_done
L$mul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul4x_page_walk
L$mul4x_page_walk_done:
movq %rax,8(%rsp,%r9,8)
L$mul4x_body:
movq %rdi,16(%rsp,%r9,8)
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp L$1st4x
.p2align 4
L$1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb L$1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
leaq 1(%r14),%r14
.p2align 2
L$outer4x:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq (%rsp),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%rsp),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp L$inner4x
.p2align 4
L$inner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq 8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb L$inner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 1(%r14),%r14
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%rsp,%r9,8),%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
cmpq %r9,%r14
jb L$outer4x
movq 16(%rsp,%r9,8),%rdi
leaq -4(%r9),%r15
movq 0(%rsp),%rax
movq 8(%rsp),%rdx
shrq $2,%r15
leaq (%rsp),%rsi
xorq %r14,%r14
subq 0(%rcx),%rax
movq 16(%rsi),%rbx
movq 24(%rsi),%rbp
sbbq 8(%rcx),%rdx
L$sub4x:
movq %rax,0(%rdi,%r14,8)
movq %rdx,8(%rdi,%r14,8)
sbbq 16(%rcx,%r14,8),%rbx
movq 32(%rsi,%r14,8),%rax
movq 40(%rsi,%r14,8),%rdx
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
movq %rbp,24(%rdi,%r14,8)
sbbq 32(%rcx,%r14,8),%rax
movq 48(%rsi,%r14,8),%rbx
movq 56(%rsi,%r14,8),%rbp
sbbq 40(%rcx,%r14,8),%rdx
leaq 4(%r14),%r14
decq %r15
jnz L$sub4x
movq %rax,0(%rdi,%r14,8)
movq 32(%rsi,%r14,8),%rax
sbbq 16(%rcx,%r14,8),%rbx
movq %rdx,8(%rdi,%r14,8)
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
sbbq $0,%rax
movq %rbp,24(%rdi,%r14,8)
pxor %xmm0,%xmm0
.byte 102,72,15,110,224
pcmpeqd %xmm5,%xmm5
pshufd $0,%xmm4,%xmm4
movq %r9,%r15
pxor %xmm4,%xmm5
shrq $2,%r15
xorl %eax,%eax
jmp L$copy4x
.p2align 4
L$copy4x:
movdqa (%rsp,%rax,1),%xmm1
movdqu (%rdi,%rax,1),%xmm2
pand %xmm4,%xmm1
pand %xmm5,%xmm2
movdqa 16(%rsp,%rax,1),%xmm3
movdqa %xmm0,(%rsp,%rax,1)
por %xmm2,%xmm1
movdqu 16(%rdi,%rax,1),%xmm2
movdqu %xmm1,(%rdi,%rax,1)
pand %xmm4,%xmm3
pand %xmm5,%xmm2
movdqa %xmm0,16(%rsp,%rax,1)
por %xmm2,%xmm3
movdqu %xmm3,16(%rdi,%rax,1)
leaq 32(%rax),%rax
decq %r15
jnz L$copy4x
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul4x_epilogue:
ret
.globl _bn_sqr8x_mont
.private_extern _bn_sqr8x_mont
.p2align 5
_bn_sqr8x_mont:
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$sqr8x_prologue:
movl %r9d,%r10d
shll $3,%r9d
shlq $3+2,%r10
negq %r9
leaq -64(%rsp,%r9,2),%r11
movq %rsp,%rbp
movq (%r8),%r8
subq %rsi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$sqr8x_sp_alt
subq %r11,%rbp
leaq -64(%rbp,%r9,2),%rbp
jmp L$sqr8x_sp_done
.p2align 5
L$sqr8x_sp_alt:
leaq 4096-64(,%r9,2),%r10
leaq -64(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$sqr8x_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$sqr8x_page_walk
jmp L$sqr8x_page_walk_done
.p2align 4
L$sqr8x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$sqr8x_page_walk
L$sqr8x_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$sqr8x_body:
.byte 102,72,15,110,209
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,73,15,110,218
testq %rdx,%rdx
jz L$sqr8x_nox
call _bn_sqrx8x_internal
leaq (%r8,%rcx,1),%rbx
movq %rcx,%r9
movq %rcx,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp L$sqr8x_sub
.p2align 5
L$sqr8x_nox:
call _bn_sqr8x_internal
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
movq %r9,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp L$sqr8x_sub
.p2align 5
L$sqr8x_sub:
movq 0(%rbx),%r12
movq 8(%rbx),%r13
movq 16(%rbx),%r14
movq 24(%rbx),%r15
leaq 32(%rbx),%rbx
sbbq 0(%rbp),%r12
sbbq 8(%rbp),%r13
sbbq 16(%rbp),%r14
sbbq 24(%rbp),%r15
leaq 32(%rbp),%rbp
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz L$sqr8x_sub
sbbq $0,%rax
leaq (%rbx,%r9,1),%rbx
leaq (%rdi,%r9,1),%rdi
.byte 102,72,15,110,200
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
jmp L$sqr8x_cond_copy
.p2align 5
L$sqr8x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
movdqa %xmm0,-32(%rbx,%rdx,1)
movdqa %xmm0,-16(%rbx,%rdx,1)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
addq $32,%r9
jnz L$sqr8x_cond_copy
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$sqr8x_epilogue:
ret
.globl _bn_mulx4x_mont
.private_extern _bn_mulx4x_mont
.p2align 5
_bn_mulx4x_mont:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mulx4x_prologue:
shll $3,%r9d
xorq %r10,%r10
subq %r9,%r10
movq (%r8),%r8
leaq -72(%rsp,%r10,1),%rbp
andq $-128,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
jmp L$mulx4x_page_walk_done
.p2align 4
L$mulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
L$mulx4x_page_walk_done:
leaq (%rdx,%r9,1),%r10
movq %r9,0(%rsp)
shrq $5,%r9
movq %r10,16(%rsp)
subq $1,%r9
movq %r8,24(%rsp)
movq %rdi,32(%rsp)
movq %rax,40(%rsp)
movq %r9,48(%rsp)
jmp L$mulx4x_body
.p2align 5
L$mulx4x_body:
leaq 8(%rdx),%rdi
movq (%rdx),%rdx
leaq 64+32(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r14
addq %rax,%r11
movq %rdi,8(%rsp)
mulxq 16(%rsi),%r12,%r13
adcq %r14,%r12
adcq $0,%r13
movq %r8,%rdi
imulq 24(%rsp),%r8
xorq %rbp,%rbp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%rdi
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00
movq 48(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp L$mulx4x_1st
.p2align 5
L$mulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_1st
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
addq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
jmp L$mulx4x_outer
.p2align 5
L$mulx4x_outer:
movq (%rdi),%rdx
leaq 8(%rdi),%rdi
subq %rax,%rsi
movq %r15,(%rbx)
leaq 64+32(%rsp),%rbx
subq %rax,%rcx
mulxq 0(%rsi),%r8,%r11
xorl %ebp,%ebp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
adoxq -16(%rbx),%r12
adcxq %rbp,%r13
adoxq %rbp,%r13
movq %rdi,8(%rsp)
movq %r8,%r15
imulq 24(%rsp),%r8
xorl %ebp,%ebp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
adcxq %rax,%r13
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
adoxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
leaq 32(%rcx),%rcx
adcxq %rax,%r12
adoxq %rbp,%r15
movq 48(%rsp),%rdi
movq %r12,-16(%rbx)
jmp L$mulx4x_inner
.p2align 5
L$mulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-32(%rbx)
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_inner
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
subq 0(%rbx),%rbp
adcq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
cmpq 16(%rsp),%rdi
jne L$mulx4x_outer
leaq 64(%rsp),%rbx
subq %rax,%rcx
negq %r15
movq %rax,%rdx
shrq $3+2,%rax
movq 32(%rsp),%rdi
jmp L$mulx4x_sub
.p2align 5
L$mulx4x_sub:
movq 0(%rbx),%r11
movq 8(%rbx),%r12
movq 16(%rbx),%r13
movq 24(%rbx),%r14
leaq 32(%rbx),%rbx
sbbq 0(%rcx),%r11
sbbq 8(%rcx),%r12
sbbq 16(%rcx),%r13
sbbq 24(%rcx),%r14
leaq 32(%rcx),%rcx
movq %r11,0(%rdi)
movq %r12,8(%rdi)
movq %r13,16(%rdi)
movq %r14,24(%rdi)
leaq 32(%rdi),%rdi
decq %rax
jnz L$mulx4x_sub
sbbq $0,%r15
leaq 64(%rsp),%rbx
subq %rdx,%rdi
.byte 102,73,15,110,207
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
jmp L$mulx4x_cond_copy
.p2align 5
L$mulx4x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
subq $32,%rdx
jnz L$mulx4x_cond_copy
movq %rdx,(%rbx)
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mulx4x_epilogue:
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 4
#endif
|
Chigozieworldwide/Multi | 30,641 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/armv8-mont-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.text
.globl _bn_mul_mont_nohw
.private_extern _bn_mul_mont_nohw
.align 5
_bn_mul_mont_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) Regarding the removal of the first multiplication and
// addition instructions: the outcome of the first addition is
// guaranteed to be zero, which leaves two computationally
// significant outcomes: it either carries or it does not. So
// when does it carry, and is there an alternative way to
// deduce it? If you follow the operations, you can observe
// that the condition for a carry is quite simple: x6 being
// non-zero. The carry can therefore be calculated by adding
// -1 to x6, which is what the next instruction does.
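// Note that n0 is the Montgomery constant -np[0]^-1 mod 2^64, so
// x6 + np[0]*m1 == 0 (mod 2^64): the low 64 bits of the sum are exactly
// zero and the addition carries precisely when x6 is non-zero, which is
// the condition "subs xzr,x6,#1" reproduces in the carry flag.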
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,L1st_skip
L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,L1st
L1st_skip:
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // upmost overflow bit
stp x12,x13,[x22]
Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
cbz x21,Linner_skip
Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,Linner
Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // upmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,Louter
// Final step. We check whether the result is larger than the modulus
// and, if it is, subtract the modulus. But comparison implies
// subtraction, so we subtract the modulus, see whether it borrowed,
// and conditionally copy the original value.
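// Concretely, Lsub below writes tp[j]-np[j] to rp[]; the final borrow then
// drives the csel instructions in Lcond_copy, which keep the original tp[]
// when the result was smaller than the modulus and the subtracted value
// otherwise, while the temporary area on the stack is wiped as it is read.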
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _bn_sqr8x_mont
.private_extern _bn_sqr8x_mont
.align 5
_bn_sqr8x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b Lsqr8x_zero_start
Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
	sub	x14,x3,x5	// rewound ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_mul
.align 4
Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b Lsqr8x_outer_loop
.align 4
Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
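// Since (sum a_i*2^(64*i))^2 = sum a_i^2*2^(128*i)
//                            + 2*sum_{i<j} a_i*a_j*2^(64*(i+j)),
// the loops above accumulated only the cross products a_i*a_j (i<j); the
// shift-and-add pass below doubles them and adds in the a_i^2 terms.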
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
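// Each pass picks m = t[0]*n0 mod 2^64, adds m*n so that the lowest word
// of t becomes zero, and effectively shifts t down by one word; eight such
// words (512 bits) are retired per trip through Lsqr8x_reduction.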
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
	sub	x16,x3,x5	// rewound np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_tail
.align 4
Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne Lsqr8x_reduction
// Final step. We check whether the result is larger than the modulus
// and, if it is, subtract the modulus. But comparison implies
// subtraction, so we subtract the modulus, see whether it borrowed,
// and conditionally copy the original value.
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b Lsqr8x_done
.align 4
Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-x26,x28 hold the result, x6-x13 hold the modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-x13 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _bn_mul4x_mont
.private_extern _bn_mul4x_mont
.align 5
_bn_mul4x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub x26,sp,x5,lsl#3
lsl x5,x5,#3
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
add x10,x2,x5
add x27,x1,x5
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
mov x28,#0
mov x26,sp
Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_reduction
cbz x10,Lmul4x4_post_condition
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // a[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_tail
	sub	x11,x27,x5	// rewound x1
cbz x10,Lmul4x_proceed
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_1st_tail
.align 5
Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
	mul	x11,x15,x25	// lo(n[0..3]*t[0]*n0)
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
	umulh	x10,x14,x25	// hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_reduction
adc x0,x0,xzr
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next a[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_tail
	sub	x11,x3,x5	// rewound np?
adc x0,x0,xzr
cbz x10,Loop_mul4x_break
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_tail
.align 4
Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b Loop_mul4x_reduction
.align 4
Lmul4x_post:
// Final step. We check whether the result is larger than the modulus
// and, if it is, subtract the modulus. But comparison implies
// subtraction, so we subtract the modulus, see whether it borrowed,
// and conditionally copy the original value.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b Lmul4x_done
.align 4
Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-x22,x0 hold the result, x14-x17 hold the modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-x9 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
Chigozieworldwide/Multi | 69,148 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha256-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _sha256_block_data_order_nohw
.private_extern _sha256_block_data_order_nohw
.p2align 4
_sha256_block_data_order_nohw:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $64+32,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp L$loop
.p2align 4
L$loop:
movl %ebx,%edi
leaq K256(%rip),%rbp
xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
jmp L$rounds_16_xx
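// Each iteration of L$rounds_16_xx first extends the message schedule with
// the SHA-256 small-sigma functions (rotates and shifts of W[t-15] and
// W[t-2], plus W[t-7] and W[t-16]) and then applies the usual round update
// built from Ch, Maj and the big-sigma rotations, with constants from K256.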
.p2align 4
L$rounds_16_xx:
movl 4(%rsp),%r13d
movl 56(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
movl 60(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
movl 0(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
movl 4(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
movl 8(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
movl 12(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
movl 16(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
movl 20(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
movl 24(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
movl 28(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
movl 32(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
movl 36(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
movl 40(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
movl 44(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
movl 48(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
movl 52(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
cmpb $0,3(%rbp)
jnz L$rounds_16_xx
movq 64+0(%rsp),%rdi
addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop
movq 88(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue:
ret
.section __DATA,__const
.p2align 6
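// K256: the 64 SHA-256 round constants, with each row of four written twice so
// the SIMD code paths can load pre-duplicated copies.  The trailing rows are
// byte-shuffle masks for the SSSE3/AVX paths; the first of them also supplies
// the zero sentinel byte that terminates the round loops above.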
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
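// ASCII identification string: "SHA256 block transform for x86_64, CRYPTOGAMS
// by <appro@openssl.org>", NUL-terminated.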
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.globl _sha256_block_data_order_hw
.private_extern _sha256_block_data_order_hw
.p2align 6
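// SHA extensions ("SHA-NI") implementation.  Arguments follow the
// sha256_block_data_order convention: %rdi = state, %rsi = input, %rdx = number
// of 64-byte blocks.  The sha256rnds2 / sha256msg1 / sha256msg2 and pshufb
// instructions are emitted as raw .byte sequences (15,56,203 / 15,56,204 /
// 15,56,205 / 102,15,56,0) so the file assembles even with tools that lack
// the SHA-NI mnemonics.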
_sha256_block_data_order_hw:
_CET_ENDBR
leaq K256+128(%rip),%rcx
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa 512-128(%rcx),%xmm7
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm7,%xmm8
.byte 102,15,58,15,202,8
punpcklqdq %xmm0,%xmm2
jmp L$oop_shaext
.p2align 4
L$oop_shaext:
movdqu (%rsi),%xmm3
movdqu 16(%rsi),%xmm4
movdqu 32(%rsi),%xmm5
.byte 102,15,56,0,223
movdqu 48(%rsi),%xmm6
movdqa 0-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 102,15,56,0,231
movdqa %xmm2,%xmm10
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
nop
movdqa %xmm1,%xmm9
.byte 15,56,203,202
movdqa 32-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 102,15,56,0,239
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
leaq 64(%rsi),%rsi
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 64-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 102,15,56,0,247
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 96-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 128-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 160-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 192-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 224-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 256-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 288-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 320-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 352-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 384-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 416-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
.byte 15,56,203,202
paddd %xmm7,%xmm6
movdqa 448-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
.byte 15,56,205,245
movdqa %xmm8,%xmm7
.byte 15,56,203,202
movdqa 480-128(%rcx),%xmm0
paddd %xmm6,%xmm0
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
decq %rdx
nop
.byte 15,56,203,202
paddd %xmm10,%xmm2
paddd %xmm9,%xmm1
jnz L$oop_shaext
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm7
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,215,8
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
ret
.globl _sha256_block_data_order_ssse3
.private_extern _sha256_block_data_order_ssse3
.p2align 6
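// SSSE3 implementation: the message schedule is computed four words at a time
// in XMM registers and interleaved with the scalar round computation.  The
// eight working variables live in %eax,%ebx,%ecx,%edx,%r8d..%r11d, and the
// schedule words (already summed with the round constants) are staged in the
// 64 bytes at the top of the stack.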
_sha256_block_data_order_ssse3:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue_ssse3:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp L$loop_ssse3
.p2align 4
L$loop_ssse3:
movdqa K256+512(%rip),%xmm7
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
.byte 102,15,56,0,199
movdqu 48(%rsi),%xmm3
leaq K256(%rip),%rbp
.byte 102,15,56,0,207
movdqa 0(%rbp),%xmm4
movdqa 32(%rbp),%xmm5
.byte 102,15,56,0,215
paddd %xmm0,%xmm4
movdqa 64(%rbp),%xmm6
.byte 102,15,56,0,223
movdqa 96(%rbp),%xmm7
paddd %xmm1,%xmm5
paddd %xmm2,%xmm6
paddd %xmm3,%xmm7
movdqa %xmm4,0(%rsp)
movl %eax,%r14d
movdqa %xmm5,16(%rsp)
movl %ebx,%edi
movdqa %xmm6,32(%rsp)
xorl %ecx,%edi
movdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp L$ssse3_00_47
.p2align 4
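// Each pass through L$ssse3_00_47 expands the next 16 schedule words, adds the
// corresponding round constants from K256 (staging the sums at 0..48(%rsp)),
// and runs 16 rounds.  The sentinel test at the bottom exits after three
// passes; the final 16 rounds follow without further message expansion.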
L$ssse3_00_47:
subq $-128,%rbp
rorl $14,%r13d
movdqa %xmm1,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm3,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,224,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,250,4
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm3,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 4(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm0
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm0
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm0,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 0(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm0,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,0(%rsp)
rorl $14,%r13d
movdqa %xmm2,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm0,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,225,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,251,4
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm0,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 20(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm1
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm1
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm1,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 32(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm1,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,16(%rsp)
rorl $14,%r13d
movdqa %xmm3,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm1,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,226,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,248,4
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm1,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 36(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm2
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm2
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm2,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 64(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm2,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,32(%rsp)
rorl $14,%r13d
movdqa %xmm0,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm2,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,227,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,249,4
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm2,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 52(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm3
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm3
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm3,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 96(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm3,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne L$ssse3_00_47
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop_ssse3
movq 88(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue_ssse3:
ret
.globl _sha256_block_data_order_avx
.private_extern _sha256_block_data_order_avx
.p2align 6
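// AVX implementation: same structure as the SSSE3 version, but the message
// schedule uses VEX-encoded (vp*) instructions and the rotates in the round
// function are expressed as shrdl with identical source and destination
// registers, which is equivalent to a rotate right.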
_sha256_block_data_order_avx:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue_avx:
vzeroupper
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
vmovdqa K256+512+32(%rip),%xmm8
vmovdqa K256+512+64(%rip),%xmm9
jmp L$loop_avx
.p2align 4
L$loop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%edi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%edi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp L$avx_00_47
.p2align 4
L$avx_00_47:
subq $-128,%rbp
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm3,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm0,%xmm0
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpshufd $80,%xmm0,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm0,%xmm0
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm0,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm1,%xmm1
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpshufd $80,%xmm1,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm1,%xmm1
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm1,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm2,%xmm2
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpshufd $80,%xmm2,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm2,%xmm2
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm2,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm3,%xmm3
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpshufd $80,%xmm3,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm3,%xmm3
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne L$avx_00_47
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop_avx
movq 88(%rsp),%rsi
vzeroupper
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue_avx:
ret
#endif
|
Chigozieworldwide/Multi | 4,159 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghashv8-armx-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_clmul
.def gcm_init_clmul
.type 32
.endef
.align 4
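// Precomputes the GHASH key table: converts H into its "twisted" form and
// derives H^2, H^3 and H^4 with PMULL/PMULL2, storing them (together with the
// packed Karatsuba pre-processed halves) into Htable at x0.  x1 points at the
// raw hash subkey H.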
gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.globl gcm_gmult_clmul
.def gcm_gmult_clmul
.type 32
.endef
.align 4
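// Multiplies the current hash value Xi (at x0) by H from the precomputed table
// (at x1) in GF(2^128): one PMULL/PMULL2 Karatsuba multiplication followed by
// the two-phase reduction with the 0xe1/0xc2 constant, after which the result
// is byte-reversed and written back to x0.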
gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Chigozieworldwide/Multi | 19,873 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesni-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.p2align 4
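// _aesni_encrypt2: private helper that runs the AES-NI round loop over two
// blocks held in %xmm2 and %xmm3, with the key schedule at %rcx. Throughout
// this file the .byte sequences are hand-assembled AES-NI opcodes emitted by
// the generator: 102,15,56,220,* is aesenc and 102,15,56,221,* is aesenclast
// (the 102,68,... / 102,65,... forms add a REX prefix to reach xmm8-xmm15),
// presumably kept in raw byte form for assemblers without AES-NI support.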
_aesni_encrypt2:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
L$enc_loop2:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop2
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.p2align 4
_aesni_encrypt3:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
L$enc_loop3:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop3
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.p2align 4
_aesni_encrypt4:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 0x0f,0x1f,0x00
addq $16,%rax
L$enc_loop4:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop4
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.p2align 4
_aesni_encrypt6:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,217
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp L$enc_loop6_enter
.p2align 4
L$enc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
L$enc_loop6_enter:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop6
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.p2align 4
_aesni_encrypt8:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,209
pxor %xmm0,%xmm7
pxor %xmm0,%xmm8
.byte 102,15,56,220,217
pxor %xmm0,%xmm9
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp L$enc_loop8_inner
.p2align 4
L$enc_loop8:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
L$enc_loop8_inner:
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
L$enc_loop8_enter:
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop8
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
.byte 102,68,15,56,221,192
.byte 102,68,15,56,221,200
ret
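// _aes_hw_ctr32_encrypt_blocks: AES-CTR with a 32-bit big-endian counter.
// The register usage below implies the SysV argument order in=%rdi, out=%rsi,
// blocks=%rdx, key=%rcx, ivec=%r8: the single-block path loads the counter
// block from (%r8) and the round count from 240(%rcx), while the bulk path
// keeps eight counter blocks, already XORed with the first round key, live
// in 0..112(%rsp).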
.globl _aes_hw_ctr32_encrypt_blocks
.private_extern _aes_hw_ctr32_encrypt_blocks
.p2align 4
_aes_hw_ctr32_encrypt_blocks:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit(%rip)
#endif
cmpq $1,%rdx
jne L$ctr32_bulk
movups (%r8),%xmm2
movups (%rdi),%xmm3
movl 240(%rcx),%edx
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
L$oop_enc1_1:
.byte 102,15,56,220,209
decl %edx
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
jnz L$oop_enc1_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
xorps %xmm3,%xmm2
pxor %xmm3,%xmm3
movups %xmm2,(%rsi)
xorps %xmm2,%xmm2
jmp L$ctr32_epilogue
.p2align 4
L$ctr32_bulk:
leaq (%rsp),%r11
pushq %rbp
subq $128,%rsp
andq $-16,%rsp
movdqu (%r8),%xmm2
movdqu (%rcx),%xmm0
movl 12(%r8),%r8d
pxor %xmm0,%xmm2
movl 12(%rcx),%ebp
movdqa %xmm2,0(%rsp)
bswapl %r8d
movdqa %xmm2,%xmm3
movdqa %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm2,64(%rsp)
movdqa %xmm2,80(%rsp)
movdqa %xmm2,96(%rsp)
movq %rdx,%r10
movdqa %xmm2,112(%rsp)
leaq 1(%r8),%rax
leaq 2(%r8),%rdx
bswapl %eax
bswapl %edx
xorl %ebp,%eax
xorl %ebp,%edx
.byte 102,15,58,34,216,3
leaq 3(%r8),%rax
movdqa %xmm3,16(%rsp)
.byte 102,15,58,34,226,3
bswapl %eax
movq %r10,%rdx
leaq 4(%r8),%r10
movdqa %xmm4,32(%rsp)
xorl %ebp,%eax
bswapl %r10d
.byte 102,15,58,34,232,3
xorl %ebp,%r10d
movdqa %xmm5,48(%rsp)
leaq 5(%r8),%r9
movl %r10d,64+12(%rsp)
bswapl %r9d
leaq 6(%r8),%r10
movl 240(%rcx),%eax
xorl %ebp,%r9d
bswapl %r10d
movl %r9d,80+12(%rsp)
xorl %ebp,%r10d
leaq 7(%r8),%r9
movl %r10d,96+12(%rsp)
bswapl %r9d
xorl %ebp,%r9d
movl %r9d,112+12(%rsp)
movups 16(%rcx),%xmm1
movdqa 64(%rsp),%xmm6
movdqa 80(%rsp),%xmm7
cmpq $8,%rdx
jb L$ctr32_tail
leaq 128(%rcx),%rcx
subq $8,%rdx
jmp L$ctr32_loop8
.p2align 5
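// Bulk loop: encrypts eight counter blocks per iteration, interleaving the
// AES rounds with refreshing the low counter word of each stack block
// (bswap of the incremented counter, then xor with %ebp, which holds the
// matching word of the first round key, stored at offset+12 of each block).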
L$ctr32_loop8:
addl $8,%r8d
movdqa 96(%rsp),%xmm8
.byte 102,15,56,220,209
movl %r8d,%r9d
movdqa 112(%rsp),%xmm9
.byte 102,15,56,220,217
bswapl %r9d
movups 32-128(%rcx),%xmm0
.byte 102,15,56,220,225
xorl %ebp,%r9d
nop
.byte 102,15,56,220,233
movl %r9d,0+12(%rsp)
leaq 1(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 48-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,16+12(%rsp)
leaq 2(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 64-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,32+12(%rsp)
leaq 3(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 80-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,48+12(%rsp)
leaq 4(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 96-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,64+12(%rsp)
leaq 5(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 112-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,80+12(%rsp)
leaq 6(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 128-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,96+12(%rsp)
leaq 7(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 144-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
xorl %ebp,%r9d
movdqu 0(%rdi),%xmm10
.byte 102,15,56,220,232
movl %r9d,112+12(%rsp)
cmpl $11,%eax
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 160-128(%rcx),%xmm0
jb L$ctr32_enc_done
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 176-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 192-128(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 208-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 224-128(%rcx),%xmm0
jmp L$ctr32_enc_done
.p2align 4
L$ctr32_enc_done:
movdqu 16(%rdi),%xmm11
pxor %xmm0,%xmm10
movdqu 32(%rdi),%xmm12
pxor %xmm0,%xmm11
movdqu 48(%rdi),%xmm13
pxor %xmm0,%xmm12
movdqu 64(%rdi),%xmm14
pxor %xmm0,%xmm13
movdqu 80(%rdi),%xmm15
pxor %xmm0,%xmm14
prefetcht0 448(%rdi)
prefetcht0 512(%rdi)
pxor %xmm0,%xmm15
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movdqu 96(%rdi),%xmm1
leaq 128(%rdi),%rdi
.byte 102,65,15,56,221,210
pxor %xmm0,%xmm1
movdqu 112-128(%rdi),%xmm10
.byte 102,65,15,56,221,219
pxor %xmm0,%xmm10
movdqa 0(%rsp),%xmm11
.byte 102,65,15,56,221,228
.byte 102,65,15,56,221,237
movdqa 16(%rsp),%xmm12
movdqa 32(%rsp),%xmm13
.byte 102,65,15,56,221,246
.byte 102,65,15,56,221,255
movdqa 48(%rsp),%xmm14
movdqa 64(%rsp),%xmm15
.byte 102,68,15,56,221,193
movdqa 80(%rsp),%xmm0
movups 16-128(%rcx),%xmm1
.byte 102,69,15,56,221,202
movups %xmm2,(%rsi)
movdqa %xmm11,%xmm2
movups %xmm3,16(%rsi)
movdqa %xmm12,%xmm3
movups %xmm4,32(%rsi)
movdqa %xmm13,%xmm4
movups %xmm5,48(%rsi)
movdqa %xmm14,%xmm5
movups %xmm6,64(%rsi)
movdqa %xmm15,%xmm6
movups %xmm7,80(%rsi)
movdqa %xmm0,%xmm7
movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi
subq $8,%rdx
jnc L$ctr32_loop8
addq $8,%rdx
jz L$ctr32_done
leaq -128(%rcx),%rcx
L$ctr32_tail:
leaq 16(%rcx),%rcx
cmpq $4,%rdx
jb L$ctr32_loop3
je L$ctr32_loop4
shll $4,%eax
movdqa 96(%rsp),%xmm8
pxor %xmm9,%xmm9
movups 16(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
leaq 32-16(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,225
addq $16,%rax
movups (%rdi),%xmm10
.byte 102,15,56,220,233
.byte 102,15,56,220,241
movups 16(%rdi),%xmm11
movups 32(%rdi),%xmm12
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
call L$enc_loop8_enter
movdqu 48(%rdi),%xmm13
pxor %xmm10,%xmm2
movdqu 64(%rdi),%xmm10
pxor %xmm11,%xmm3
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
pxor %xmm10,%xmm6
movdqu %xmm5,48(%rsi)
movdqu %xmm6,64(%rsi)
cmpq $6,%rdx
jb L$ctr32_done
movups 80(%rdi),%xmm11
xorps %xmm11,%xmm7
movups %xmm7,80(%rsi)
je L$ctr32_done
movups 96(%rdi),%xmm12
xorps %xmm12,%xmm8
movups %xmm8,96(%rsi)
jmp L$ctr32_done
.p2align 5
L$ctr32_loop4:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx),%xmm1
jnz L$ctr32_loop4
.byte 102,15,56,221,209
.byte 102,15,56,221,217
movups (%rdi),%xmm10
movups 16(%rdi),%xmm11
.byte 102,15,56,221,225
.byte 102,15,56,221,233
movups 32(%rdi),%xmm12
movups 48(%rdi),%xmm13
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm4,32(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm5,48(%rsi)
jmp L$ctr32_done
.p2align 5
L$ctr32_loop3:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx),%xmm1
jnz L$ctr32_loop3
.byte 102,15,56,221,209
.byte 102,15,56,221,217
.byte 102,15,56,221,225
movups (%rdi),%xmm10
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
cmpq $2,%rdx
jb L$ctr32_done
movups 16(%rdi),%xmm11
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
je L$ctr32_done
movups 32(%rdi),%xmm12
xorps %xmm12,%xmm4
movups %xmm4,32(%rsi)
L$ctr32_done:
xorps %xmm0,%xmm0
xorl %ebp,%ebp
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movaps %xmm0,0(%rsp)
pxor %xmm8,%xmm8
movaps %xmm0,16(%rsp)
pxor %xmm9,%xmm9
movaps %xmm0,32(%rsp)
pxor %xmm10,%xmm10
movaps %xmm0,48(%rsp)
pxor %xmm11,%xmm11
movaps %xmm0,64(%rsp)
pxor %xmm12,%xmm12
movaps %xmm0,80(%rsp)
pxor %xmm13,%xmm13
movaps %xmm0,96(%rsp)
pxor %xmm14,%xmm14
movaps %xmm0,112(%rsp)
pxor %xmm15,%xmm15
movq -8(%r11),%rbp
leaq (%r11),%rsp
L$ctr32_epilogue:
ret
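// _aes_hw_set_encrypt_key_base: expand the user key at %rdi, of %esi bits,
// into the AES_KEY at %rdx using aeskeygenassist (the .byte 102,15,58,223,...
// sequences). Only 128- and 256-bit keys are handled; any other size falls
// through to L$bad_keybits and returns -2.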
.globl _aes_hw_set_encrypt_key_base
.private_extern _aes_hw_set_encrypt_key_base
.p2align 4
_aes_hw_set_encrypt_key_base:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je L$14rounds
cmpl $128,%esi
jne L$bad_keybits
L$10rounds:
movl $9,%esi
movups %xmm0,(%rdx)
.byte 102,15,58,223,200,1
call L$key_expansion_128_cold
.byte 102,15,58,223,200,2
call L$key_expansion_128
.byte 102,15,58,223,200,4
call L$key_expansion_128
.byte 102,15,58,223,200,8
call L$key_expansion_128
.byte 102,15,58,223,200,16
call L$key_expansion_128
.byte 102,15,58,223,200,32
call L$key_expansion_128
.byte 102,15,58,223,200,64
call L$key_expansion_128
.byte 102,15,58,223,200,128
call L$key_expansion_128
.byte 102,15,58,223,200,27
call L$key_expansion_128
.byte 102,15,58,223,200,54
call L$key_expansion_128
movups %xmm0,(%rax)
movl %esi,80(%rax)
xorl %eax,%eax
jmp L$enc_key_ret
.p2align 4
L$14rounds:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movups %xmm0,(%rdx)
movups %xmm2,16(%rdx)
.byte 102,15,58,223,202,1
call L$key_expansion_256a_cold
.byte 102,15,58,223,200,1
call L$key_expansion_256b
.byte 102,15,58,223,202,2
call L$key_expansion_256a
.byte 102,15,58,223,200,2
call L$key_expansion_256b
.byte 102,15,58,223,202,4
call L$key_expansion_256a
.byte 102,15,58,223,200,4
call L$key_expansion_256b
.byte 102,15,58,223,202,8
call L$key_expansion_256a
.byte 102,15,58,223,200,8
call L$key_expansion_256b
.byte 102,15,58,223,202,16
call L$key_expansion_256a
.byte 102,15,58,223,200,16
call L$key_expansion_256b
.byte 102,15,58,223,202,32
call L$key_expansion_256a
.byte 102,15,58,223,200,32
call L$key_expansion_256b
.byte 102,15,58,223,202,64
call L$key_expansion_256a
movups %xmm0,(%rax)
movl %esi,16(%rax)
xorq %rax,%rax
jmp L$enc_key_ret
.p2align 4
L$bad_keybits:
movq $-2,%rax
L$enc_key_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
ret
.p2align 4
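// Shared key-expansion helpers: the two shufps/xorps pairs turn the previous
// round key into a running XOR of its four 32-bit words, and the final shufps
// broadcasts the needed word of the aeskeygenassist result ($255 selects the
// rotated-and-substituted word, $170 the plain SubWord value for the 256-bit
// "b" half) before it is XORed in.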
L$key_expansion_128:
movups %xmm0,(%rax)
leaq 16(%rax),%rax
L$key_expansion_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.p2align 4
L$key_expansion_256a:
movups %xmm2,(%rax)
leaq 16(%rax),%rax
L$key_expansion_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.p2align 4
L$key_expansion_256b:
movups %xmm0,(%rax)
leaq 16(%rax),%rax
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
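// _aes_hw_set_encrypt_key_alt: alternative key-expansion routine with the
// same arguments as the base version, but built from pshufb
// (.byte 102,15,56,0,...) and aesenclast (.byte 102,15,56,221,...) together
// with the L$key_rotate / L$key_rcon1 constants instead of aeskeygenassist.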
.globl _aes_hw_set_encrypt_key_alt
.private_extern _aes_hw_set_encrypt_key_alt
.p2align 4
_aes_hw_set_encrypt_key_alt:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je L$14rounds_alt
cmpl $128,%esi
jne L$bad_keybits_alt
movl $9,%esi
movdqa L$key_rotate(%rip),%xmm5
movl $8,%r10d
movdqa L$key_rcon1(%rip),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,(%rdx)
jmp L$oop_key128
.p2align 4
L$oop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leaq 16(%rax),%rax
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%rax)
movdqa %xmm0,%xmm2
decl %r10d
jnz L$oop_key128
movdqa L$key_rcon1b(%rip),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%rax)
movl %esi,96(%rax)
xorl %eax,%eax
jmp L$enc_key_ret_alt
.p2align 4
L$14rounds_alt:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movdqa L$key_rotate(%rip),%xmm5
movdqa L$key_rcon1(%rip),%xmm4
movl $7,%r10d
movdqu %xmm0,0(%rdx)
movdqa %xmm2,%xmm1
movdqu %xmm2,16(%rdx)
jmp L$oop_key256
.p2align 4
L$oop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
decl %r10d
jz L$done_key256
pshufd $0xff,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%rax)
leaq 32(%rax),%rax
movdqa %xmm2,%xmm1
jmp L$oop_key256
L$done_key256:
movl %esi,16(%rax)
xorl %eax,%eax
jmp L$enc_key_ret_alt
.p2align 4
L$bad_keybits_alt:
movq $-2,%rax
L$enc_key_ret_alt:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
ret
.section __DATA,__const
.p2align 6
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
L$increment32:
.long 6,6,6,0
L$increment64:
.long 1,0,0,0
L$increment1:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
L$key_rotate:
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
L$key_rotate192:
.long 0x04070605,0x04070605,0x04070605,0x04070605
L$key_rcon1:
.long 1,1,1,1
L$key_rcon1b:
.long 0x1b,0x1b,0x1b,0x1b
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6
.text
#endif
|
Chigozieworldwide/Multi | 48,645 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha512-x86_64-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
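// sha512_block_data_order_nohw: scalar SHA-512 compression. The prologue
// implies the SysV argument order state=%rdi (eight 64-bit words),
// input=%rsi, num_blocks=%rdx: %rdx is scaled to 128-byte blocks (shlq $4
// plus the *8 addressing) to form the end-of-input pointer, and the chaining
// value is loaded from 0..56(%rdi).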
.globl sha512_block_data_order_nohw
.hidden sha512_block_data_order_nohw
.type sha512_block_data_order_nohw,@function
.align 16
sha512_block_data_order_nohw:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $128+32,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08
.Lprologue:
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp .Lloop
.align 16
.Lloop:
movq %rbx,%rdi
leaq K512(%rip),%rbp
xorq %rcx,%rdi
movq 0(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 8(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 16(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 24(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 32(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 40(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 48(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 56(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
addq %r14,%rax
movq 64(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 72(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 80(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 88(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 96(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 104(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 112(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 120(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
movq 8(%rsp),%r13
movq 112(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 72(%rsp),%r12
addq 0(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 16(%rsp),%r13
movq 120(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 80(%rsp),%r12
addq 8(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 24(%rsp),%r13
movq 0(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 88(%rsp),%r12
addq 16(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 32(%rsp),%r13
movq 8(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 96(%rsp),%r12
addq 24(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 40(%rsp),%r13
movq 16(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 104(%rsp),%r12
addq 32(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 48(%rsp),%r13
movq 24(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 112(%rsp),%r12
addq 40(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 56(%rsp),%r13
movq 32(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 120(%rsp),%r12
addq 48(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 64(%rsp),%r13
movq 40(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 0(%rsp),%r12
addq 56(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
movq 72(%rsp),%r13
movq 48(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 8(%rsp),%r12
addq 64(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 80(%rsp),%r13
movq 56(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 16(%rsp),%r12
addq 72(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 88(%rsp),%r13
movq 64(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 24(%rsp),%r12
addq 80(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 96(%rsp),%r13
movq 72(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 32(%rsp),%r12
addq 88(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 104(%rsp),%r13
movq 80(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 40(%rsp),%r12
addq 96(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 112(%rsp),%r13
movq 88(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 48(%rsp),%r12
addq 104(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 120(%rsp),%r13
movq 96(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 56(%rsp),%r12
addq 112(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 0(%rsp),%r13
movq 104(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 64(%rsp),%r12
addq 120(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
cmpb $0,7(%rbp)
jnz .Lrounds_16_xx
movq 128+0(%rsp),%rdi
addq %r14,%rax
leaq 128(%rsi),%rsi
addq 0(%rdi),%rax
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb .Lloop
movq 152(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
ret
.cfi_endproc
.size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw
.section .rodata
.align 64
.type K512,@object
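// Round-constant table. Each 16-byte pair of constants appears twice, giving
// a 32-byte stride per two rounds that both the scalar rounds (which advance
// %rbp by 8 and then 24 bytes) and the SIMD message-schedule code (which
// reads 16-byte pairs at 32-byte offsets) walk over; the final pair, at
// K512+1280, is the byte-swap mask used with vpshufb to load big-endian
// message words.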
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
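// sha512_block_data_order_avx: AVX variant with the same arguments as the
// nohw version. Each .Lloop_avx iteration byte-swaps one 128-byte block with
// vpshufb, pre-adds the round constants into the 128-byte area at 0(%rsp),
// and then .Lavx_00_47 interleaves the scalar rounds with the AVX
// message-schedule update until the check at 135(%rbp) detects the end of
// the K512 constants.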
.globl sha512_block_data_order_avx
.hidden sha512_block_data_order_avx
.type sha512_block_data_order_avx,@function
.align 64
sha512_block_data_order_avx:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $160,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08
.Lprologue_avx:
vzeroupper
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa K512+1280(%rip),%xmm11
vmovdqu 0(%rsi),%xmm0
leaq K512+128(%rip),%rbp
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vpshufb %xmm11,%xmm0,%xmm0
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm11,%xmm1,%xmm1
vmovdqu 64(%rsi),%xmm4
vpshufb %xmm11,%xmm2,%xmm2
vmovdqu 80(%rsi),%xmm5
vpshufb %xmm11,%xmm3,%xmm3
vmovdqu 96(%rsi),%xmm6
vpshufb %xmm11,%xmm4,%xmm4
vmovdqu 112(%rsi),%xmm7
vpshufb %xmm11,%xmm5,%xmm5
vpaddq -128(%rbp),%xmm0,%xmm8
vpshufb %xmm11,%xmm6,%xmm6
vpaddq -96(%rbp),%xmm1,%xmm9
vpshufb %xmm11,%xmm7,%xmm7
vpaddq -64(%rbp),%xmm2,%xmm10
vpaddq -32(%rbp),%xmm3,%xmm11
vmovdqa %xmm8,0(%rsp)
vpaddq 0(%rbp),%xmm4,%xmm8
vmovdqa %xmm9,16(%rsp)
vpaddq 32(%rbp),%xmm5,%xmm9
vmovdqa %xmm10,32(%rsp)
vpaddq 64(%rbp),%xmm6,%xmm10
vmovdqa %xmm11,48(%rsp)
vpaddq 96(%rbp),%xmm7,%xmm11
vmovdqa %xmm8,64(%rsp)
movq %rax,%r14
vmovdqa %xmm9,80(%rsp)
movq %rbx,%rdi
vmovdqa %xmm10,96(%rsp)
xorq %rcx,%rdi
vmovdqa %xmm11,112(%rsp)
movq %r8,%r13
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
addq $256,%rbp
vpalignr $8,%xmm0,%xmm1,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm4,%xmm5,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm0,%xmm0
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 0(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm7,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm7,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm0,%xmm0
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm7,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 8(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm0,%xmm0
xorq %r11,%r14
addq %r13,%r10
vpaddq -128(%rbp),%xmm0,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,0(%rsp)
vpalignr $8,%xmm1,%xmm2,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm5,%xmm6,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm1,%xmm1
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 16(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm0,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm0,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm1,%xmm1
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm0,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 24(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm1,%xmm1
xorq %r9,%r14
addq %r13,%r8
vpaddq -96(%rbp),%xmm1,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,16(%rsp)
vpalignr $8,%xmm2,%xmm3,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm6,%xmm7,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm2,%xmm2
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 32(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm1,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm1,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm2,%xmm2
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm1,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 40(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm2,%xmm2
xorq %rdx,%r14
addq %r13,%rcx
vpaddq -64(%rbp),%xmm2,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,32(%rsp)
vpalignr $8,%xmm3,%xmm4,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm7,%xmm0,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm3,%xmm3
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 48(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm2,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm2,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm3,%xmm3
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm2,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 56(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm3,%xmm3
xorq %rbx,%r14
addq %r13,%rax
vpaddq -32(%rbp),%xmm3,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,48(%rsp)
vpalignr $8,%xmm4,%xmm5,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm0,%xmm1,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm4,%xmm4
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 64(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm3,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm3,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm4,%xmm4
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm3,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 72(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm4,%xmm4
xorq %r11,%r14
addq %r13,%r10
vpaddq 0(%rbp),%xmm4,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,64(%rsp)
vpalignr $8,%xmm5,%xmm6,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm1,%xmm2,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm5,%xmm5
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 80(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm4,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm4,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm5,%xmm5
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm4,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 88(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm5,%xmm5
xorq %r9,%r14
addq %r13,%r8
vpaddq 32(%rbp),%xmm5,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,80(%rsp)
vpalignr $8,%xmm6,%xmm7,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm2,%xmm3,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm6,%xmm6
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 96(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm5,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm5,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm6,%xmm6
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm5,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 104(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm6,%xmm6
xorq %rdx,%r14
addq %r13,%rcx
vpaddq 64(%rbp),%xmm6,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,96(%rsp)
vpalignr $8,%xmm7,%xmm0,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm3,%xmm4,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm7,%xmm7
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 112(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm6,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm6,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm7,%xmm7
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm6,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 120(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm7,%xmm7
xorq %rbx,%r14
addq %r13,%rax
vpaddq 96(%rbp),%xmm7,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,112(%rsp)
cmpb $0,135(%rbp)
jne .Lavx_00_47
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 0(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 8(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 16(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 24(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 32(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 40(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 48(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 56(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 64(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 72(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 80(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 88(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 96(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 104(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 112(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 120(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
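// All 80 rounds of this block are done: reload the context pointer saved on
// the stack, add the working variables back into the hash state, store the
// updated state, and loop while input remains.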
movq 128+0(%rsp),%rdi
movq %r14,%rax
addq 0(%rdi),%rax
leaq 128(%rsi),%rsi
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb .Lloop_avx
movq 152(%rsp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
ret
.cfi_endproc
.size sha512_block_data_order_avx,.-sha512_block_data_order_avx
#endif
|
Chigozieworldwide/Multi | 6,269 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghash-armv4-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL
@ instructions are in aesv8-armx.pl.)
.arch armv7-a
.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#define ldrplb ldrbpl
#define ldrneb ldrbne
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl gcm_init_neon
.hidden gcm_init_neon
.type gcm_init_neon,%function
.align 4
gcm_init_neon:
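@ Precompute the "twisted" H: shift H left by one bit and fold the top bit
@ back in with the 0xc2...01 constant built below, producing the form that the
@ reflected multiplication in gcm_gmult_neon/gcm_ghash_neon expects.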
vld1.64 d7,[r1]! @ load H
vmov.i8 q8,#0xe1
vld1.64 d6,[r1]
vshl.i64 d17,#57
vshr.u64 d16,#63 @ t0=0xc2....01
vdup.8 q9,d7[7]
vshr.u64 d26,d6,#63
vshr.s8 q9,#7 @ broadcast carry bit
vshl.i64 q3,q3,#1
vand q8,q8,q9
vorr d7,d26 @ H<<<=1
veor q3,q3,q8 @ twisted H
vstmia r0,{q3}
bx lr @ bx lr
.size gcm_init_neon,.-gcm_init_neon
.globl gcm_gmult_neon
.hidden gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
vld1.64 d7,[r0]! @ load Xi
vld1.64 d6,[r0]!
vmov.i64 d29,#0x0000ffffffffffff
vldmia r1,{d26,d27} @ load twisted H
vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
vrev64.8 q3,q3
#endif
vmov.i64 d31,#0x000000000000ffff
veor d28,d26,d27 @ Karatsuba pre-processing
mov r3,#16
b .Lgmult_neon
.size gcm_gmult_neon,.-gcm_gmult_neon
.globl gcm_ghash_neon
.hidden gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
vld1.64 d1,[r0]! @ load Xi
vld1.64 d0,[r0]!
vmov.i64 d29,#0x0000ffffffffffff
vldmia r1,{d26,d27} @ load twisted H
vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
vrev64.8 q0,q0
#endif
vmov.i64 d31,#0x000000000000ffff
veor d28,d26,d27 @ Karatsuba pre-processing
.Loop_neon:
vld1.64 d7,[r2]! @ load inp
vld1.64 d6,[r2]!
#ifdef __ARMEL__
vrev64.8 q3,q3
#endif
veor q3,q0 @ inp^=Xi
.Lgmult_neon:
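@ 64x64-bit carry-less multiply built out of 8-bit vmull.p8 partial products:
@ A1..A3 / B1..B4 below are byte-rotated copies of the operands, and the
@ partial products are masked, re-aligned with vext and xored into the result.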
vext.8 d16, d26, d26, #1 @ A1
vmull.p8 q8, d16, d6 @ F = A1*B
vext.8 d0, d6, d6, #1 @ B1
vmull.p8 q0, d26, d0 @ E = A*B1
vext.8 d18, d26, d26, #2 @ A2
vmull.p8 q9, d18, d6 @ H = A2*B
vext.8 d22, d6, d6, #2 @ B2
vmull.p8 q11, d26, d22 @ G = A*B2
vext.8 d20, d26, d26, #3 @ A3
veor q8, q8, q0 @ L = E + F
vmull.p8 q10, d20, d6 @ J = A3*B
vext.8 d0, d6, d6, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q0, d26, d0 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d6, d6, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d26, d22 @ K = A*B4
veor q10, q10, q0 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q0, d26, d6 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q0, q0, q8
veor q0, q0, q10
veor d6,d6,d7 @ Karatsuba pre-processing
vext.8 d16, d28, d28, #1 @ A1
vmull.p8 q8, d16, d6 @ F = A1*B
vext.8 d2, d6, d6, #1 @ B1
vmull.p8 q1, d28, d2 @ E = A*B1
vext.8 d18, d28, d28, #2 @ A2
vmull.p8 q9, d18, d6 @ H = A2*B
vext.8 d22, d6, d6, #2 @ B2
vmull.p8 q11, d28, d22 @ G = A*B2
vext.8 d20, d28, d28, #3 @ A3
veor q8, q8, q1 @ L = E + F
vmull.p8 q10, d20, d6 @ J = A3*B
vext.8 d2, d6, d6, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q1, d28, d2 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d6, d6, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d28, d22 @ K = A*B4
veor q10, q10, q1 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q1, d28, d6 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q1, q1, q8
veor q1, q1, q10
vext.8 d16, d27, d27, #1 @ A1
vmull.p8 q8, d16, d7 @ F = A1*B
vext.8 d4, d7, d7, #1 @ B1
vmull.p8 q2, d27, d4 @ E = A*B1
vext.8 d18, d27, d27, #2 @ A2
vmull.p8 q9, d18, d7 @ H = A2*B
vext.8 d22, d7, d7, #2 @ B2
vmull.p8 q11, d27, d22 @ G = A*B2
vext.8 d20, d27, d27, #3 @ A3
veor q8, q8, q2 @ L = E + F
vmull.p8 q10, d20, d7 @ J = A3*B
vext.8 d4, d7, d7, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q2, d27, d4 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d7, d7, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d27, d22 @ K = A*B4
veor q10, q10, q2 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q2, d27, d7 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q2, q2, q8
veor q2, q2, q10
veor q1,q1,q0 @ Karatsuba post-processing
veor q1,q1,q2
veor d1,d1,d2
veor d4,d4,d3 @ Xh|Xl - 256-bit result
@ equivalent of reduction_avx from ghash-x86_64.pl
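@ fold the 256-bit product Xh:Xl back to 128 bits modulo the GCM polynomial
@ x^128 + x^7 + x^2 + x + 1; the 57/62/63-bit shifts are the bit-reflected
@ form of that reduction.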
vshl.i64 q9,q0,#57 @ 1st phase
vshl.i64 q10,q0,#62
veor q10,q10,q9 @
vshl.i64 q9,q0,#63
veor q10, q10, q9 @
veor d1,d1,d20 @
veor d4,d4,d21
vshr.u64 q10,q0,#1 @ 2nd phase
veor q2,q2,q0
veor q0,q0,q10 @
vshr.u64 q10,q10,#6
vshr.u64 q0,q0,#1 @
veor q0,q0,q2 @
veor q0,q0,q10 @
subs r3,#16
bne .Loop_neon
#ifdef __ARMEL__
vrev64.8 q0,q0
#endif
sub r0,#16
vst1.64 d1,[r0]! @ write out Xi
vst1.64 d0,[r0]
bx lr @ bx lr
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
Chigozieworldwide/Multi | 34,022 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha256-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
//              SHA256-hw   SHA256(*)       SHA512
// Apple A7     1.97        10.5 (+33%)     6.73 (-1%(**))
// Cortex-A53   2.38        15.5 (+115%)    10.0 (+150%(***))
// Cortex-A57   2.31        11.6 (+86%)     7.51 (+260%(***))
// Denver       2.01        10.5 (+26%)     6.70 (+8%)
// X-Gene                   20.0 (+100%)    12.8 (+300%(***))
// Mongoose     2.36        13.0 (+50%)     8.36 (+33%)
// Kryo         1.92        17.4 (+30%)     11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// an indication of some compiler "pathology"; most notably, code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
#endif
.text
.globl _sha256_block_data_order_nohw
.private_extern _sha256_block_data_order_nohw
.align 6
_sha256_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,LK256@PAGE
add x30,x30,LK256@PAGEOFF
stp x0,x2,[x29,#96]
Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
cbnz w19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section __TEXT,__const
.align 6
LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl _sha256_block_data_order_hw
.private_extern _sha256_block_data_order_hw
.align 6
_sha256_block_data_order_hw:
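// The SHA-256 extension instructions below are emitted as raw .long encodings
// (with the mnemonics kept in comments) so the file assembles even with
// assemblers that do not recognize the sha256* mnemonics.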
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,LK256@PAGE
add x3,x3,LK256@PAGEOFF
Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
Chigozieworldwide/Multi | 20,463 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/x86_64-mont-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,@function
.align 16
bn_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
negq %r9
movq %rsp,%r11
leaq -16(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul_page_walk
jmp .Lmul_page_walk_done
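// Touch the newly reserved stack one 4096-byte page at a time so the guard
// page cannot be skipped over before the scratch area is used.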
.align 16
.Lmul_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul_page_walk
.Lmul_page_walk_done:
movq %rax,8(%rsp,%r9,8)
.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08
.Lmul_body:
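// Word-serial Montgomery multiplication: for each limb b[i] the loops below
// accumulate t += a*b[i], compute m = t[0]*n0 mod 2^64 (n0 = -n^-1 mod 2^64,
// loaded through %r8), add m*n so the low limb becomes zero, and shift the
// accumulator down by one limb.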
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%r13
leaq 1(%r15),%r15
jmp .L1st_enter
.align 16
.L1st:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r13
movq %r10,%r11
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
.L1st_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 1(%r15),%r15
movq %rdx,%r10
mulq %rbp
cmpq %r9,%r15
jne .L1st
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
movq %r10,%r11
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
jmp .Louter
.align 16
.Louter:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq %r8,%rbp
movq (%rsp),%r10
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq 8(%rsp),%r10
movq %rdx,%r13
leaq 1(%r15),%r15
jmp .Linner_enter
.align 16
.Linner:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
.Linner_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
leaq 1(%r15),%r15
mulq %rbp
cmpq %r9,%r15
jne .Linner
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
cmpq %r9,%r14
jb .Louter
xorq %r14,%r14
movq (%rsp),%rax
movq %r9,%r15
.align 16
.Lsub: sbbq (%rcx,%r14,8),%rax
movq %rax,(%rdi,%r14,8)
movq 8(%rsp,%r14,8),%rax
leaq 1(%r14),%r14
decq %r15
jnz .Lsub
sbbq $0,%rax
movq $-1,%rbx
xorq %rax,%rbx
xorq %r14,%r14
movq %r9,%r15
.Lcopy:
movq (%rdi,%r14,8),%rcx
movq (%rsp,%r14,8),%rdx
andq %rbx,%rcx
andq %rax,%rdx
movq %r9,(%rsp,%r14,8)
orq %rcx,%rdx
movq %rdx,(%rdi,%r14,8)
leaq 1(%r14),%r14
subq $1,%r15
jnz .Lcopy
movq 8(%rsp,%r9,8),%rsi
.cfi_def_cfa %rsi,8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmul_epilogue:
ret
.cfi_endproc
.size bn_mul_mont_nohw,.-bn_mul_mont_nohw
.globl bn_mul4x_mont
.hidden bn_mul4x_mont
.type bn_mul4x_mont,@function
.align 16
bn_mul4x_mont:
.cfi_startproc
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
negq %r9
movq %rsp,%r11
leaq -32(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul4x_page_walk
jmp .Lmul4x_page_walk_done
.Lmul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul4x_page_walk
.Lmul4x_page_walk_done:
movq %rax,8(%rsp,%r9,8)
.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08
.Lmul4x_body:
movq %rdi,16(%rsp,%r9,8)
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp .L1st4x
.align 16
.L1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb .L1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
leaq 1(%r14),%r14
.align 4
.Louter4x:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq (%rsp),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%rsp),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp .Linner4x
.align 16
.Linner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq 8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb .Linner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 1(%r14),%r14
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%rsp,%r9,8),%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
cmpq %r9,%r14
jb .Louter4x
movq 16(%rsp,%r9,8),%rdi
leaq -4(%r9),%r15
movq 0(%rsp),%rax
movq 8(%rsp),%rdx
shrq $2,%r15
leaq (%rsp),%rsi
xorq %r14,%r14
subq 0(%rcx),%rax
movq 16(%rsi),%rbx
movq 24(%rsi),%rbp
sbbq 8(%rcx),%rdx
.Lsub4x:
movq %rax,0(%rdi,%r14,8)
movq %rdx,8(%rdi,%r14,8)
sbbq 16(%rcx,%r14,8),%rbx
movq 32(%rsi,%r14,8),%rax
movq 40(%rsi,%r14,8),%rdx
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
movq %rbp,24(%rdi,%r14,8)
sbbq 32(%rcx,%r14,8),%rax
movq 48(%rsi,%r14,8),%rbx
movq 56(%rsi,%r14,8),%rbp
sbbq 40(%rcx,%r14,8),%rdx
leaq 4(%r14),%r14
decq %r15
jnz .Lsub4x
movq %rax,0(%rdi,%r14,8)
movq 32(%rsi,%r14,8),%rax
sbbq 16(%rcx,%r14,8),%rbx
movq %rdx,8(%rdi,%r14,8)
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
sbbq $0,%rax
movq %rbp,24(%rdi,%r14,8)
pxor %xmm0,%xmm0
.byte 102,72,15,110,224
pcmpeqd %xmm5,%xmm5
pshufd $0,%xmm4,%xmm4
movq %r9,%r15
pxor %xmm4,%xmm5
shrq $2,%r15
xorl %eax,%eax
jmp .Lcopy4x
.align 16
.Lcopy4x:
movdqa (%rsp,%rax,1),%xmm1
movdqu (%rdi,%rax,1),%xmm2
pand %xmm4,%xmm1
pand %xmm5,%xmm2
movdqa 16(%rsp,%rax,1),%xmm3
movdqa %xmm0,(%rsp,%rax,1)
por %xmm2,%xmm1
movdqu 16(%rdi,%rax,1),%xmm2
movdqu %xmm1,(%rdi,%rax,1)
pand %xmm4,%xmm3
pand %xmm5,%xmm2
movdqa %xmm0,16(%rsp,%rax,1)
por %xmm2,%xmm3
movdqu %xmm3,16(%rdi,%rax,1)
leaq 32(%rax),%rax
decq %r15
jnz .Lcopy4x
movq 8(%rsp,%r9,8),%rsi
.cfi_def_cfa %rsi, 8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmul4x_epilogue:
ret
.cfi_endproc
.size bn_mul4x_mont,.-bn_mul4x_mont
.extern bn_sqrx8x_internal
.hidden bn_sqrx8x_internal
.extern bn_sqr8x_internal
.hidden bn_sqr8x_internal
.globl bn_sqr8x_mont
.hidden bn_sqr8x_mont
.type bn_sqr8x_mont,@function
.align 32
bn_sqr8x_mont:
.cfi_startproc
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lsqr8x_prologue:
movl %r9d,%r10d
shll $3,%r9d
shlq $3+2,%r10
negq %r9
leaq -64(%rsp,%r9,2),%r11
movq %rsp,%rbp
movq (%r8),%r8
subq %rsi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb .Lsqr8x_sp_alt
subq %r11,%rbp
leaq -64(%rbp,%r9,2),%rbp
jmp .Lsqr8x_sp_done
.align 32
.Lsqr8x_sp_alt:
leaq 4096-64(,%r9,2),%r10
leaq -64(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
.Lsqr8x_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lsqr8x_page_walk
jmp .Lsqr8x_page_walk_done
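# The loop below touches the newly reserved stack area one 4096-byte page
# at a time (a stack probe), so the allocation cannot skip past a guard page.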
.align 16
.Lsqr8x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lsqr8x_page_walk
.Lsqr8x_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
.Lsqr8x_body:
.byte 102,72,15,110,209
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,73,15,110,218
testq %rdx,%rdx
jz .Lsqr8x_nox
call bn_sqrx8x_internal
leaq (%r8,%rcx,1),%rbx
movq %rcx,%r9
movq %rcx,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp .Lsqr8x_sub
.align 32
.Lsqr8x_nox:
call bn_sqr8x_internal
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
movq %r9,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp .Lsqr8x_sub
.align 32
.Lsqr8x_sub:
movq 0(%rbx),%r12
movq 8(%rbx),%r13
movq 16(%rbx),%r14
movq 24(%rbx),%r15
leaq 32(%rbx),%rbx
sbbq 0(%rbp),%r12
sbbq 8(%rbp),%r13
sbbq 16(%rbp),%r14
sbbq 24(%rbp),%r15
leaq 32(%rbp),%rbp
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz .Lsqr8x_sub
sbbq $0,%rax
leaq (%rbx,%r9,1),%rbx
leaq (%rdi,%r9,1),%rdi
.byte 102,72,15,110,200
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
jmp .Lsqr8x_cond_copy
.align 32
.Lsqr8x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
movdqa %xmm0,-32(%rbx,%rdx,1)
movdqa %xmm0,-16(%rbx,%rdx,1)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
addq $32,%r9
jnz .Lsqr8x_cond_copy
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lsqr8x_epilogue:
ret
.cfi_endproc
.size bn_sqr8x_mont,.-bn_sqr8x_mont
.globl bn_mulx4x_mont
.hidden bn_mulx4x_mont
.type bn_mulx4x_mont,@function
.align 32
bn_mulx4x_mont:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lmulx4x_prologue:
shll $3,%r9d
xorq %r10,%r10
subq %r9,%r10
movq (%r8),%r8
leaq -72(%rsp,%r10,1),%rbp
andq $-128,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmulx4x_page_walk
jmp .Lmulx4x_page_walk_done
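# Same page-by-page stack probe as in bn_sqr8x_mont above.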
.align 16
.Lmulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmulx4x_page_walk
.Lmulx4x_page_walk_done:
leaq (%rdx,%r9,1),%r10
movq %r9,0(%rsp)
shrq $5,%r9
movq %r10,16(%rsp)
subq $1,%r9
movq %r8,24(%rsp)
movq %rdi,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
movq %r9,48(%rsp)
jmp .Lmulx4x_body
.align 32
.Lmulx4x_body:
leaq 8(%rdx),%rdi
movq (%rdx),%rdx
leaq 64+32(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r14
addq %rax,%r11
movq %rdi,8(%rsp)
mulxq 16(%rsi),%r12,%r13
adcq %r14,%r12
adcq $0,%r13
movq %r8,%rdi
imulq 24(%rsp),%r8
xorq %rbp,%rbp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%rdi
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00
movq 48(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp .Lmulx4x_1st
.align 32
.Lmulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz .Lmulx4x_1st
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
addq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
jmp .Lmulx4x_outer
.align 32
.Lmulx4x_outer:
movq (%rdi),%rdx
leaq 8(%rdi),%rdi
subq %rax,%rsi
movq %r15,(%rbx)
leaq 64+32(%rsp),%rbx
subq %rax,%rcx
mulxq 0(%rsi),%r8,%r11
xorl %ebp,%ebp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
adoxq -16(%rbx),%r12
adcxq %rbp,%r13
adoxq %rbp,%r13
movq %rdi,8(%rsp)
movq %r8,%r15
imulq 24(%rsp),%r8
xorl %ebp,%ebp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
adcxq %rax,%r13
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
adoxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
leaq 32(%rcx),%rcx
adcxq %rax,%r12
adoxq %rbp,%r15
movq 48(%rsp),%rdi
movq %r12,-16(%rbx)
jmp .Lmulx4x_inner
.align 32
.Lmulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-32(%rbx)
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz .Lmulx4x_inner
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
subq 0(%rbx),%rbp
adcq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
cmpq 16(%rsp),%rdi
jne .Lmulx4x_outer
leaq 64(%rsp),%rbx
subq %rax,%rcx
negq %r15
movq %rax,%rdx
shrq $3+2,%rax
movq 32(%rsp),%rdi
jmp .Lmulx4x_sub
.align 32
.Lmulx4x_sub:
movq 0(%rbx),%r11
movq 8(%rbx),%r12
movq 16(%rbx),%r13
movq 24(%rbx),%r14
leaq 32(%rbx),%rbx
sbbq 0(%rcx),%r11
sbbq 8(%rcx),%r12
sbbq 16(%rcx),%r13
sbbq 24(%rcx),%r14
leaq 32(%rcx),%rcx
movq %r11,0(%rdi)
movq %r12,8(%rdi)
movq %r13,16(%rdi)
movq %r14,24(%rdi)
leaq 32(%rdi),%rdi
decq %rax
jnz .Lmulx4x_sub
sbbq $0,%r15
leaq 64(%rsp),%rbx
subq %rdx,%rdi
.byte 102,73,15,110,207
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
jmp .Lmulx4x_cond_copy
.align 32
.Lmulx4x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
subq $32,%rdx
jnz .Lmulx4x_cond_copy
movq %rdx,(%rbx)
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmulx4x_epilogue:
ret
.cfi_endproc
.size bn_mulx4x_mont,.-bn_mulx4x_mont
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 16
#endif
|
Chigozieworldwide/Multi | 7,660 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesv8-armx-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.section .rodata
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.text
.globl aes_hw_set_encrypt_key
.def aes_hw_set_encrypt_key
.type 32
.endef
.align 5
aes_hw_set_encrypt_key:
Lenc_key:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-2
cmp w1,#128
b.lt Lenc_key_abort
cmp w1,#256
b.gt Lenc_key_abort
tst w1,#0x3f
b.ne Lenc_key_abort
adrp x3,Lrcon
add x3,x3,:lo12:Lrcon
cmp w1,#192
eor v0.16b,v0.16b,v0.16b
ld1 {v3.16b},[x0],#16
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32
b.lt Loop128
// 192-bit key support was removed.
b L256
.align 4
Loop128:
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
b.ne Loop128
ld1 {v1.4s},[x3]
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2]
add x2,x2,#0x50
mov w12,#10
b Ldone
// 192-bit key support was removed.
.align 4
L256:
ld1 {v4.16b},[x0]
mov w1,#7
mov w12,#14
st1 {v3.4s},[x2],#16
Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq Ldone
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b Loop256
Ldone:
str w12,[x2]
mov x3,#0
Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
.globl aes_hw_ctr32_encrypt_blocks
.def aes_hw_ctr32_encrypt_blocks
.type 32
.endef
.align 5
aes_hw_ctr32_encrypt_blocks:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240]
ldr w8, [x4, #12]
ld1 {v0.4s},[x4]
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
csel x12,xzr,x12,lo
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
// This function uses a counter in one 32-bit lane. The vmov lines
// could write to v1.16b and v18.16b directly, but that trips these bugs.
// We write to v6.16b and copy to the final register as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
#ifndef __AARCH64EB__
rev w8, w8
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b
b.ls Lctr32_tail
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b
b Loop3x_ctr32
.align 4
Loop3x_ctr32:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_ctr32
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs Loop3x_ctr32
adds x2,x2,#3
b.eq Lctr32_done
cmp x2,#1
mov x12,#16
csel x12,xzr,x12,eq
Lctr32_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
cmp x2,#1
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
b.eq Lctr32_done
st1 {v3.16b},[x1]
Lctr32_done:
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Chigozieworldwide/Multi | 10,875 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghash-neon-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.text
.globl _gcm_init_neon
.private_extern _gcm_init_neon
.align 4
_gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
.globl _gcm_gmult_neon
.private_extern _gcm_gmult_neon
.align 4
_gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
.globl _gcm_ghash_neon
.private_extern _gcm_ghash_neon
.align 4
_gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
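// For reference, v24/v25 were loaded from Lmasks at the bottom of this
// file: v24 = {k48 = 0x0000ffffffffffff, k32 = 0x00000000ffffffff},
// v25 = {k16 = 0x000000000000ffff, k0 = 0}.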
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
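// (The #57/#62/#63 shifts correspond to the x^7, x^2 and x^1 terms of the
// GHASH polynomial x^128 + x^7 + x^2 + x + 1 in this bit-reflected
// representation.)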
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section __TEXT,__const
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
Chigozieworldwide/Multi | 21,811 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/armv4-mont-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,%function
.align 5
bn_mul_mont_nohw:
ldr ip,[sp,#4] @ load num
stmdb sp!,{r0,r2} @ sp points at argument block
cmp ip,#2
mov r0,ip @ load num
#ifdef __thumb2__
ittt lt
#endif
movlt r0,#0
addlt sp,sp,#2*4
blt .Labrt
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers
mov r0,r0,lsl#2 @ rescale r0 for byte count
sub sp,sp,r0 @ alloca(4*num)
sub sp,sp,#4 @ +extra dword
sub r0,r0,#4 @ "num=num-1"
add r4,r2,r0 @ &bp[num-1]
add r0,sp,r0 @ r0 to point at &tp[num-1]
ldr r8,[r0,#14*4] @ &n0
ldr r2,[r2] @ bp[0]
ldr r5,[r1],#4 @ ap[0],ap++
ldr r6,[r3],#4 @ np[0],np++
ldr r8,[r8] @ *n0
str r4,[r0,#15*4] @ save &bp[num]
umull r10,r11,r5,r2 @ ap[0]*bp[0]
str r8,[r0,#14*4] @ save n0 value
mul r8,r10,r8 @ "tp[0]"*n0
mov r12,#0
umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
mov r4,sp
.L1st:
ldr r5,[r1],#4 @ ap[j],ap++
mov r10,r11
ldr r6,[r3],#4 @ np[j],np++
mov r11,#0
umlal r10,r11,r5,r2 @ ap[j]*bp[0]
mov r14,#0
umlal r12,r14,r6,r8 @ np[j]*n0
adds r12,r12,r10
str r12,[r4],#4 @ tp[j-1]=,tp++
adc r12,r14,#0
cmp r4,r0
bne .L1st
adds r12,r12,r11
ldr r4,[r0,#13*4] @ restore bp
mov r14,#0
ldr r8,[r0,#14*4] @ restore n0
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
mov r7,sp
str r14,[r0,#4] @ tp[num]=
.Louter:
sub r7,r0,r7 @ "original" r0-1 value
sub r1,r1,r7 @ "rewind" ap to &ap[1]
ldr r2,[r4,#4]! @ *(++bp)
sub r3,r3,r7 @ "rewind" np to &np[1]
ldr r5,[r1,#-4] @ ap[0]
ldr r10,[sp] @ tp[0]
ldr r6,[r3,#-4] @ np[0]
ldr r7,[sp,#4] @ tp[1]
mov r11,#0
umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
str r4,[r0,#13*4] @ save bp
mul r8,r10,r8
mov r12,#0
umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
mov r4,sp
.Linner:
ldr r5,[r1],#4 @ ap[j],ap++
adds r10,r11,r7 @ +=tp[j]
ldr r6,[r3],#4 @ np[j],np++
mov r11,#0
umlal r10,r11,r5,r2 @ ap[j]*bp[i]
mov r14,#0
umlal r12,r14,r6,r8 @ np[j]*n0
adc r11,r11,#0
ldr r7,[r4,#8] @ tp[j+1]
adds r12,r12,r10
str r12,[r4],#4 @ tp[j-1]=,tp++
adc r12,r14,#0
cmp r4,r0
bne .Linner
adds r12,r12,r11
mov r14,#0
ldr r4,[r0,#13*4] @ restore bp
adc r14,r14,#0
ldr r8,[r0,#14*4] @ restore n0
adds r12,r12,r7
ldr r7,[r0,#15*4] @ restore &bp[num]
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
str r14,[r0,#4] @ tp[num]=
cmp r4,r7
#ifdef __thumb2__
itt ne
#endif
movne r7,sp
bne .Louter
ldr r2,[r0,#12*4] @ pull rp
mov r5,sp
add r0,r0,#4 @ r0 to point at &tp[num]
sub r5,r0,r5 @ "original" num value
mov r4,sp @ "rewind" r4
mov r1,r4 @ "borrow" r1
sub r3,r3,r5 @ "rewind" r3 to &np[0]
subs r7,r7,r7 @ "clear" carry flag
.Lsub: ldr r7,[r4],#4
ldr r6,[r3],#4
sbcs r7,r7,r6 @ tp[j]-np[j]
str r7,[r2],#4 @ rp[j]=
teq r4,r0 @ preserve carry
bne .Lsub
sbcs r14,r14,#0 @ upmost carry
mov r4,sp @ "rewind" r4
sub r2,r2,r5 @ "rewind" r2
.Lcopy: ldr r7,[r4] @ conditional copy
ldr r5,[r2]
str sp,[r4],#4 @ zap tp
#ifdef __thumb2__
it cc
#endif
movcc r5,r7
str r5,[r2],#4
teq r4,r0 @ preserve carry
bne .Lcopy
mov sp,r0
add sp,sp,#4 @ skip over tp[num+1]
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers
add sp,sp,#2*4 @ skip over {r0,r2}
mov r0,#1
.Labrt:
#if __ARM_ARCH>=5
bx lr @ bx lr
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size bn_mul_mont_nohw,.-bn_mul_mont_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl bn_mul8x_mont_neon
.hidden bn_mul8x_mont_neon
.type bn_mul8x_mont_neon,%function
.align 5
bn_mul8x_mont_neon:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load rest of parameter block
mov ip,sp
cmp r5,#8
bhi .LNEON_8n
@ special case for r5==8, everything is in register bank...
vld1.32 {d28[0]}, [r2,:32]!
veor d8,d8,d8
sub r7,sp,r5,lsl#4
vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-(
and r7,r7,#-64
vld1.32 {d30[0]}, [r4,:32]
mov sp,r7 @ alloca
vzip.16 d28,d8
vmull.u32 q6,d28,d0[0]
vmull.u32 q7,d28,d0[1]
vmull.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmull.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
veor d8,d8,d8
vmul.u32 d29,d29,d30
vmull.u32 q10,d28,d2[0]
vld1.32 {d4,d5,d6,d7}, [r3]!
vmull.u32 q11,d28,d2[1]
vmull.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmull.u32 q13,d28,d3[1]
vmlal.u32 q6,d29,d4[0]
sub r9,r5,#1
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmov q5,q6
vmlal.u32 q11,d29,d6[1]
vmov q6,q7
vmlal.u32 q12,d29,d7[0]
vmov q7,q8
vmlal.u32 q13,d29,d7[1]
vmov q8,q9
vmov q9,q10
vshr.u64 d10,d10,#16
vmov q10,q11
vmov q11,q12
vadd.u64 d10,d10,d11
vmov q12,q13
veor q13,q13
vshr.u64 d10,d10,#16
b .LNEON_outer8
.align 4
.LNEON_outer8:
vld1.32 {d28[0]}, [r2,:32]!
veor d8,d8,d8
vzip.16 d28,d8
vadd.u64 d12,d12,d10
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
veor d8,d8,d8
subs r9,r9,#1
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
vmlal.u32 q6,d29,d4[0]
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmov q5,q6
vmlal.u32 q11,d29,d6[1]
vmov q6,q7
vmlal.u32 q12,d29,d7[0]
vmov q7,q8
vmlal.u32 q13,d29,d7[1]
vmov q8,q9
vmov q9,q10
vshr.u64 d10,d10,#16
vmov q10,q11
vmov q11,q12
vadd.u64 d10,d10,d11
vmov q12,q13
veor q13,q13
vshr.u64 d10,d10,#16
bne .LNEON_outer8
vadd.u64 d12,d12,d10
mov r7,sp
vshr.u64 d10,d12,#16
mov r8,r5
vadd.u64 d13,d13,d10
add r6,sp,#96
vshr.u64 d10,d13,#16
vzip.16 d12,d13
b .LNEON_tail_entry
.align 4
.LNEON_8n:
veor q6,q6,q6
sub r7,sp,#128
veor q7,q7,q7
sub r7,r7,r5,lsl#4
veor q8,q8,q8
and r7,r7,#-64
veor q9,q9,q9
mov sp,r7 @ alloca
veor q10,q10,q10
add r7,r7,#256
veor q11,q11,q11
sub r8,r5,#8
veor q12,q12,q12
veor q13,q13,q13
.LNEON_8n_init:
vst1.64 {q6,q7},[r7,:256]!
subs r8,r8,#8
vst1.64 {q8,q9},[r7,:256]!
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12,q13},[r7,:256]!
bne .LNEON_8n_init
add r6,sp,#256
vld1.32 {d0,d1,d2,d3},[r1]!
add r10,sp,#8
vld1.32 {d30[0]},[r4,:32]
mov r9,r5
b .LNEON_8n_outer
.align 4
.LNEON_8n_outer:
vld1.32 {d28[0]},[r2,:32]! @ *b++
veor d8,d8,d8
vzip.16 d28,d8
add r7,sp,#128
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
vmlal.u32 q10,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q11,d28,d2[1]
vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q6,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q7,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q8,d29,d5[0]
vshr.u64 d12,d12,#16
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vadd.u64 d12,d12,d13
vmlal.u32 q11,d29,d6[1]
vshr.u64 d12,d12,#16
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vadd.u64 d14,d14,d12
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0]
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]!
vmlal.u32 q8,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q9,d28,d1[0]
vshl.i64 d29,d15,#16
vmlal.u32 q10,d28,d1[1]
vadd.u64 d29,d29,d14
vmlal.u32 q11,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q12,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1]
vmlal.u32 q13,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q7,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q8,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q9,d29,d5[0]
vshr.u64 d14,d14,#16
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vadd.u64 d14,d14,d15
vmlal.u32 q12,d29,d6[1]
vshr.u64 d14,d14,#16
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vadd.u64 d16,d16,d14
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1]
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]!
vmlal.u32 q9,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q10,d28,d1[0]
vshl.i64 d29,d17,#16
vmlal.u32 q11,d28,d1[1]
vadd.u64 d29,d29,d16
vmlal.u32 q12,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q13,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2]
vmlal.u32 q6,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q8,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q9,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q10,d29,d5[0]
vshr.u64 d16,d16,#16
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vadd.u64 d16,d16,d17
vmlal.u32 q13,d29,d6[1]
vshr.u64 d16,d16,#16
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vadd.u64 d18,d18,d16
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2]
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]!
vmlal.u32 q10,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q11,d28,d1[0]
vshl.i64 d29,d19,#16
vmlal.u32 q12,d28,d1[1]
vadd.u64 d29,d29,d18
vmlal.u32 q13,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q6,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3]
vmlal.u32 q7,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q9,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q10,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q11,d29,d5[0]
vshr.u64 d18,d18,#16
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vadd.u64 d18,d18,d19
vmlal.u32 q6,d29,d6[1]
vshr.u64 d18,d18,#16
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vadd.u64 d20,d20,d18
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3]
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]!
vmlal.u32 q11,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q12,d28,d1[0]
vshl.i64 d29,d21,#16
vmlal.u32 q13,d28,d1[1]
vadd.u64 d29,d29,d20
vmlal.u32 q6,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q7,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4]
vmlal.u32 q8,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q10,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q11,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q12,d29,d5[0]
vshr.u64 d20,d20,#16
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vadd.u64 d20,d20,d21
vmlal.u32 q7,d29,d6[1]
vshr.u64 d20,d20,#16
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vadd.u64 d22,d22,d20
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4]
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]!
vmlal.u32 q12,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q13,d28,d1[0]
vshl.i64 d29,d23,#16
vmlal.u32 q6,d28,d1[1]
vadd.u64 d29,d29,d22
vmlal.u32 q7,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q8,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5]
vmlal.u32 q9,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q11,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q12,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q13,d29,d5[0]
vshr.u64 d22,d22,#16
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vadd.u64 d22,d22,d23
vmlal.u32 q8,d29,d6[1]
vshr.u64 d22,d22,#16
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vadd.u64 d24,d24,d22
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5]
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]!
vmlal.u32 q13,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q6,d28,d1[0]
vshl.i64 d29,d25,#16
vmlal.u32 q7,d28,d1[1]
vadd.u64 d29,d29,d24
vmlal.u32 q8,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q9,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6]
vmlal.u32 q10,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q12,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q13,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q6,d29,d5[0]
vshr.u64 d24,d24,#16
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vadd.u64 d24,d24,d25
vmlal.u32 q9,d29,d6[1]
vshr.u64 d24,d24,#16
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vadd.u64 d26,d26,d24
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6]
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]!
vmlal.u32 q6,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q7,d28,d1[0]
vshl.i64 d29,d27,#16
vmlal.u32 q8,d28,d1[1]
vadd.u64 d29,d29,d26
vmlal.u32 q9,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7]
vmlal.u32 q11,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q12,d28,d3[1]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q13,d29,d4[0]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q6,d29,d4[1]
vmlal.u32 q7,d29,d5[0]
vshr.u64 d26,d26,#16
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vadd.u64 d26,d26,d27
vmlal.u32 q10,d29,d6[1]
vshr.u64 d26,d26,#16
vmlal.u32 q11,d29,d7[0]
vmlal.u32 q12,d29,d7[1]
vadd.u64 d12,d12,d26
vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7]
add r10,sp,#8 @ rewind
sub r8,r5,#8
b .LNEON_8n_inner
.align 4
.LNEON_8n_inner:
subs r8,r8,#8
vmlal.u32 q6,d28,d0[0]
vld1.64 {q13},[r6,:128]
vmlal.u32 q7,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0]
vmlal.u32 q8,d28,d1[0]
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q9,d28,d1[1]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vmlal.u32 q13,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1]
vmlal.u32 q6,d29,d4[0]
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmlal.u32 q11,d29,d6[1]
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vst1.64 {q6},[r7,:128]!
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]
vmlal.u32 q8,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1]
vmlal.u32 q9,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d1[1]
vmlal.u32 q11,d28,d2[0]
vmlal.u32 q12,d28,d2[1]
vmlal.u32 q13,d28,d3[0]
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2]
vmlal.u32 q7,d29,d4[0]
vmlal.u32 q8,d29,d4[1]
vmlal.u32 q9,d29,d5[0]
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vmlal.u32 q12,d29,d6[1]
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vst1.64 {q7},[r7,:128]!
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]
vmlal.u32 q9,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2]
vmlal.u32 q10,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q11,d28,d1[1]
vmlal.u32 q12,d28,d2[0]
vmlal.u32 q13,d28,d2[1]
vmlal.u32 q6,d28,d3[0]
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3]
vmlal.u32 q8,d29,d4[0]
vmlal.u32 q9,d29,d4[1]
vmlal.u32 q10,d29,d5[0]
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vmlal.u32 q13,d29,d6[1]
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vst1.64 {q8},[r7,:128]!
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]
vmlal.u32 q10,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3]
vmlal.u32 q11,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q12,d28,d1[1]
vmlal.u32 q13,d28,d2[0]
vmlal.u32 q6,d28,d2[1]
vmlal.u32 q7,d28,d3[0]
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4]
vmlal.u32 q9,d29,d4[0]
vmlal.u32 q10,d29,d4[1]
vmlal.u32 q11,d29,d5[0]
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vmlal.u32 q6,d29,d6[1]
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vst1.64 {q9},[r7,:128]!
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]
vmlal.u32 q11,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4]
vmlal.u32 q12,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q13,d28,d1[1]
vmlal.u32 q6,d28,d2[0]
vmlal.u32 q7,d28,d2[1]
vmlal.u32 q8,d28,d3[0]
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+5]
vmlal.u32 q10,d29,d4[0]
vmlal.u32 q11,d29,d4[1]
vmlal.u32 q12,d29,d5[0]
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vmlal.u32 q7,d29,d6[1]
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vst1.64 {q10},[r7,:128]!
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]
vmlal.u32 q12,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5]
vmlal.u32 q13,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q6,d28,d1[1]
vmlal.u32 q7,d28,d2[0]
vmlal.u32 q8,d28,d2[1]
vmlal.u32 q9,d28,d3[0]
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6]
vmlal.u32 q11,d29,d4[0]
vmlal.u32 q12,d29,d4[1]
vmlal.u32 q13,d29,d5[0]
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vmlal.u32 q8,d29,d6[1]
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vst1.64 {q11},[r7,:128]!
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]
vmlal.u32 q13,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6]
vmlal.u32 q6,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q7,d28,d1[1]
vmlal.u32 q8,d28,d2[0]
vmlal.u32 q9,d28,d2[1]
vmlal.u32 q10,d28,d3[0]
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7]
vmlal.u32 q12,d29,d4[0]
vmlal.u32 q13,d29,d4[1]
vmlal.u32 q6,d29,d5[0]
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vmlal.u32 q9,d29,d6[1]
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vst1.64 {q12},[r7,:128]!
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]
vmlal.u32 q6,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7]
vmlal.u32 q7,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q8,d28,d1[1]
vmlal.u32 q9,d28,d2[0]
vmlal.u32 q10,d28,d2[1]
vmlal.u32 q11,d28,d3[0]
vmlal.u32 q12,d28,d3[1]
it eq
subeq r1,r1,r5,lsl#2 @ rewind
vmlal.u32 q13,d29,d4[0]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q6,d29,d4[1]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q7,d29,d5[0]
add r10,sp,#8 @ rewind
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vmlal.u32 q10,d29,d6[1]
vmlal.u32 q11,d29,d7[0]
vst1.64 {q13},[r7,:128]!
vmlal.u32 q12,d29,d7[1]
bne .LNEON_8n_inner
add r6,sp,#128
vst1.64 {q6,q7},[r7,:256]!
veor q2,q2,q2 @ d4-d5
vst1.64 {q8,q9},[r7,:256]!
veor q3,q3,q3 @ d6-d7
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12},[r7,:128]
subs r9,r9,#8
vld1.64 {q6,q7},[r6,:256]!
vld1.64 {q8,q9},[r6,:256]!
vld1.64 {q10,q11},[r6,:256]!
vld1.64 {q12,q13},[r6,:256]!
itt ne
subne r3,r3,r5,lsl#2 @ rewind
bne .LNEON_8n_outer
add r7,sp,#128
vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame
vshr.u64 d10,d12,#16
vst1.64 {q2,q3},[sp,:256]!
vadd.u64 d13,d13,d10
vst1.64 {q2,q3}, [sp,:256]!
vshr.u64 d10,d13,#16
vst1.64 {q2,q3}, [sp,:256]!
vzip.16 d12,d13
mov r8,r5
b .LNEON_tail_entry
.align 4
.LNEON_tail:
vadd.u64 d12,d12,d10
vshr.u64 d10,d12,#16
vld1.64 {q8,q9}, [r6, :256]!
vadd.u64 d13,d13,d10
vld1.64 {q10,q11}, [r6, :256]!
vshr.u64 d10,d13,#16
vld1.64 {q12,q13}, [r6, :256]!
vzip.16 d12,d13
.LNEON_tail_entry:
vadd.u64 d14,d14,d10
vst1.32 {d12[0]}, [r7, :32]!
vshr.u64 d10,d14,#16
vadd.u64 d15,d15,d10
vshr.u64 d10,d15,#16
vzip.16 d14,d15
vadd.u64 d16,d16,d10
vst1.32 {d14[0]}, [r7, :32]!
vshr.u64 d10,d16,#16
vadd.u64 d17,d17,d10
vshr.u64 d10,d17,#16
vzip.16 d16,d17
vadd.u64 d18,d18,d10
vst1.32 {d16[0]}, [r7, :32]!
vshr.u64 d10,d18,#16
vadd.u64 d19,d19,d10
vshr.u64 d10,d19,#16
vzip.16 d18,d19
vadd.u64 d20,d20,d10
vst1.32 {d18[0]}, [r7, :32]!
vshr.u64 d10,d20,#16
vadd.u64 d21,d21,d10
vshr.u64 d10,d21,#16
vzip.16 d20,d21
vadd.u64 d22,d22,d10
vst1.32 {d20[0]}, [r7, :32]!
vshr.u64 d10,d22,#16
vadd.u64 d23,d23,d10
vshr.u64 d10,d23,#16
vzip.16 d22,d23
vadd.u64 d24,d24,d10
vst1.32 {d22[0]}, [r7, :32]!
vshr.u64 d10,d24,#16
vadd.u64 d25,d25,d10
vshr.u64 d10,d25,#16
vzip.16 d24,d25
vadd.u64 d26,d26,d10
vst1.32 {d24[0]}, [r7, :32]!
vshr.u64 d10,d26,#16
vadd.u64 d27,d27,d10
vshr.u64 d10,d27,#16
vzip.16 d26,d27
vld1.64 {q6,q7}, [r6, :256]!
subs r8,r8,#8
vst1.32 {d26[0]}, [r7, :32]!
bne .LNEON_tail
vst1.32 {d10[0]}, [r7, :32] @ top-most bit
sub r3,r3,r5,lsl#2 @ rewind r3
subs r1,sp,#0 @ clear carry flag
add r2,sp,r5,lsl#2
.LNEON_sub:
ldmia r1!, {r4,r5,r6,r7}
ldmia r3!, {r8,r9,r10,r11}
sbcs r8, r4,r8
sbcs r9, r5,r9
sbcs r10,r6,r10
sbcs r11,r7,r11
teq r1,r2 @ preserves carry
stmia r0!, {r8,r9,r10,r11}
bne .LNEON_sub
ldr r10, [r1] @ load top-most bit
mov r11,sp
veor q0,q0,q0
sub r11,r2,r11 @ this is num*4
veor q1,q1,q1
mov r1,sp
sub r0,r0,r11 @ rewind r0
mov r3,r2 @ second 3/4th of frame
sbcs r10,r10,#0 @ result is carry flag
.LNEON_copy_n_zap:
ldmia r1!, {r4,r5,r6,r7}
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0,q1}, [r3,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
ldmia r1, {r4,r5,r6,r7}
stmia r0!, {r8,r9,r10,r11}
sub r1,r1,#16
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0,q1}, [r1,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
teq r1,r2 @ preserves carry
stmia r0!, {r8,r9,r10,r11}
bne .LNEON_copy_n_zap
mov sp,ip
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
bx lr @ bx lr
.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
Chigozieworldwide/Multi | 68,707 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/p256-x86_64-asm-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.section __DATA,__const
.p2align 6
L$poly:
.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
L$One:
.long 1,1,1,1,1,1,1,1
L$Two:
.long 2,2,2,2,2,2,2,2
L$Three:
.long 3,3,3,3,3,3,3,3
L$ONE_mont:
.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
L$ord:
.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
L$ordK:
.quad 0xccd1c8aaee00bc4f
.text
.globl _ecp_nistz256_neg
.private_extern _ecp_nistz256_neg
.p2align 5
_ecp_nistz256_neg:
_CET_ENDBR
pushq %r12
pushq %r13
L$neg_body:
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
xorq %r13,%r13
subq 0(%rsi),%r8
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r8,%rax
sbbq 24(%rsi),%r11
leaq L$poly(%rip),%rsi
movq %r9,%rdx
sbbq $0,%r13
addq 0(%rsi),%r8
movq %r10,%rcx
adcq 8(%rsi),%r9
adcq 16(%rsi),%r10
movq %r11,%r12
adcq 24(%rsi),%r11
testq %r13,%r13
cmovzq %rax,%r8
cmovzq %rdx,%r9
movq %r8,0(%rdi)
cmovzq %rcx,%r10
movq %r9,8(%rdi)
cmovzq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq 0(%rsp),%r13
movq 8(%rsp),%r12
leaq 16(%rsp),%rsp
L$neg_epilogue:
ret
.globl _ecp_nistz256_ord_mul_mont_nohw
.private_extern _ecp_nistz256_ord_mul_mont_nohw
.p2align 5
_ecp_nistz256_ord_mul_mont_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_mul_body:
movq 0(%rdx),%rax
movq %rdx,%rbx
leaq L$ord(%rip),%r14
movq L$ordK(%rip),%r15
movq %rax,%rcx
mulq 0(%rsi)
movq %rax,%r8
movq %rcx,%rax
movq %rdx,%r9
mulq 8(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq 16(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %r8,%r13
imulq %r15,%r8
movq %rdx,%r11
mulq 24(%rsi)
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq 0(%r14)
movq %r8,%rbp
addq %rax,%r13
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rcx
subq %r8,%r10
sbbq $0,%r8
mulq 8(%r14)
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %rbp,%rax
adcq %rdx,%r10
movq %rbp,%rdx
adcq $0,%r8
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 8(%rbx),%rax
sbbq %rdx,%rbp
addq %r8,%r11
adcq %rbp,%r12
adcq $0,%r13
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %r9,%rcx
imulq %r15,%r9
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r12
adcq $0,%rdx
xorq %r8,%r8
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
mulq 0(%r14)
movq %r9,%rbp
addq %rax,%rcx
movq %r9,%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%r9
mulq 8(%r14)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq %rdx,%r11
movq %rbp,%rdx
adcq $0,%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r12
movq 16(%rbx),%rax
sbbq %rdx,%rbp
addq %r9,%r12
adcq %rbp,%r13
adcq $0,%r8
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %r10,%rcx
imulq %r15,%r10
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r13
adcq $0,%rdx
xorq %r9,%r9
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
mulq 0(%r14)
movq %r10,%rbp
addq %rax,%rcx
movq %r10,%rax
adcq %rdx,%rcx
subq %r10,%r12
sbbq $0,%r10
mulq 8(%r14)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq %rdx,%r12
movq %rbp,%rdx
adcq $0,%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r13
movq 24(%rbx),%rax
sbbq %rdx,%rbp
addq %r10,%r13
adcq %rbp,%r8
adcq $0,%r9
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rcx,%rax
adcq $0,%rdx
movq %r11,%rcx
imulq %r15,%r11
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r8
adcq $0,%rdx
xorq %r10,%r10
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
mulq 0(%r14)
movq %r11,%rbp
addq %rax,%rcx
movq %r11,%rax
adcq %rdx,%rcx
subq %r11,%r13
sbbq $0,%r11
mulq 8(%r14)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq %rdx,%r13
movq %rbp,%rdx
adcq $0,%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
sbbq %rdx,%rbp
addq %r11,%r8
adcq %rbp,%r9
adcq $0,%r10
movq %r12,%rsi
subq 0(%r14),%r12
movq %r13,%r11
sbbq 8(%r14),%r13
movq %r8,%rcx
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rsi,%r12
cmovcq %r11,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_mul_epilogue:
ret
.globl _ecp_nistz256_ord_sqr_mont_nohw
.private_extern _ecp_nistz256_ord_sqr_mont_nohw
.p2align 5
_ecp_nistz256_ord_sqr_mont_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_sqr_body:
movq 0(%rsi),%r8
movq 8(%rsi),%rax
movq 16(%rsi),%r14
movq 24(%rsi),%r15
leaq L$ord(%rip),%rsi
movq %rdx,%rbx
jmp L$oop_ord_sqr
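# Each iteration of L$oop_ord_sqr performs one Montgomery squaring modulo
# the group order; %rbx counts the remaining squarings (the function's
# third argument).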
.p2align 5
L$oop_ord_sqr:
movq %rax,%rbp
mulq %r8
movq %rax,%r9
.byte 102,72,15,110,205
movq %r14,%rax
movq %rdx,%r10
mulq %r8
addq %rax,%r10
movq %r15,%rax
.byte 102,73,15,110,214
adcq $0,%rdx
movq %rdx,%r11
mulq %r8
addq %rax,%r11
movq %r15,%rax
.byte 102,73,15,110,223
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
movq %rax,%r13
movq %r14,%rax
movq %rdx,%r14
mulq %rbp
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r15
mulq %rbp
addq %rax,%r12
adcq $0,%rdx
addq %r15,%r12
adcq %rdx,%r13
adcq $0,%r14
xorq %r15,%r15
movq %r8,%rax
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
.byte 102,72,15,126,200
movq %rdx,%rbp
mulq %rax
addq %rbp,%r9
adcq %rax,%r10
.byte 102,72,15,126,208
adcq $0,%rdx
movq %rdx,%rbp
mulq %rax
addq %rbp,%r11
adcq %rax,%r12
.byte 102,72,15,126,216
adcq $0,%rdx
movq %rdx,%rbp
movq %r8,%rcx
imulq 32(%rsi),%r8
mulq %rax
addq %rbp,%r13
adcq %rax,%r14
movq 0(%rsi),%rax
adcq %rdx,%r15
mulq %r8
movq %r8,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r8,%r10
sbbq $0,%rbp
mulq %r8
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %r8,%rax
adcq %rdx,%r10
movq %r8,%rdx
adcq $0,%rbp
movq %r9,%rcx
imulq 32(%rsi),%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 0(%rsi),%rax
sbbq %rdx,%r8
addq %rbp,%r11
adcq $0,%r8
mulq %r9
movq %r9,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%rbp
mulq %r9
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %r9,%rax
adcq %rdx,%r11
movq %r9,%rdx
adcq $0,%rbp
movq %r10,%rcx
imulq 32(%rsi),%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
movq 0(%rsi),%rax
sbbq %rdx,%r9
addq %rbp,%r8
adcq $0,%r9
mulq %r10
movq %r10,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r10,%r8
sbbq $0,%rbp
mulq %r10
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %r10,%rax
adcq %rdx,%r8
movq %r10,%rdx
adcq $0,%rbp
movq %r11,%rcx
imulq 32(%rsi),%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r9
movq 0(%rsi),%rax
sbbq %rdx,%r10
addq %rbp,%r9
adcq $0,%r10
mulq %r11
movq %r11,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r11,%r9
sbbq $0,%rbp
mulq %r11
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
movq %r11,%rdx
adcq $0,%rbp
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r10
sbbq %rdx,%r11
addq %rbp,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r12,%r8
adcq %r13,%r9
movq %r8,%r12
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%rax
adcq $0,%rdx
subq 0(%rsi),%r8
movq %r10,%r14
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r15
sbbq 24(%rsi),%r11
sbbq $0,%rdx
cmovcq %r12,%r8
cmovncq %r9,%rax
cmovncq %r10,%r14
cmovncq %r11,%r15
decq %rbx
jnz L$oop_ord_sqr
movq %r8,0(%rdi)
movq %rax,8(%rdi)
pxor %xmm1,%xmm1
movq %r14,16(%rdi)
pxor %xmm2,%xmm2
movq %r15,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_sqr_epilogue:
ret
.globl _ecp_nistz256_ord_mul_mont_adx
.private_extern _ecp_nistz256_ord_mul_mont_adx
.p2align 5
_ecp_nistz256_ord_mul_mont_adx:
L$ecp_nistz256_ord_mul_mont_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_mulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
leaq L$ord-128(%rip),%r14
movq L$ordK(%rip),%r15
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
mulxq %r11,%rbp,%r11
addq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
mulxq %r15,%rdx,%rax
adcq %rbp,%r10
adcq %rcx,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24+128(%r14),%rcx,%rbp
movq 8(%rbx),%rdx
adcxq %rcx,%r11
adoxq %rbp,%r12
adcxq %r8,%r12
adoxq %r8,%r13
adcq $0,%r13
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%r14),%rcx,%rbp
movq 16(%rbx),%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r9,%r13
adoxq %r9,%r8
adcq $0,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%r14),%rcx,%rbp
movq 24(%rbx),%rdx
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r10,%r8
adoxq %r10,%r9
adcq $0,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r8
adoxq %rbp,%r9
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%r14),%rcx,%rbp
leaq 128(%r14),%r14
movq %r12,%rbx
adcxq %rcx,%r8
adoxq %rbp,%r9
movq %r13,%rdx
adcxq %r11,%r9
adoxq %r11,%r10
adcq $0,%r10
movq %r8,%rcx
subq 0(%r14),%r12
sbbq 8(%r14),%r13
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_mulx_epilogue:
ret
.globl _ecp_nistz256_ord_sqr_mont_adx
.private_extern _ecp_nistz256_ord_sqr_mont_adx
.p2align 5
_ecp_nistz256_ord_sqr_mont_adx:
_CET_ENDBR
L$ecp_nistz256_ord_sqr_mont_adx:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_sqrx_body:
movq %rdx,%rbx
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq L$ord(%rip),%rsi
jmp L$oop_ord_sqrx
.p2align 5
L$oop_ord_sqrx:
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
movq %rdx,%rax
.byte 102,73,15,110,206
mulxq %r8,%rbp,%r12
movq %r14,%rdx
addq %rcx,%r10
.byte 102,73,15,110,215
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq %rax,%rdx
.byte 102,73,15,110,216
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
.byte 102,72,15,126,202
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
.byte 102,72,15,126,210
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
mulxq %rdx,%rcx,%rbp
.byte 0x67
.byte 102,72,15,126,218
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
adoxq %rbp,%r13
mulxq %rdx,%rcx,%rax
adoxq %rcx,%r14
adoxq %rax,%r15
movq %r8,%rdx
mulxq 32(%rsi),%rdx,%rcx
xorq %rax,%rax
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
adcxq %rax,%r8
movq %r9,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
adoxq %rax,%r9
movq %r10,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
adcxq %rax,%r10
movq %r11,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
adoxq %rax,%r11
addq %r8,%r12
adcq %r13,%r9
movq %r12,%rdx
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%r14
adcq $0,%rax
subq 0(%rsi),%r12
movq %r10,%r15
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r8
sbbq 24(%rsi),%r11
sbbq $0,%rax
cmovncq %r12,%rdx
cmovncq %r9,%r14
cmovncq %r10,%r15
cmovncq %r11,%r8
decq %rbx
jnz L$oop_ord_sqrx
movq %rdx,0(%rdi)
movq %r14,8(%rdi)
pxor %xmm1,%xmm1
movq %r15,16(%rdi)
pxor %xmm2,%xmm2
movq %r8,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_sqrx_epilogue:
ret
.globl _ecp_nistz256_mul_mont_nohw
.private_extern _ecp_nistz256_mul_mont_nohw
.p2align 5
_ecp_nistz256_mul_mont_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mul_body:
movq %rdx,%rbx
movq 0(%rdx),%rax
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
call __ecp_nistz256_mul_montq
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$mul_epilogue:
ret
.p2align 5
__ecp_nistz256_mul_montq:
movq %rax,%rbp
mulq %r9
movq L$poly+8(%rip),%r14
movq %rax,%r8
movq %rbp,%rax
movq %rdx,%r9
mulq %r10
movq L$poly+24(%rip),%r15
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %r11
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r12
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
xorq %r13,%r13
movq %rdx,%r12
movq %r8,%rbp
shlq $32,%r8
mulq %r15
shrq $32,%rbp
addq %r8,%r9
adcq %rbp,%r10
adcq %rax,%r11
movq 8(%rbx),%rax
adcq %rdx,%r12
adcq $0,%r13
xorq %r8,%r8
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
movq %r9,%rbp
shlq $32,%r9
mulq %r15
shrq $32,%rbp
addq %r9,%r10
adcq %rbp,%r11
adcq %rax,%r12
movq 16(%rbx),%rax
adcq %rdx,%r13
adcq $0,%r8
xorq %r9,%r9
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
movq %r10,%rbp
shlq $32,%r10
mulq %r15
shrq $32,%rbp
addq %r10,%r11
adcq %rbp,%r12
adcq %rax,%r13
movq 24(%rbx),%rax
adcq %rdx,%r8
adcq $0,%r9
xorq %r10,%r10
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
movq %r11,%rbp
shlq $32,%r11
mulq %r15
shrq $32,%rbp
addq %r11,%r12
adcq %rbp,%r13
movq %r12,%rcx
adcq %rax,%r8
adcq %rdx,%r9
movq %r13,%rbp
adcq $0,%r10
subq $-1,%r12
movq %r8,%rbx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rdx
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rcx,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rbx,%r8
movq %r13,8(%rdi)
cmovcq %rdx,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_sqr_mont_nohw
.private_extern _ecp_nistz256_sqr_mont_nohw
.p2align 5
_ecp_nistz256_sqr_mont_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$sqr_body:
movq 0(%rsi),%rax
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
call __ecp_nistz256_sqr_montq
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$sqr_epilogue:
ret
.p2align 5
__ecp_nistz256_sqr_montq:
movq %rax,%r13
mulq %r14
movq %rax,%r9
movq %r15,%rax
movq %rdx,%r10
mulq %r13
addq %rax,%r10
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r13
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq %r14
addq %rax,%r12
movq %r8,%rax
adcq $0,%rdx
addq %rbp,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r15
xorq %r15,%r15
addq %rax,%r13
movq 0(%rsi),%rax
movq %rdx,%r14
adcq $0,%r14
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
movq 8(%rsi),%rax
movq %rdx,%rcx
mulq %rax
addq %rcx,%r9
adcq %rax,%r10
movq 16(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r11
adcq %rax,%r12
movq 24(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r13
adcq %rax,%r14
movq %r8,%rax
adcq %rdx,%r15
movq L$poly+8(%rip),%rsi
movq L$poly+24(%rip),%rbp
movq %r8,%rcx
shlq $32,%r8
mulq %rbp
shrq $32,%rcx
addq %r8,%r9
adcq %rcx,%r10
adcq %rax,%r11
movq %r9,%rax
adcq $0,%rdx
movq %r9,%rcx
shlq $32,%r9
movq %rdx,%r8
mulq %rbp
shrq $32,%rcx
addq %r9,%r10
adcq %rcx,%r11
adcq %rax,%r8
movq %r10,%rax
adcq $0,%rdx
movq %r10,%rcx
shlq $32,%r10
movq %rdx,%r9
mulq %rbp
shrq $32,%rcx
addq %r10,%r11
adcq %rcx,%r8
adcq %rax,%r9
movq %r11,%rax
adcq $0,%rdx
movq %r11,%rcx
shlq $32,%r11
movq %rdx,%r10
mulq %rbp
shrq $32,%rcx
addq %r11,%r8
adcq %rcx,%r9
adcq %rax,%r10
adcq $0,%rdx
xorq %r11,%r11
addq %r8,%r12
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %rdx,%r15
movq %r13,%r9
adcq $0,%r11
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%rcx
sbbq %rbp,%r15
sbbq $0,%r11
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %rcx,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.globl _ecp_nistz256_mul_mont_adx
.private_extern _ecp_nistz256_mul_mont_adx
.p2align 5
_ecp_nistz256_mul_mont_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$mulx_epilogue:
ret
.p2align 5
__ecp_nistz256_mul_montx:
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
movq $32,%r14
xorq %r13,%r13
mulxq %r11,%rbp,%r11
movq L$poly+24(%rip),%r15
adcq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
adcq %rbp,%r10
shlxq %r14,%r8,%rbp
adcq %rcx,%r11
shrxq %r14,%r8,%rcx
adcq $0,%r12
addq %rbp,%r9
adcq %rcx,%r10
mulxq %r15,%rcx,%rbp
movq 8(%rbx),%rdx
adcq %rcx,%r11
adcq %rbp,%r12
adcq $0,%r13
xorq %r8,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
adcxq %rcx,%r12
shlxq %r14,%r9,%rcx
adoxq %rbp,%r13
shrxq %r14,%r9,%rbp
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
addq %rcx,%r10
adcq %rbp,%r11
mulxq %r15,%rcx,%rbp
movq 16(%rbx),%rdx
adcq %rcx,%r12
adcq %rbp,%r13
adcq $0,%r8
xorq %r9,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
adcxq %rcx,%r13
shlxq %r14,%r10,%rcx
adoxq %rbp,%r8
shrxq %r14,%r10,%rbp
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
addq %rcx,%r11
adcq %rbp,%r12
mulxq %r15,%rcx,%rbp
movq 24(%rbx),%rdx
adcq %rcx,%r13
adcq %rbp,%r8
adcq $0,%r9
xorq %r10,%r10
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
adcxq %rcx,%r8
shlxq %r14,%r11,%rcx
adoxq %rbp,%r9
shrxq %r14,%r11,%rbp
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
addq %rcx,%r12
adcq %rbp,%r13
mulxq %r15,%rcx,%rbp
movq %r12,%rbx
movq L$poly+8(%rip),%r14
adcq %rcx,%r8
movq %r13,%rdx
adcq %rbp,%r9
adcq $0,%r10
xorl %eax,%eax
movq %r8,%rcx
sbbq $-1,%r12
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rbp
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %rbp,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_sqr_mont_adx
.private_extern _ecp_nistz256_sqr_mont_adx
.p2align 5
_ecp_nistz256_sqr_mont_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$sqrx_body:
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq -128(%rsi),%rsi
call __ecp_nistz256_sqr_montx
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$sqrx_epilogue:
ret
.p2align 5
__ecp_nistz256_sqr_montx:
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
xorl %eax,%eax
adcq %rcx,%r10
mulxq %r8,%rbp,%r12
movq %r14,%rdx
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq 0+128(%rsi),%rdx
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
movq 8+128(%rsi),%rdx
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
movq 16+128(%rsi),%rdx
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
.byte 0x67
mulxq %rdx,%rcx,%rbp
movq 24+128(%rsi),%rdx
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
movq $32,%rsi
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %rdx,%rcx,%rax
movq L$poly+24(%rip),%rdx
adoxq %rcx,%r14
shlxq %rsi,%r8,%rcx
adoxq %rax,%r15
shrxq %rsi,%r8,%rax
movq %rdx,%rbp
addq %rcx,%r9
adcq %rax,%r10
mulxq %r8,%rcx,%r8
adcq %rcx,%r11
shlxq %rsi,%r9,%rcx
adcq $0,%r8
shrxq %rsi,%r9,%rax
addq %rcx,%r10
adcq %rax,%r11
mulxq %r9,%rcx,%r9
adcq %rcx,%r8
shlxq %rsi,%r10,%rcx
adcq $0,%r9
shrxq %rsi,%r10,%rax
addq %rcx,%r11
adcq %rax,%r8
mulxq %r10,%rcx,%r10
adcq %rcx,%r9
shlxq %rsi,%r11,%rcx
adcq $0,%r10
shrxq %rsi,%r11,%rax
addq %rcx,%r8
adcq %rax,%r9
mulxq %r11,%rcx,%r11
adcq %rcx,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r8,%r12
movq L$poly+8(%rip),%rsi
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %r11,%r15
movq %r13,%r9
adcq $0,%rdx
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%r11
sbbq %rbp,%r15
sbbq $0,%rdx
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %r11,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.globl _ecp_nistz256_select_w5_nohw
.private_extern _ecp_nistz256_select_w5_nohw
.p2align 5
_ecp_nistz256_select_w5_nohw:
_CET_ENDBR
movdqa L$One(%rip),%xmm0
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movdqa %xmm0,%xmm8
pshufd $0,%xmm1,%xmm1
movq $16,%rax
L$select_loop_sse_w5:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
pcmpeqd %xmm1,%xmm15
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
movdqa 64(%rsi),%xmm13
movdqa 80(%rsi),%xmm14
leaq 96(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
pand %xmm15,%xmm13
por %xmm12,%xmm5
pand %xmm15,%xmm14
por %xmm13,%xmm6
por %xmm14,%xmm7
decq %rax
jnz L$select_loop_sse_w5
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
movdqu %xmm6,64(%rdi)
movdqu %xmm7,80(%rdi)
ret
L$SEH_end_ecp_nistz256_select_w5_nohw:
.globl _ecp_nistz256_select_w7_nohw
.private_extern _ecp_nistz256_select_w7_nohw
.p2align 5
_ecp_nistz256_select_w7_nohw:
_CET_ENDBR
movdqa L$One(%rip),%xmm8
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa %xmm8,%xmm0
pshufd $0,%xmm1,%xmm1
movq $64,%rax
L$select_loop_sse_w7:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
pcmpeqd %xmm1,%xmm15
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
leaq 64(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
prefetcht0 255(%rsi)
por %xmm12,%xmm5
decq %rax
jnz L$select_loop_sse_w7
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
ret
L$SEH_end_ecp_nistz256_select_w7_nohw:
.globl _ecp_nistz256_select_w5_avx2
.private_extern _ecp_nistz256_select_w5_avx2
.p2align 5
_ecp_nistz256_select_w5_avx2:
_CET_ENDBR
vzeroupper
vmovdqa L$Two(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vpxor %ymm4,%ymm4,%ymm4
vmovdqa L$One(%rip),%ymm5
vmovdqa L$Two(%rip),%ymm10
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $8,%rax
L$select_loop_avx2_w5:
vmovdqa 0(%rsi),%ymm6
vmovdqa 32(%rsi),%ymm7
vmovdqa 64(%rsi),%ymm8
vmovdqa 96(%rsi),%ymm11
vmovdqa 128(%rsi),%ymm12
vmovdqa 160(%rsi),%ymm13
vpcmpeqd %ymm1,%ymm5,%ymm9
vpcmpeqd %ymm1,%ymm10,%ymm14
vpaddd %ymm0,%ymm5,%ymm5
vpaddd %ymm0,%ymm10,%ymm10
leaq 192(%rsi),%rsi
vpand %ymm9,%ymm6,%ymm6
vpand %ymm9,%ymm7,%ymm7
vpand %ymm9,%ymm8,%ymm8
vpand %ymm14,%ymm11,%ymm11
vpand %ymm14,%ymm12,%ymm12
vpand %ymm14,%ymm13,%ymm13
vpxor %ymm6,%ymm2,%ymm2
vpxor %ymm7,%ymm3,%ymm3
vpxor %ymm8,%ymm4,%ymm4
vpxor %ymm11,%ymm2,%ymm2
vpxor %ymm12,%ymm3,%ymm3
vpxor %ymm13,%ymm4,%ymm4
decq %rax
jnz L$select_loop_avx2_w5
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,64(%rdi)
vzeroupper
ret
L$SEH_end_ecp_nistz256_select_w5_avx2:
.globl _ecp_nistz256_select_w7_avx2
.private_extern _ecp_nistz256_select_w7_avx2
.p2align 5
_ecp_nistz256_select_w7_avx2:
_CET_ENDBR
vzeroupper
vmovdqa L$Three(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vmovdqa L$One(%rip),%ymm4
vmovdqa L$Two(%rip),%ymm8
vmovdqa L$Three(%rip),%ymm12
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $21,%rax
L$select_loop_avx2_w7:
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vmovdqa 64(%rsi),%ymm9
vmovdqa 96(%rsi),%ymm10
vmovdqa 128(%rsi),%ymm13
vmovdqa 160(%rsi),%ymm14
vpcmpeqd %ymm1,%ymm4,%ymm7
vpcmpeqd %ymm1,%ymm8,%ymm11
vpcmpeqd %ymm1,%ymm12,%ymm15
vpaddd %ymm0,%ymm4,%ymm4
vpaddd %ymm0,%ymm8,%ymm8
vpaddd %ymm0,%ymm12,%ymm12
leaq 192(%rsi),%rsi
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpand %ymm11,%ymm9,%ymm9
vpand %ymm11,%ymm10,%ymm10
vpand %ymm15,%ymm13,%ymm13
vpand %ymm15,%ymm14,%ymm14
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vpxor %ymm9,%ymm2,%ymm2
vpxor %ymm10,%ymm3,%ymm3
vpxor %ymm13,%ymm2,%ymm2
vpxor %ymm14,%ymm3,%ymm3
decq %rax
jnz L$select_loop_avx2_w7
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vpcmpeqd %ymm1,%ymm4,%ymm7
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vzeroupper
ret
L$SEH_end_ecp_nistz256_select_w7_avx2:
.p2align 5
__ecp_nistz256_add_toq:
xorq %r11,%r11
addq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_sub_fromq:
subq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq %r11,%r11
addq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
testq %r11,%r11
cmovzq %rax,%r12
cmovzq %rbp,%r13
movq %r12,0(%rdi)
cmovzq %rcx,%r8
movq %r13,8(%rdi)
cmovzq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_subq:
subq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq %r11,%r11
addq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
testq %r11,%r11
cmovnzq %rax,%r12
cmovnzq %rbp,%r13
cmovnzq %rcx,%r8
cmovnzq %r10,%r9
ret
.p2align 5
__ecp_nistz256_mul_by_2q:
xorq %r11,%r11
addq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_point_double_nohw
.private_extern _ecp_nistz256_point_double_nohw
.p2align 5
_ecp_nistz256_point_double_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $160+8,%rsp
L$point_doubleq_body:
L$point_double_shortcutq:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq L$poly+8(%rip),%r14
movq L$poly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-0(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 32(%rbx),%rax
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-0(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montq
call __ecp_nistz256_mul_by_2q
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montq
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rax
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 0+32(%rsp),%rax
movq 8+32(%rsp),%r14
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montq
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subq
movq 32(%rsp),%rax
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-0(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromq
leaq 160+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_doubleq_epilogue:
ret
.globl _ecp_nistz256_point_add_nohw
.private_extern _ecp_nistz256_point_add_nohw
.p2align 5
_ecp_nistz256_point_add_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $576+8,%rsp
L$point_addq_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-0(%rsi),%rsi
movq %rax,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rax
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-0(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 416(%rsp),%rax
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 512(%rsp),%rax
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq 0+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 480(%rsp),%rax
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e
jnz L$add_proceedq
testq %r9,%r9
jz L$add_doubleq
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp L$add_doneq
.p2align 5
L$add_doubleq:
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
jmp L$point_double_shortcutq
.p2align 5
L$add_proceedq:
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq 0+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0(%rsp),%rax
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 160(%rsp),%rax
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
L$add_doneq:
leaq 576+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_addq_epilogue:
ret
.globl _ecp_nistz256_point_add_affine_nohw
.private_extern _ecp_nistz256_point_add_affine_nohw
.p2align 5
_ecp_nistz256_point_add_affine_nohw:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $480+8,%rsp
L$add_affineq_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-0(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rax
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-0(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+96(%rsp),%rax
movq 8+96(%rsp),%r14
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq 0+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rax
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq 0+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand L$ONE_mont(%rip),%xmm2
pand L$ONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$add_affineq_epilogue:
ret
.p2align 5
__ecp_nistz256_add_tox:
xorq %r11,%r11
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_sub_fromx:
xorq %r11,%r11
sbbq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq $0,%r11
xorq %r10,%r10
adcq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
btq $0,%r11
cmovncq %rax,%r12
cmovncq %rbp,%r13
movq %r12,0(%rdi)
cmovncq %rcx,%r8
movq %r13,8(%rdi)
cmovncq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_subx:
xorq %r11,%r11
sbbq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq $0,%r11
xorq %r9,%r9
adcq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
btq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
cmovcq %rcx,%r8
cmovcq %r10,%r9
ret
.p2align 5
__ecp_nistz256_mul_by_2x:
xorq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_point_double_adx
.private_extern _ecp_nistz256_point_double_adx
.p2align 5
_ecp_nistz256_point_double_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $160+8,%rsp
L$point_doublex_body:
L$point_double_shortcutx:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq L$poly+8(%rip),%r14
movq L$poly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-128(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 32(%rbx),%rdx
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-128(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montx
call __ecp_nistz256_mul_by_2x
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montx
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rdx
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 0+32(%rsp),%rdx
movq 8+32(%rsp),%r14
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montx
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subx
movq 32(%rsp),%rdx
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-128(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromx
leaq 160+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_doublex_epilogue:
ret
.globl _ecp_nistz256_point_add_adx
.private_extern _ecp_nistz256_point_add_adx
.p2align 5
_ecp_nistz256_point_add_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $576+8,%rsp
L$point_addx_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-128(%rsi),%rsi
movq %rdx,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rdx
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-128(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 416(%rsp),%rdx
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 512(%rsp),%rdx
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq -128+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 480(%rsp),%rdx
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e
jnz L$add_proceedx
testq %r9,%r9
jz L$add_doublex
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp L$add_donex
.p2align 5
L$add_doublex:
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
jmp L$point_double_shortcutx
.p2align 5
L$add_proceedx:
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq -128+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%rdx
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 160(%rsp),%rdx
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
L$add_donex:
leaq 576+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_addx_epilogue:
ret
.globl _ecp_nistz256_point_add_affine_adx
.private_extern _ecp_nistz256_point_add_affine_adx
.p2align 5
_ecp_nistz256_point_add_affine_adx:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $480+8,%rsp
L$add_affinex_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-128(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rdx
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-128(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+96(%rsp),%rdx
movq 8+96(%rsp),%r14
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq -128+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rdx
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq -128+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand L$ONE_mont(%rip),%xmm2
pand L$ONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$add_affinex_epilogue:
ret
#endif
|
Chigozieworldwide/Multi | 25,166 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/vpaes-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.section __TEXT,__const
.align 7 // totally strategic alignment
_vpaes_consts:
Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
//
// "Hot" constants
//
Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
//
// Key schedule constants
//
Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.align 6
.text
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
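##
## A rough register map for this AArch64 translation (derived from the loads
## below, values illustrative): the table base is taken in x10, v17 holds the
## 0x0F nibble mask, v18-v19 Lk_inv, v20-v21 Lk_ipt, v22-v23 Lk_sbo, and
## v24-v27 Lk_sb1/Lk_sb2.
##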
.align 4
_vpaes_encrypt_preheat:
adrp x10, Lk_inv@PAGE
add x10, x10, Lk_inv@PAGEOFF
movi v17.16b, #0x0f
ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2
ret
##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
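##
## A sketch of the nibble-split trick used throughout (byte value purely
## illustrative): `and` with the 0x0F mask in v17 keeps the low nibble of
## every byte and `ushr #4` leaves the high nibble, so each `tbl` indexes a
## 16-entry table with one nibble at a time. An input byte 0xAB, for
## example, becomes the two indices 0x0B and 0x0A.
##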
.align 4
_vpaes_encrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward@PAGE+16
add x11, x11, Lk_mc_forward@PAGEOFF+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b Lenc_entry
.align 4
Lenc_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
sub w8, w8, #1 // nr--
Lenc_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
ret
.align 4
_vpaes_encrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward@PAGE+16
add x11, x11, Lk_mc_forward@PAGEOFF+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
tbl v9.16b, {v20.16b}, v9.16b
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
tbl v10.16b, {v21.16b}, v8.16b
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v8.16b, v9.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b Lenc_2x_entry
.align 4
Lenc_2x_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
tbl v12.16b, {v25.16b}, v10.16b
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
tbl v8.16b, {v24.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
tbl v13.16b, {v27.16b}, v10.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
tbl v10.16b, {v26.16b}, v11.16b
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
tbl v11.16b, {v8.16b}, v1.16b
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
eor v10.16b, v10.16b, v13.16b
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
tbl v8.16b, {v8.16b}, v4.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
eor v11.16b, v11.16b, v10.16b
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
tbl v12.16b, {v11.16b},v1.16b
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
eor v8.16b, v8.16b, v11.16b
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
eor v8.16b, v8.16b, v12.16b
sub w8, w8, #1 // nr--
Lenc_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
tbl v13.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v13.16b
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v13.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_2x_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
tbl v8.16b, {v23.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v1.16b
ret
########################################################
## ##
## AES key schedule ##
## ##
########################################################
.align 4
_vpaes_key_preheat:
adrp x10, Lk_inv@PAGE
add x10, x10, Lk_inv@PAGEOFF
movi v16.16b, #0x5b // Lk_s63
adrp x11, Lk_sb1@PAGE
add x11, x11, Lk_sb1@PAGEOFF
movi v17.16b, #0x0f // Lk_s0F
ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt
adrp x10, Lk_dksd@PAGE
add x10, x10, Lk_dksd@PAGEOFF
ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1
adrp x11, Lk_mc_forward@PAGE
add x11, x11, Lk_mc_forward@PAGEOFF
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9
ld1 {v8.2d}, [x10] // Lk_rcon
ld1 {v9.2d}, [x11] // Lk_mc_forward[0]
ret
.align 4
_vpaes_schedule_core:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp,#-16]!
add x29,sp,#0
bl _vpaes_key_preheat // load the tables
ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
// input transform
mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
adrp x10, Lk_sr@PAGE // lea Lk_sr(%rip),%r10
add x10, x10, Lk_sr@PAGEOFF
add x8, x8, x10
// encrypting, output zeroth round key after transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
cmp w1, #192 // cmp $192, %esi
b.hi Lschedule_256
b.eq Lschedule_192
// 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
Lschedule_128:
mov x0, #10 // mov $10, %esi
Loop_schedule_128:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // write output
b Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
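##
## A quick count (illustrative): AES-192 needs 13 round keys. The zeroth key
## is written by _vpaes_schedule_core before dispatching here, and the four
## loop cycles below (mov x0, #4) each emit three keys, the final one of the
## last cycle coming from Lschedule_mangle_last, giving 1 + 4*3 = 13.
##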
.align 4
Lschedule_192:
sub x0, x0, #8
ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform // input transform
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov x0, #4 // mov $4, %esi
Loop_schedule_192:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle // save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle // save key n+1
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // save key n+2
bl _vpaes_schedule_192_smear
b Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
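##
## A quick count (illustrative): AES-256 needs 15 round keys. One is written
## by _vpaes_schedule_core before dispatching here; the seven cycles below
## (mov x0, #7) write two keys each except the last, which writes one and
## then falls into Lschedule_mangle_last for the final key: 1 + 6*2 + 1 + 1 = 15.
##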
.align 4
Lschedule_256:
ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform // input transform
mov x0, #7 // mov $7, %esi
Loop_schedule_256:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_mangle // output low result
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
// high round
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle
// low round. swap xmm7 and xmm6
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
movi v4.16b, #0
mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
b Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
Lschedule_mangle_last:
// schedule last round key from xmm0
adrp x11, Lk_deskew@PAGE // lea Lk_deskew(%rip),%r11 # prepare to deskew
add x11, x11, Lk_deskew@PAGEOFF
cbnz w3, Lschedule_mangle_last_dec
// encrypting
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
adrp x11, Lk_opt@PAGE // lea Lk_opt(%rip), %r11 # prepare to output transform
add x11, x11, Lk_opt@PAGEOFF
add x2, x2, #32 // add $32, %rdx
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
Lschedule_mangle_last_dec:
ld1 {v20.2d,v21.2d}, [x11] // reload constants
sub x2, x2, #16 // add $-16, %rdx
eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform // output transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key
// cleanup
eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
ldp x29, x30, [sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.align 4
_vpaes_schedule_192_smear:
movi v1.16b, #0
dup v0.4s, v7.s[3]
ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
ret
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
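##
## Loosely, this is the textbook AES key-expansion step: the rotate-by-one-
## byte plus subbytes on the high dword is RotWord/SubWord, the rcon byte is
## folded in from %xmm8, and the dword smearing is the running XOR with the
## previous round key's words.
##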
.align 4
_vpaes_schedule_round:
// extract rcon from xmm8
movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
// rotate
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0
// fall through...
// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
// smear xmm7
ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4
// subbytes
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7
tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
// add in smeared stuff
eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
ret
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
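##
## In this AArch64 translation the lo/hi tables are not fetched through a
## pointer; they are expected in v20/v21 (loaded by _vpaes_key_preheat, or
## reloaded with Lk_opt/Lk_deskew in Lschedule_mangle_last_dec), as the tbl
## lookups below show.
##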
.align 4
_vpaes_schedule_transform:
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
// vmovdqa (%r11), %xmm2 # lo
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
// vmovdqa 16(%r11), %xmm1 # hi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
ret
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
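##
## Note that only the encrypt case described above is implemented below;
## there is no branch to a decrypt-side mangle here.
##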
.align 4
_vpaes_schedule_mangle:
mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
// vmovdqa .Lk_mc_forward(%rip),%xmm5
// encrypting
eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4
add x2, x2, #16 // add $16, %rdx
tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
Lschedule_mangle_both:
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
add x8, x8, #48 // add $-16, %r8
and x8, x8, #~(1<<6) // and $0x30, %r8
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
ret
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
.align 4
_vpaes_set_encrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
add w9, w9, #5 // $5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov w3, #0 // mov $0,%ecx
mov x8, #0x30 // mov $0x30,%r8d
bl _vpaes_schedule_core
eor x0, x0, x0
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
.align 4
_vpaes_ctr32_encrypt_blocks:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
cbz x2, Lctr32_done
// Note, unlike the other functions, x2 here is measured in blocks,
// not bytes.
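// Rough flow (derived from the code below): if the block count is odd, one
// block is encrypted first with _vpaes_encrypt_core so the main loop can
// always run _vpaes_encrypt_2x on pairs; v14/v15 then carry two consecutive
// counter blocks, updated each iteration roughly as
//   w7 = w6 + 1 ; v14.s[3] = rev(w7)
//   w6 = w6 + 2 ; v15.s[3] = rev(w6)
// so the stored lanes stay big-endian while w6 counts in host order.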
mov x17, x2
mov x2, x3
// Load the IV and counter portion.
ldr w6, [x4, #12]
ld1 {v7.16b}, [x4]
bl _vpaes_encrypt_preheat
tst x17, #1
rev w6, w6 // The counter is big-endian.
b.eq Lctr32_prep_loop
// Handle one block so the remaining block count is even for
// _vpaes_encrypt_2x.
ld1 {v6.16b}, [x0], #16 // Load input ahead of time
bl _vpaes_encrypt_core
eor v0.16b, v0.16b, v6.16b // XOR input and result
st1 {v0.16b}, [x1], #16
subs x17, x17, #1
// Update the counter.
add w6, w6, #1
rev w7, w6
mov v7.s[3], w7
b.ls Lctr32_done
Lctr32_prep_loop:
// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
// uses v14 and v15.
mov v15.16b, v7.16b
mov v14.16b, v7.16b
add w6, w6, #1
rev w7, w6
mov v15.s[3], w7
Lctr32_loop:
ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
bl _vpaes_encrypt_2x
eor v0.16b, v0.16b, v6.16b // XOR input and result
eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #2
// Update the counter.
add w7, w6, #1
add w6, w6, #2
rev w7, w7
mov v14.s[3], w7
rev w7, w6
mov v15.s[3], w7
b.hi Lctr32_loop
Lctr32_done:
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
Chigozieworldwide/Multi | 15,121 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesni-x86-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
.hidden _aesni_encrypt2
.type _aesni_encrypt2,@function
.align 16
_aesni_encrypt2:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L000enc2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L000enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.size _aesni_encrypt2,.-_aesni_encrypt2
.hidden _aesni_encrypt3
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L001enc3_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%edx,%ecx,1),%xmm0
jnz .L001enc3_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.size _aesni_encrypt3,.-_aesni_encrypt3
.hidden _aesni_encrypt4
.type _aesni_encrypt4,@function
.align 16
_aesni_encrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 15,31,64,0
addl $16,%ecx
.L002enc4_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%edx,%ecx,1),%xmm0
jnz .L002enc4_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.size _aesni_encrypt4,.-_aesni_encrypt4
.hidden _aesni_encrypt6
.type _aesni_encrypt6,@function
.align 16
_aesni_encrypt6:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,217
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx
jmp .L003_aesni_encrypt6_inner
.align 16
.L004enc6_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.L003_aesni_encrypt6_inner:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.L_aesni_encrypt6_enter:
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%edx,%ecx,1),%xmm0
jnz .L004enc6_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.size _aesni_encrypt6,.-_aesni_encrypt6
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
aes_hw_ctr32_encrypt_blocks:
.L_aes_hw_ctr32_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L005pic_for_function_hit
.L005pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+0-.L005pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $88,%esp
andl $-16,%esp
movl %ebp,80(%esp)
cmpl $1,%eax
je .L006ctr32_one_shortcut
movdqu (%ebx),%xmm7
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
movl $6,%ecx
xorl %ebp,%ebp
movl %ecx,16(%esp)
movl %ecx,20(%esp)
movl %ecx,24(%esp)
movl %ebp,28(%esp)
.byte 102,15,58,22,251,3
.byte 102,15,58,34,253,3
movl 240(%edx),%ecx
bswap %ebx
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movdqa (%esp),%xmm2
.byte 102,15,58,34,195,0
leal 3(%ebx),%ebp
.byte 102,15,58,34,205,0
incl %ebx
.byte 102,15,58,34,195,1
incl %ebp
.byte 102,15,58,34,205,1
incl %ebx
.byte 102,15,58,34,195,2
incl %ebp
.byte 102,15,58,34,205,2
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
movdqu (%edx),%xmm6
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
pshufd $192,%xmm0,%xmm2
pshufd $128,%xmm0,%xmm3
cmpl $6,%eax
jb .L007ctr32_tail
pxor %xmm6,%xmm7
shll $4,%ecx
movl $16,%ebx
movdqa %xmm7,32(%esp)
movl %edx,%ebp
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
subl $6,%eax
jmp .L008ctr32_loop6
.align 16
.L008ctr32_loop6:
pshufd $64,%xmm0,%xmm4
movdqa 32(%esp),%xmm0
pshufd $192,%xmm1,%xmm5
pxor %xmm0,%xmm2
pshufd $128,%xmm1,%xmm6
pxor %xmm0,%xmm3
pshufd $64,%xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
.byte 102,15,56,220,209
pxor %xmm0,%xmm6
pxor %xmm0,%xmm7
.byte 102,15,56,220,217
movups 32(%ebp),%xmm0
movl %ebx,%ecx
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
call .L_aesni_encrypt6_enter
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups %xmm2,(%edi)
movdqa 16(%esp),%xmm0
xorps %xmm1,%xmm4
movdqa 64(%esp),%xmm1
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
paddd %xmm0,%xmm1
paddd 48(%esp),%xmm0
movdqa (%esp),%xmm2
movups 48(%esi),%xmm3
movups 64(%esi),%xmm4
xorps %xmm3,%xmm5
movups 80(%esi),%xmm3
leal 96(%esi),%esi
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
xorps %xmm4,%xmm6
movups %xmm5,48(%edi)
xorps %xmm3,%xmm7
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
movups %xmm6,64(%edi)
pshufd $192,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
pshufd $128,%xmm0,%xmm3
subl $6,%eax
jnc .L008ctr32_loop6
addl $6,%eax
jz .L009ctr32_ret
movdqu (%ebp),%xmm7
movl %ebp,%edx
pxor 32(%esp),%xmm7
movl 240(%ebp),%ecx
.L007ctr32_tail:
por %xmm7,%xmm2
cmpl $2,%eax
jb .L010ctr32_one
pshufd $64,%xmm0,%xmm4
por %xmm7,%xmm3
je .L011ctr32_two
pshufd $192,%xmm1,%xmm5
por %xmm7,%xmm4
cmpl $4,%eax
jb .L012ctr32_three
pshufd $128,%xmm1,%xmm6
por %xmm7,%xmm5
je .L013ctr32_four
por %xmm7,%xmm6
call _aesni_encrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups 48(%esi),%xmm0
xorps %xmm1,%xmm4
movups 64(%esi),%xmm1
xorps %xmm0,%xmm5
movups %xmm2,(%edi)
xorps %xmm1,%xmm6
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L009ctr32_ret
.align 16
.L006ctr32_one_shortcut:
movups (%ebx),%xmm2
movl 240(%edx),%ecx
.L010ctr32_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L014enc1_loop_1:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L014enc1_loop_1
.byte 102,15,56,221,209
movups (%esi),%xmm6
xorps %xmm2,%xmm6
movups %xmm6,(%edi)
jmp .L009ctr32_ret
.align 16
.L011ctr32_two:
call _aesni_encrypt2
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L009ctr32_ret
.align 16
.L012ctr32_three:
call _aesni_encrypt3
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
movups 32(%esi),%xmm7
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm7,%xmm4
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L009ctr32_ret
.align 16
.L013ctr32_four:
call _aesni_encrypt4
movups (%esi),%xmm6
movups 16(%esi),%xmm7
movups 32(%esi),%xmm1
xorps %xmm6,%xmm2
movups 48(%esi),%xmm0
xorps %xmm7,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
.L009ctr32_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movl 80(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin
.globl aes_hw_set_encrypt_key_base
.hidden aes_hw_set_encrypt_key_base
.type aes_hw_set_encrypt_key_base,@function
.align 16
aes_hw_set_encrypt_key_base:
.L_aes_hw_set_encrypt_key_base_begin:
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L015pic_for_function_hit
.L015pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+3-.L015pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 8(%esp),%ecx
movl 12(%esp),%edx
pushl %ebx
call .L016pic
.L016pic:
popl %ebx
leal .Lkey_const-.L016pic(%ebx),%ebx
movups (%eax),%xmm0
xorps %xmm4,%xmm4
leal 16(%edx),%edx
cmpl $256,%ecx
je .L01714rounds
cmpl $128,%ecx
jne .L018bad_keybits
.align 16
.L01910rounds:
movl $9,%ecx
movups %xmm0,-16(%edx)
.byte 102,15,58,223,200,1
call .L020key_128_cold
.byte 102,15,58,223,200,2
call .L021key_128
.byte 102,15,58,223,200,4
call .L021key_128
.byte 102,15,58,223,200,8
call .L021key_128
.byte 102,15,58,223,200,16
call .L021key_128
.byte 102,15,58,223,200,32
call .L021key_128
.byte 102,15,58,223,200,64
call .L021key_128
.byte 102,15,58,223,200,128
call .L021key_128
.byte 102,15,58,223,200,27
call .L021key_128
.byte 102,15,58,223,200,54
call .L021key_128
movups %xmm0,(%edx)
movl %ecx,80(%edx)
jmp .L022good_key
.align 16
.L021key_128:
movups %xmm0,(%edx)
leal 16(%edx),%edx
.L020key_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 16
.L01714rounds:
movups 16(%eax),%xmm2
leal 16(%edx),%edx
movl $13,%ecx
movups %xmm0,-32(%edx)
movups %xmm2,-16(%edx)
.byte 102,15,58,223,202,1
call .L023key_256a_cold
.byte 102,15,58,223,200,1
call .L024key_256b
.byte 102,15,58,223,202,2
call .L025key_256a
.byte 102,15,58,223,200,2
call .L024key_256b
.byte 102,15,58,223,202,4
call .L025key_256a
.byte 102,15,58,223,200,4
call .L024key_256b
.byte 102,15,58,223,202,8
call .L025key_256a
.byte 102,15,58,223,200,8
call .L024key_256b
.byte 102,15,58,223,202,16
call .L025key_256a
.byte 102,15,58,223,200,16
call .L024key_256b
.byte 102,15,58,223,202,32
call .L025key_256a
.byte 102,15,58,223,200,32
call .L024key_256b
.byte 102,15,58,223,202,64
call .L025key_256a
movups %xmm0,(%edx)
movl %ecx,16(%edx)
xorl %eax,%eax
jmp .L022good_key
.align 16
.L025key_256a:
movups %xmm2,(%edx)
leal 16(%edx),%edx
.L023key_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 16
.L024key_256b:
movups %xmm0,(%edx)
leal 16(%edx),%edx
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.L022good_key:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax
popl %ebx
ret
.align 4
.L018bad_keybits:
pxor %xmm0,%xmm0
movl $-2,%eax
popl %ebx
ret
.size aes_hw_set_encrypt_key_base,.-.L_aes_hw_set_encrypt_key_base_begin
.globl aes_hw_set_encrypt_key_alt
.hidden aes_hw_set_encrypt_key_alt
.type aes_hw_set_encrypt_key_alt,@function
.align 16
aes_hw_set_encrypt_key_alt:
.L_aes_hw_set_encrypt_key_alt_begin:
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L026pic_for_function_hit
.L026pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+3-.L026pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 8(%esp),%ecx
movl 12(%esp),%edx
pushl %ebx
call .L027pic
.L027pic:
popl %ebx
leal .Lkey_const-.L027pic(%ebx),%ebx
movups (%eax),%xmm0
xorps %xmm4,%xmm4
leal 16(%edx),%edx
cmpl $256,%ecx
je .L02814rounds_alt
cmpl $128,%ecx
jne .L029bad_keybits
.align 16
.L03010rounds_alt:
movdqa (%ebx),%xmm5
movl $8,%ecx
movdqa 32(%ebx),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,-16(%edx)
.L031loop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leal 16(%edx),%edx
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%edx)
movdqa %xmm0,%xmm2
decl %ecx
jnz .L031loop_key128
movdqa 48(%ebx),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%edx)
movl $9,%ecx
movl %ecx,96(%edx)
jmp .L032good_key
.align 16
.L02814rounds_alt:
movups 16(%eax),%xmm2
leal 16(%edx),%edx
movdqa (%ebx),%xmm5
movdqa 32(%ebx),%xmm4
movl $7,%ecx
movdqu %xmm0,-32(%edx)
movdqa %xmm2,%xmm1
movdqu %xmm2,-16(%edx)
.L033loop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
decl %ecx
jz .L034done_key256
pshufd $255,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%edx)
leal 32(%edx),%edx
movdqa %xmm2,%xmm1
jmp .L033loop_key256
.L034done_key256:
movl $13,%ecx
movl %ecx,16(%edx)
.L032good_key:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax
popl %ebx
ret
.align 4
.L029bad_keybits:
pxor %xmm0,%xmm0
movl $-2,%eax
popl %ebx
ret
.size aes_hw_set_encrypt_key_alt,.-.L_aes_hw_set_encrypt_key_alt_begin
.align 64
.Lkey_const:
.long 202313229,202313229,202313229,202313229
.long 67569157,67569157,67569157,67569157
.long 1,1,1,1
.long 27,27,27,27
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
.byte 115,108,46,111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
Chigozieworldwide/Multi | 70,675 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/sha256-x86_64-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,@function
.align 16
sha256_block_data_order_nohw:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $64+32,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop
.align 16
.Lloop:
movl %ebx,%edi
leaq K256(%rip),%rbp
xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
movl 4(%rsp),%r13d
movl 56(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
movl 60(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
movl 0(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
movl 4(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
movl 8(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
movl 12(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
movl 16(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
movl 20(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
movl 24(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
movl 28(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
movl 32(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
movl 36(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
movl 40(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
movl 44(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
movl 48(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
movl 52(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
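# The check below appears to use the table layout as a sentinel: every K256
# constant has a non-zero top byte, while the byte-swap mask that follows the
# constants begins with 0x00010203, so a zero at offset 3 means all 64 round
# constants have been consumed and the 16..63 message-schedule rounds are done.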
cmpb $0,3(%rbp)
jnz .Lrounds_16_xx
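# Block finished: reload the state pointer saved at 64(%rsp), add the eight
# working registers back into the hash state, and loop while input remains.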
movq 64+0(%rsp),%rdi
addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
ret
.cfi_endproc
.size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw
.section .rodata
.align 64
.type K256,@object
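# SHA-256 round constants. Each 16-byte group is stored twice (the SIMD paths
# below index the table with a 32-byte stride), followed by the byte-swap and
# shuffle masks used by the SIMD code.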
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
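# ASCII ID string: "SHA256 block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>"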
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.globl sha256_block_data_order_hw
.hidden sha256_block_data_order_hw
.type sha256_block_data_order_hw,@function
.align 64
sha256_block_data_order_hw:
.cfi_startproc
_CET_ENDBR
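# SHA-NI path. Arguments follow the sha256_block_data_order convention used
# throughout this file (%rdi = state, %rsi = input, %rdx = number of 64-byte
# blocks). The .byte sequences below hand-encode pshufb/palignr and the
# sha256rnds2/sha256msg1/sha256msg2 instructions so the file assembles even
# with tools that lack SHA-extension support.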
leaq K256+128(%rip),%rcx
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa 512-128(%rcx),%xmm7
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm7,%xmm8
.byte 102,15,58,15,202,8
punpcklqdq %xmm0,%xmm2
jmp .Loop_shaext
.align 16
.Loop_shaext:
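# Per 64-byte block: load the 16 message words, byte-swap them with pshufb,
# then run the 64 rounds four at a time with sha256rnds2, interleaving
# sha256msg1/sha256msg2 to extend the message schedule.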
movdqu (%rsi),%xmm3
movdqu 16(%rsi),%xmm4
movdqu 32(%rsi),%xmm5
.byte 102,15,56,0,223
movdqu 48(%rsi),%xmm6
movdqa 0-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 102,15,56,0,231
movdqa %xmm2,%xmm10
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
nop
movdqa %xmm1,%xmm9
.byte 15,56,203,202
movdqa 32-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 102,15,56,0,239
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
leaq 64(%rsi),%rsi
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 64-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 102,15,56,0,247
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 96-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 128-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 160-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 192-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 224-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 256-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 288-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 320-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 352-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 384-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 416-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
.byte 15,56,203,202
paddd %xmm7,%xmm6
movdqa 448-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
.byte 15,56,205,245
movdqa %xmm8,%xmm7
.byte 15,56,203,202
movdqa 480-128(%rcx),%xmm0
paddd %xmm6,%xmm0
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
decq %rdx
nop
.byte 15,56,203,202
paddd %xmm10,%xmm2
paddd %xmm9,%xmm1
jnz .Loop_shaext
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm7
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,215,8
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
ret
.cfi_endproc
.size sha256_block_data_order_hw,.-sha256_block_data_order_hw
.globl sha256_block_data_order_ssse3
.hidden sha256_block_data_order_ssse3
.type sha256_block_data_order_ssse3,@function
.align 64
sha256_block_data_order_ssse3:
.cfi_startproc
_CET_ENDBR
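# SSSE3 path: the message schedule is extended four words at a time in XMM
# registers while the scalar SHA-256 rounds run interleaved on the GPRs.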
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_ssse3:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop_ssse3
.align 16
.Lloop_ssse3:
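# Per 64-byte block: load and byte-swap the 16 message words, pre-add the
# first 16 round constants, and stash W[t]+K[t] at 0..48(%rsp).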
movdqa K256+512(%rip),%xmm7
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
.byte 102,15,56,0,199
movdqu 48(%rsi),%xmm3
leaq K256(%rip),%rbp
.byte 102,15,56,0,207
movdqa 0(%rbp),%xmm4
movdqa 32(%rbp),%xmm5
.byte 102,15,56,0,215
paddd %xmm0,%xmm4
movdqa 64(%rbp),%xmm6
.byte 102,15,56,0,223
movdqa 96(%rbp),%xmm7
paddd %xmm1,%xmm5
paddd %xmm2,%xmm6
paddd %xmm3,%xmm7
movdqa %xmm4,0(%rsp)
movl %eax,%r14d
movdqa %xmm5,16(%rsp)
movl %ebx,%edi
movdqa %xmm6,32(%rsp)
xorl %ecx,%edi
movdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lssse3_00_47
.align 16
.Lssse3_00_47:
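# Each pass through this block retires 16 rounds while computing the next 16
# schedule words; the check at the bottom repeats it until rounds 0..47 are done.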
subq $-128,%rbp
rorl $14,%r13d
movdqa %xmm1,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm3,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,224,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,250,4
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm3,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 4(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm0
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm0
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm0,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 0(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm0,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,0(%rsp)
rorl $14,%r13d
movdqa %xmm2,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm0,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,225,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,251,4
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm0,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 20(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm1
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm1
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm1,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 32(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm1,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,16(%rsp)
rorl $14,%r13d
movdqa %xmm3,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm1,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,226,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,248,4
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm1,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 36(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm2
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm2
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm2,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 64(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm2,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,32(%rsp)
rorl $14,%r13d
movdqa %xmm0,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm2,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,227,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,249,4
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm2,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 52(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm3
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm3
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm3,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 96(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm3,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne .Lssse3_00_47
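# Final 16 rounds (48..63): scalar only, no further message-schedule updates.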
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_ssse3
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_ssse3:
ret
.cfi_endproc
.size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3
.globl sha256_block_data_order_avx
.hidden sha256_block_data_order_avx
.type sha256_block_data_order_avx,@function
.align 64
sha256_block_data_order_avx:
.cfi_startproc
_CET_ENDBR
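# AVX path: same structure as the SSSE3 path, using VEX three-operand forms
# (vpalignr/vpsrld/vpshufb) and shrdl in place of rorl for the scalar rounds.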
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_avx:
vzeroupper
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
vmovdqa K256+512+32(%rip),%xmm8
vmovdqa K256+512+64(%rip),%xmm9
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%edi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%edi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
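# Same 16-rounds-per-pass schedule/round interleave as .Lssse3_00_47.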
subq $-128,%rbp
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm3,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm0,%xmm0
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpshufd $80,%xmm0,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm0,%xmm0
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm0,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm1,%xmm1
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpshufd $80,%xmm1,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm1,%xmm1
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm1,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm2,%xmm2
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpshufd $80,%xmm2,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm2,%xmm2
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm2,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm3,%xmm3
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpshufd $80,%xmm3,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm3,%xmm3
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne .Lavx_00_47
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_avx
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
ret
.cfi_endproc
.size sha256_block_data_order_avx,.-sha256_block_data_order_avx
#endif
|
Chigozieworldwide/Multi | 82,152 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesv8-gcm-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#if __ARM_MAX_ARCH__ >= 8
.text
.globl _aes_gcm_enc_kernel
.private_extern _aes_gcm_enc_kernel
.align 4
_aes_gcm_enc_kernel:
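// Register assignments as used below (per the aes_gcm_enc_kernel convention
// this file appears to follow): x0 = input, x1 = input length in bits,
// x2 = output, x3 = current GHASH value Xi, x4 = counter block,
// x5 = AES round-key schedule, x6 = precomputed powers of H.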
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
add x4, x0, x1, lsr #3 // end_input_ptr
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
sub x5, x5, #1 // byte_len - 1
ldr q18, [x8, #0] // load rk0
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
ldr q25, [x8, #112] // load rk7
add x5, x5, x0
lsr x12, x11, #32
fmov d2, x10 // CTR block 2
orr w11, w11, w11
rev w12, w12 // rev_ctr32
fmov d1, x10 // CTR block 1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
add w12, w12, #1 // increment rev_ctr32
rev w9, w12 // CTR block 1
fmov d3, x10 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 1
add w12, w12, #1 // CTR block 1
ldr q19, [x8, #16] // load rk1
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
ldr q20, [x8, #32] // load rk2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
orr x9, x11, x9, lsl #32 // CTR block 3
fmov v3.d[1], x9 // CTR block 3
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q21, [x8, #48] // load rk3
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q24, [x8, #96] // load rk6
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q23, [x8, #80] // load rk5
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q22, [x8, #64] // load rk4
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
ldr q29, [x8, #176] // load rk11
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
ldr q26, [x8, #128] // load rk8
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
add w12, w12, #1 // CTR block 3
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
ldr q27, [x8, #144] // load rk9
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
ldr q28, [x8, #160] // load rk10
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
b.lt Lenc_finish_first_blocks // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
b.eq Lenc_finish_first_blocks // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Lenc_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v2.16b, v31.16b // AES block 2 - round N-1
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
aese v3.16b, v31.16b // AES block 3 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
b.ge Lenc_tail // handle tail
ldp x19, x20, [x0, #16] // AES block 1 - load plaintext
rev w9, w12 // CTR block 4
ldp x6, x7, [x0, #0] // AES block 0 - load plaintext
ldp x23, x24, [x0, #48] // AES block 3 - load plaintext
ldp x21, x22, [x0, #32] // AES block 2 - load plaintext
add x0, x0, #64 // AES input_ptr update
eor x19, x19, x13 // AES block 1 - round N low
eor x20, x20, x14 // AES block 1 - round N high
fmov d5, x19 // AES block 1 - mov low
eor x6, x6, x13 // AES block 0 - round N low
eor x7, x7, x14 // AES block 0 - round N high
eor x24, x24, x14 // AES block 3 - round N high
fmov d4, x6 // AES block 0 - mov low
cmp x0, x5 // check if we have <= 8 blocks
fmov v4.d[1], x7 // AES block 0 - mov high
eor x23, x23, x13 // AES block 3 - round N low
eor x21, x21, x13 // AES block 2 - round N low
fmov v5.d[1], x20 // AES block 1 - mov high
fmov d6, x21 // AES block 2 - mov low
add w12, w12, #1 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov d7, x23 // AES block 3 - mov low
eor x22, x22, x14 // AES block 2 - round N high
fmov v6.d[1], x22 // AES block 2 - mov high
eor v4.16b, v4.16b, v0.16b // AES block 0 - result
fmov d0, x10 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
eor v5.16b, v5.16b, v1.16b // AES block 1 - result
fmov d1, x10 // CTR block 5
orr x9, x11, x9, lsl #32 // CTR block 5
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
st1 { v4.16b}, [x2], #16 // AES block 0 - store result
fmov v7.d[1], x24 // AES block 3 - mov high
orr x9, x11, x9, lsl #32 // CTR block 6
eor v6.16b, v6.16b, v2.16b // AES block 2 - result
st1 { v5.16b}, [x2], #16 // AES block 1 - store result
add w12, w12, #1 // CTR block 6
fmov d2, x10 // CTR block 6
fmov v2.d[1], x9 // CTR block 6
st1 { v6.16b}, [x2], #16 // AES block 2 - store result
rev w9, w12 // CTR block 7
orr x9, x11, x9, lsl #32 // CTR block 7
eor v7.16b, v7.16b, v3.16b // AES block 3 - result
st1 { v7.16b}, [x2], #16 // AES block 3 - store result
b.ge Lenc_prepretail // do prepretail
Lenc_main_loop: // main loop start
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d3, x10 // CTR block 4k+3
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
fmov v3.d[1], x9 // CTR block 4k+3
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
eor v4.16b, v4.16b, v11.16b // PRE 1
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x23, x23, x13 // AES block 4k+7 - round N low
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d10, v17.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
eor x22, x22, x14 // AES block 4k+6 - round N high
mov d8, v4.d[1] // GHASH block 4k - mid
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor x19, x19, x13 // AES block 4k+5 - round N low
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
eor x21, x21, x13 // AES block 4k+6 - round N low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
movi v8.8b, #0xc2
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
fmov d5, x19 // AES block 4k+5 - mov low
ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext
b.lt Lenc_main_loop_continue // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Lenc_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Lenc_main_loop_continue:
shl d8, d8, #56 // mod_constant
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
add w12, w12, #1 // CTR block 4k+3
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
add x0, x0, #64 // AES input_ptr update
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
rev w9, w12 // CTR block 4k+8
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor x6, x6, x13 // AES block 4k+4 - round N low
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
eor x7, x7, x14 // AES block 4k+4 - round N high
fmov d4, x6 // AES block 4k+4 - mov low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid
eor x20, x20, x14 // AES block 4k+5 - round N high
eor x24, x24, x14 // AES block 4k+7 - round N high
add w12, w12, #1 // CTR block 4k+8
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
fmov d7, x23 // AES block 4k+7 - mov low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
fmov v5.d[1], x20 // AES block 4k+5 - mov high
fmov d6, x21 // AES block 4k+6 - mov low
cmp x0, x5 // LOOP CONTROL
fmov v6.d[1], x22 // AES block 4k+6 - mov high
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
rev w9, w12 // CTR block 4k+9
add w12, w12, #1 // CTR block 4k+9
eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result
fmov d1, x10 // CTR block 4k+9
orr x9, x11, x9, lsl #32 // CTR block 4k+9
fmov v1.d[1], x9 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
rev w9, w12 // CTR block 4k+10
st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result
orr x9, x11, x9, lsl #32 // CTR block 4k+10
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
fmov v7.d[1], x24 // AES block 4k+7 - mov high
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result
fmov d2, x10 // CTR block 4k+10
st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result
fmov v2.d[1], x9 // CTR block 4k+10
rev w9, w12 // CTR block 4k+11
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
orr x9, x11, x9, lsl #32 // CTR block 4k+11
eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result
st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result
b.lt Lenc_main_loop
Lenc_prepretail: // PREPRETAIL
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov d3, x10 // CTR block 4k+3
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
fmov v3.d[1], x9 // CTR block 4k+3
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
add w12, w12, #1 // CTR block 4k+3
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v10.16b, v10.16b, v9.16b // karatsuba tidy up
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
	pmull	v4.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
	ext	v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
	eor	v10.16b, v10.16b, v11.16b // MODULO - karatsuba tidy up
b.lt Lenc_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
b.eq Lenc_finish_prepretail // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
Lenc_finish_prepretail:
	eor	v10.16b, v10.16b, v4.16b // MODULO - fold into mid
	eor	v10.16b, v10.16b, v9.16b // MODULO - fold into mid
	pmull	v4.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
	ext	v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
	eor	v11.16b, v11.16b, v4.16b // MODULO - fold into low
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
	eor	v11.16b, v11.16b, v10.16b // MODULO - fold into low
Lenc_tail: // TAIL
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
cmp x5, #48
fmov d4, x6 // AES block 4k+4 - mov low
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result
b.gt Lenc_blocks_more_than_3
cmp x5, #32
mov v3.16b, v2.16b
movi v11.8b, #0
movi v9.8b, #0
sub w12, w12, #1
mov v2.16b, v1.16b
movi v10.8b, #0
b.gt Lenc_blocks_more_than_2
mov v3.16b, v1.16b
sub w12, w12, #1
cmp x5, #16
b.gt Lenc_blocks_more_than_1
sub w12, w12, #1
b Lenc_blocks_less_than_1
Lenc_blocks_more_than_3: // blocks left > 3
st1 { v5.16b}, [x2], #16 // AES final-3 block - store result
ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-3 block
eor x6, x6, x13 // AES final-2 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor x7, x7, x14 // AES final-2 block - round N high
mov d22, v4.d[1] // GHASH final-3 block - mid
fmov d5, x6 // AES final-2 block - mov low
fmov v5.d[1], x7 // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
mov d10, v17.d[1] // GHASH final-3 block - mid
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor v5.16b, v5.16b, v1.16b // AES final-2 block - result
Lenc_blocks_more_than_2: // blocks left > 2
st1 { v5.16b}, [x2], #16 // AES final-2 block - store result
ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-2 block
eor x6, x6, x13 // AES final-1 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
fmov d5, x6 // AES final-1 block - mov low
eor x7, x7, x14 // AES final-1 block - round N high
fmov v5.d[1], x7 // AES final-1 block - mov high
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
eor v5.16b, v5.16b, v2.16b // AES final-1 block - result
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
Lenc_blocks_more_than_1: // blocks left > 1
st1 { v5.16b}, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ldp x6, x7, [x0], #16 // AES final block - load input low & high
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
eor x6, x6, x13 // AES final block - round N low
mov d22, v4.d[1] // GHASH final-1 block - mid
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor x7, x7, x14 // AES final block - round N high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
fmov d5, x6 // AES final block - mov low
fmov v5.d[1], x7 // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
eor v5.16b, v5.16b, v3.16b // AES final block - result
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
Lenc_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored
mvn x14, xzr // rkN_h = 0xffffffffffffffff
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x6, x13, x14, lt
csel x7, x14, xzr, lt
fmov d0, x6 // ctr0b is mask for last block
fmov v0.d[1], x7
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
mov d8, v4.d[1] // GHASH final block - mid
rev w9, w12
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
str w9, [x16, #12] // store the updated counter
st1 { v5.16b}, [x2] // store all 16B
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _aes_gcm_dec_kernel
.private_extern _aes_gcm_dec_kernel
.align 4
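// AES-GCM decryption kernel: four blocks of CTR keystream are computed per
// pass while GHASH runs over the incoming ciphertext; the register saves,
// round-key loads and hash-key loads below mirror the encryption kernel above.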
_aes_gcm_dec_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ldr q26, [x8, #128] // load rk8
sub x5, x5, #1 // byte_len - 1
ldr q25, [x8, #112] // load rk7
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x4, x0, x1, lsr #3 // end_input_ptr
ldr q24, [x8, #96] // load rk6
lsr x12, x11, #32
ldr q23, [x8, #80] // load rk5
orr w11, w11, w11
ldr q21, [x8, #48] // load rk3
add x5, x5, x0
rev w12, w12 // rev_ctr32
add w12, w12, #1 // increment rev_ctr32
fmov d3, x10 // CTR block 3
rev w9, w12 // CTR block 1
add w12, w12, #1 // CTR block 1
fmov d1, x10 // CTR block 1
orr x9, x11, x9, lsl #32 // CTR block 1
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
fmov d2, x10 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 3
ldr q18, [x8, #0] // load rk0
fmov v3.d[1], x9 // CTR block 3
add w12, w12, #1 // CTR block 3
ldr q22, [x8, #64] // load rk4
ldr q19, [x8, #16] // load rk1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q20, [x8, #32] // load rk2
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q27, [x8, #144] // load rk9
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q28, [x8, #160] // load rk10
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
ldr q29, [x8, #176] // load rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
b.lt Ldec_finish_first_blocks // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
b.eq Ldec_finish_first_blocks // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Ldec_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v2.16b, v31.16b // AES block 2 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
aese v3.16b, v31.16b // AES block 3 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
b.ge Ldec_tail // handle tail
ldr q4, [x0, #0] // AES block 0 - load ciphertext
ldr q5, [x0, #16] // AES block 1 - load ciphertext
rev w9, w12 // CTR block 4
eor v0.16b, v4.16b, v0.16b // AES block 0 - result
eor v1.16b, v5.16b, v1.16b // AES block 1 - result
rev64 v5.16b, v5.16b // GHASH block 1
ldr q7, [x0, #48] // AES block 3 - load ciphertext
mov x7, v0.d[1] // AES block 0 - mov high
mov x6, v0.d[0] // AES block 0 - mov low
rev64 v4.16b, v4.16b // GHASH block 0
add w12, w12, #1 // CTR block 4
fmov d0, x10 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
mov x19, v1.d[0] // AES block 1 - mov low
orr x9, x11, x9, lsl #32 // CTR block 5
mov x20, v1.d[1] // AES block 1 - mov high
eor x7, x7, x14 // AES block 0 - round N high
eor x6, x6, x13 // AES block 0 - round N low
stp x6, x7, [x2], #16 // AES block 0 - store result
fmov d1, x10 // CTR block 5
ldr q6, [x0, #32] // AES block 2 - load ciphertext
add x0, x0, #64 // AES input_ptr update
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
add w12, w12, #1 // CTR block 6
eor x19, x19, x13 // AES block 1 - round N low
orr x9, x11, x9, lsl #32 // CTR block 6
eor x20, x20, x14 // AES block 1 - round N high
stp x19, x20, [x2], #16 // AES block 1 - store result
eor v2.16b, v6.16b, v2.16b // AES block 2 - result
cmp x0, x5 // check if we have <= 8 blocks
b.ge Ldec_prepretail // do prepretail
Ldec_main_loop: // main loop start
mov x21, v2.d[0] // AES block 4k+2 - mov low
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
eor v4.16b, v4.16b, v11.16b // PRE 1
rev w9, w12 // CTR block 4k+7
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x23, v3.d[0] // AES block 4k+3 - mov low
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov v3.d[1], x9 // CTR block 4k+7
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor x22, x22, x14 // AES block 4k+2 - round N high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
rev64 v6.16b, v6.16b // GHASH block 4k+2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x21, x21, x13 // AES block 4k+2 - round N low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
rev64 v7.16b, v7.16b // GHASH block 4k+3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor x23, x23, x13 // AES block 4k+3 - round N low
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor x24, x24, x14 // AES block 4k+3 - round N high
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
add w12, w12, #1 // CTR block 4k+7
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
rev w9, w12 // CTR block 4k+8
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
add w12, w12, #1 // CTR block 4k+8
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
movi v8.8b, #0xc2
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
b.lt Ldec_main_loop_continue // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Ldec_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_main_loop_continue:
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext
eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext
ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext
mov x7, v0.d[1] // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
add x0, x0, #64 // AES input_ptr update
mov x6, v0.d[0] // AES block 4k+4 - mov low
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result
rev w9, w12 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+9
cmp x0, x5 // LOOP CONTROL
add w12, w12, #1 // CTR block 4k+9
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
mov x20, v1.d[1] // AES block 4k+5 - mov high
eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
mov x19, v1.d[0] // AES block 4k+5 - mov low
fmov d1, x10 // CTR block 4k+9
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
fmov v1.d[1], x9 // CTR block 4k+9
rev w9, w12 // CTR block 4k+10
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+10
rev64 v5.16b, v5.16b // GHASH block 4k+5
eor x20, x20, x14 // AES block 4k+5 - round N high
stp x6, x7, [x2], #16 // AES block 4k+4 - store result
eor x19, x19, x13 // AES block 4k+5 - round N low
stp x19, x20, [x2], #16 // AES block 4k+5 - store result
rev64 v4.16b, v4.16b // GHASH block 4k+4
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
b.lt Ldec_main_loop
Ldec_prepretail: // PREPRETAIL
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
mov x21, v2.d[0] // AES block 4k+2 - mov low
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
rev w9, w12 // CTR block 4k+7
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v6.16b, v6.16b // GHASH block 4k+2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
mov x23, v3.d[0] // AES block 4k+3 - mov low
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
fmov v3.d[1], x9 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
rev64 v7.16b, v7.16b // GHASH block 4k+3
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
b.lt Ldec_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
b.eq Ldec_finish_prepretail // branch if AES-192
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_finish_prepretail:
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor x22, x22, x14 // AES block 4k+2 - round N high
eor x23, x23, x13 // AES block 4k+3 - round N low
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
add w12, w12, #1 // CTR block 4k+7
eor x21, x21, x13 // AES block 4k+2 - round N low
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor x24, x24, x14 // AES block 4k+3 - round N high
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
Ldec_tail: // TAIL
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext
eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result
mov x6, v0.d[0] // AES block 4k+4 - mov low
mov x7, v0.d[1] // AES block 4k+4 - mov high
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
cmp x5, #48
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
b.gt Ldec_blocks_more_than_3
sub w12, w12, #1
mov v3.16b, v2.16b
movi v10.8b, #0
movi v11.8b, #0
cmp x5, #32
movi v9.8b, #0
mov v2.16b, v1.16b
b.gt Ldec_blocks_more_than_2
sub w12, w12, #1
mov v3.16b, v1.16b
cmp x5, #16
b.gt Ldec_blocks_more_than_1
sub w12, w12, #1
b Ldec_blocks_less_than_1
Ldec_blocks_more_than_3: // blocks left > 3
rev64 v4.16b, v5.16b // GHASH final-3 block
ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext
stp x6, x7, [x2], #16 // AES final-3 block - store result
mov d10, v17.d[1] // GHASH final-3 block - mid
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor v0.16b, v5.16b, v1.16b // AES final-2 block - result
mov d22, v4.d[1] // GHASH final-3 block - mid
mov x6, v0.d[0] // AES final-2 block - mov low
mov x7, v0.d[1] // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor x6, x6, x13 // AES final-2 block - round N low
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
eor x7, x7, x14 // AES final-2 block - round N high
Ldec_blocks_more_than_2: // blocks left > 2
rev64 v4.16b, v5.16b // GHASH final-2 block
ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
stp x6, x7, [x2], #16 // AES final-2 block - store result
eor v0.16b, v5.16b, v2.16b // AES final-1 block - result
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
mov x6, v0.d[0] // AES final-1 block - mov low
mov x7, v0.d[1] // AES final-1 block - mov high
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
movi v8.8b, #0 // suppress further partial tag feed in
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
eor x6, x6, x13 // AES final-1 block - round N low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
eor x7, x7, x14 // AES final-1 block - round N high
Ldec_blocks_more_than_1: // blocks left > 1
stp x6, x7, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
mov d22, v4.d[1] // GHASH final-1 block - mid
eor v0.16b, v5.16b, v3.16b // AES final block - result
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
mov x6, v0.d[0] // AES final block - mov low
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
mov x7, v0.d[1] // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
eor x6, x6, x13 // AES final block - round N low
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor x7, x7, x14 // AES final block - round N high
Ldec_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x14, xzr // rkN_h = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
ldp x4, x5, [x2] // load existing bytes we need to not overwrite
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x9, x13, x14, lt
csel x10, x14, xzr, lt
fmov d0, x9 // ctr0b is mask for last block
and x6, x6, x9
mov v0.d[1], x10
bic x4, x4, x9 // mask out low existing bytes
rev w9, w12
bic x5, x5, x10 // mask out high existing bytes
orr x6, x6, x4
and x7, x7, x10
orr x7, x7, x5
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
mov d8, v4.d[1] // GHASH final block - mid
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
stp x6, x7, [x2]
str w9, [x16, #12] // store the updated counter
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
Chigozieworldwide/Multi | 48,620 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/x86_64-mont5-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _bn_mul4x_mont_gather5
.private_extern _bn_mul4x_mont_gather5
.p2align 5
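// bn_mul4x_mont_gather5: 4x-unrolled Montgomery multiplication. The
// multiplier word is gathered from a precomputed, interleaved table with
// SSE2 compare/mask selects rather than an indexed load, so the table
// lookup is constant-time with respect to the selection index.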
_bn_mul4x_mont_gather5:
_CET_ENDBR
.byte 0x67
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mul4x_prologue:
.byte 0x67
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$mul4xsp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$mul4xsp_done
.p2align 5
L$mul4xsp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$mul4xsp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mul4x_page_walk
jmp L$mul4x_page_walk_done
L$mul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mul4x_page_walk
L$mul4x_page_walk_done:
negq %r9
movq %rax,40(%rsp)
L$mul4x_body:
call mul4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul4x_epilogue:
ret
.p2align 5
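// mul4x_internal: broadcast the table index into %xmm5, build pcmpeqd
// equality masks on the stack, gather the selected multiplier word from the
// table at %r12 using only pand/por, then fall into the unrolled
// multiply-and-reduce loops (L$1st4x / L$outer4x / L$inner4x).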
mul4x_internal:
shlq $5,%r9
movd 8(%rax),%xmm5
leaq L$inc(%rip),%rax
leaq 128(%rdx,%r9,1),%r13
shrq $5,%r9
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 88-112(%rsp,%r9,1),%r10
leaq 128(%rdx),%r12
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
.byte 0x67,0x67
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
.byte 0x67
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
.byte 0x67
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%r12),%xmm0
pand 80(%r12),%xmm1
pand 96(%r12),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%r12),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%r12),%xmm4
movdqa -112(%r12),%xmm5
movdqa -96(%r12),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%r12),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%r12),%xmm4
movdqa -48(%r12),%xmm5
movdqa -32(%r12),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%r12),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%r12),%xmm4
movdqa 16(%r12),%xmm5
movdqa 32(%r12),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%r12),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
por %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq %r13,16+8(%rsp)
movq %rdi,56+8(%rsp)
movq (%r8),%r8
movq (%rsi),%rax
leaq (%rsi,%r9,1),%rsi
negq %r9
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
leaq 64+8(%rsp),%r14
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%r9),%r15
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdi,(%r14)
movq %rdx,%r13
jmp L$1st4x
.p2align 5
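// First pass: accumulate a[i]*b together with the reduction term m*n[i]
// (m formed from n0 and the low result word), four limbs per iteration.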
L$1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq 0(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdi,(%r14)
movq %rdx,%r13
addq $32,%r15
jnz L$1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%r13
leaq (%rcx,%r9,1),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%r14)
jmp L$outer4x
.p2align 5
L$outer4x:
leaq 16+128(%r14),%rdx
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r12),%xmm0
movdqa -112(%r12),%xmm1
movdqa -96(%r12),%xmm2
movdqa -80(%r12),%xmm3
pand -128(%rdx),%xmm0
pand -112(%rdx),%xmm1
por %xmm0,%xmm4
pand -96(%rdx),%xmm2
por %xmm1,%xmm5
pand -80(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r12),%xmm0
movdqa -48(%r12),%xmm1
movdqa -32(%r12),%xmm2
movdqa -16(%r12),%xmm3
pand -64(%rdx),%xmm0
pand -48(%rdx),%xmm1
por %xmm0,%xmm4
pand -32(%rdx),%xmm2
por %xmm1,%xmm5
pand -16(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r12),%xmm0
movdqa 16(%r12),%xmm1
movdqa 32(%r12),%xmm2
movdqa 48(%r12),%xmm3
pand 0(%rdx),%xmm0
pand 16(%rdx),%xmm1
por %xmm0,%xmm4
pand 32(%rdx),%xmm2
por %xmm1,%xmm5
pand 48(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r12),%xmm0
movdqa 80(%r12),%xmm1
movdqa 96(%r12),%xmm2
movdqa 112(%r12),%xmm3
pand 64(%rdx),%xmm0
pand 80(%rdx),%xmm1
por %xmm0,%xmm4
pand 96(%rdx),%xmm2
por %xmm1,%xmm5
pand 112(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq (%r14,%r9,1),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
movq %rdi,(%r14)
leaq (%r14,%r9,1),%r14
mulq %rbp
addq %rax,%r10
movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%r9),%r15
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdx,%r13
jmp L$inner4x
.p2align 5
L$inner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
adcq $0,%rdx
addq 16(%r14),%r10
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq 0(%rcx),%rax
adcq $0,%rdx
addq (%r14),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %r13,-8(%r14)
movq %rdx,%r13
addq $32,%r15
jnz L$inner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
adcq $0,%rdx
addq 16(%r14),%r10
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq %rbp,%rax
movq -8(%rcx),%rbp
adcq $0,%rdx
addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%r13
movq %rdi,-16(%r14)
leaq (%rcx,%r9,1),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%r14),%r13
adcq $0,%rdi
movq %r13,-8(%r14)
cmpq 16+8(%rsp),%r12
jb L$outer4x
xorq %rax,%rax
subq %r13,%rbp
adcq %r15,%r15
orq %r15,%rdi
subq %rdi,%rax
leaq (%r14,%r9,1),%rbx
movq (%rcx),%r12
leaq (%rcx),%rbp
movq %r9,%rcx
sarq $3+2,%rcx
movq 56+8(%rsp),%rdi
decq %r12
xorq %r10,%r10
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqr4x_sub_entry
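# bn_power5_nohw: Montgomery exponentiation helper — performs five back-to-back
# Montgomery squarings (__bn_sqr8x_internal + __bn_post4x_internal), then one
# Montgomery multiplication (mul4x_internal) against an entry selected from the
# power table.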
.globl _bn_power5_nohw
.private_extern _bn_power5_nohw
.p2align 5
_bn_power5_nohw:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$power5_prologue:
shll $3,%r9d
leal (%r9,%r9,2),%r10d
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$pwr_sp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$pwr_sp_done
.p2align 5
L$pwr_sp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$pwr_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwr_page_walk
jmp L$pwr_page_walk_done
L$pwr_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwr_page_walk
L$pwr_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$power5_body:
.byte 102,72,15,110,207
.byte 102,72,15,110,209
.byte 102,73,15,110,218
.byte 102,72,15,110,226
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
.byte 102,72,15,126,209
.byte 102,72,15,126,226
movq %rsi,%rdi
movq 40(%rsp),%rax
leaq 32(%rsp),%r8
call mul4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$power5_epilogue:
ret
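# bn_sqr8x_internal: big-number squaring core (no MULX/ADX required). Computes
# the double-width square into a scratch area on the stack, then falls through
# to __bn_sqr8x_reduction for the Montgomery reduction.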
.globl _bn_sqr8x_internal
.private_extern _bn_sqr8x_internal
.private_extern _bn_sqr8x_internal
.p2align 5
_bn_sqr8x_internal:
__bn_sqr8x_internal:
_CET_ENDBR
leaq 32(%r10),%rbp
leaq (%rsi,%r9,1),%rsi
movq %r9,%rcx
movq -32(%rsi,%rbp,1),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi,%rbp,1),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi,%rbp,1),%rbx
movq %rax,%r15
mulq %r14
movq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
movq %r10,-24(%rdi,%rbp,1)
mulq %r14
addq %rax,%r11
movq %rbx,%rax
adcq $0,%rdx
movq %r11,-16(%rdi,%rbp,1)
movq %rdx,%r10
movq -8(%rsi,%rbp,1),%rbx
mulq %r15
movq %rax,%r12
movq %rbx,%rax
movq %rdx,%r13
leaq (%rbp),%rcx
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
jmp L$sqr4x_1st
.p2align 5
L$sqr4x_1st:
movq (%rsi,%rcx,1),%rbx
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %rdx,%r12
adcq $0,%r12
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 8(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,(%rdi,%rcx,1)
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq 16(%rsi,%rcx,1),%rbx
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %r10,8(%rdi,%rcx,1)
movq %rdx,%r12
adcq $0,%r12
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 24(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,16(%rdi,%rcx,1)
movq %rdx,%r13
adcq $0,%r13
leaq 32(%rcx),%rcx
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
cmpq $0,%rcx
jne L$sqr4x_1st
mulq %r15
addq %rax,%r13
leaq 16(%rbp),%rbp
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
jmp L$sqr4x_outer
.p2align 5
L$sqr4x_outer:
movq -32(%rsi,%rbp,1),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi,%rbp,1),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi,%rbp,1),%rbx
movq %rax,%r15
mulq %r14
movq -24(%rdi,%rbp,1),%r10
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
movq %r10,-24(%rdi,%rbp,1)
movq %rdx,%r11
mulq %r14
addq %rax,%r11
movq %rbx,%rax
adcq $0,%rdx
addq -16(%rdi,%rbp,1),%r11
movq %rdx,%r10
adcq $0,%r10
movq %r11,-16(%rdi,%rbp,1)
xorq %r12,%r12
movq -8(%rsi,%rbp,1),%rbx
mulq %r15
addq %rax,%r12
movq %rbx,%rax
adcq $0,%rdx
addq -8(%rdi,%rbp,1),%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
addq %r12,%r10
movq %rdx,%r11
adcq $0,%r11
movq %r10,-8(%rdi,%rbp,1)
leaq (%rbp),%rcx
jmp L$sqr4x_inner
.p2align 5
L$sqr4x_inner:
movq (%rsi,%rcx,1),%rbx
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %rdx,%r12
adcq $0,%r12
addq (%rdi,%rcx,1),%r13
adcq $0,%r12
.byte 0x67
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 8(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %r11,(%rdi,%rcx,1)
movq %rbx,%rax
movq %rdx,%r13
adcq $0,%r13
addq 8(%rdi,%rcx,1),%r12
leaq 16(%rcx),%rcx
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
addq %r12,%r10
movq %rdx,%r11
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
cmpq $0,%rcx
jne L$sqr4x_inner
.byte 0x67
mulq %r15
addq %rax,%r13
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
addq $16,%rbp
jnz L$sqr4x_outer
movq -32(%rsi),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi),%rbx
movq %rax,%r15
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq %r10,-24(%rdi)
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
movq -8(%rsi),%rbx
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,-16(%rdi)
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi)
mulq %r15
addq %rax,%r13
movq -16(%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
mulq %rbx
addq $16,%rbp
xorq %r14,%r14
subq %r9,%rbp
xorq %r15,%r15
addq %r12,%rax
adcq $0,%rdx
movq %rax,8(%rdi)
movq %rdx,16(%rdi)
movq %r15,24(%rdi)
movq -16(%rsi,%rbp,1),%rax
leaq 48+8(%rsp),%rdi
xorq %r10,%r10
movq 8(%rdi),%r11
leaq (%r14,%r10,2),%r12
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq 16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 24(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi,%rbp,1),%rax
movq %r12,(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 32(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 40(%rdi),%r11
adcq %rax,%rbx
movq 0(%rsi,%rbp,1),%rax
movq %rbx,16(%rdi)
adcq %rdx,%r8
leaq 16(%rbp),%rbp
movq %r8,24(%rdi)
sbbq %r15,%r15
leaq 64(%rdi),%rdi
jmp L$sqr4x_shift_n_add
.p2align 5
L$sqr4x_shift_n_add:
leaq (%r14,%r10,2),%r12
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq -16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq -8(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi,%rbp,1),%rax
movq %r12,-32(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,-24(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 0(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 8(%rdi),%r11
adcq %rax,%rbx
movq 0(%rsi,%rbp,1),%rax
movq %rbx,-16(%rdi)
adcq %rdx,%r8
leaq (%r14,%r10,2),%r12
movq %r8,-8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq 16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 24(%rdi),%r11
adcq %rax,%r12
movq 8(%rsi,%rbp,1),%rax
movq %r12,0(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 32(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 40(%rdi),%r11
adcq %rax,%rbx
movq 16(%rsi,%rbp,1),%rax
movq %rbx,16(%rdi)
adcq %rdx,%r8
movq %r8,24(%rdi)
sbbq %r15,%r15
leaq 64(%rdi),%rdi
addq $32,%rbp
jnz L$sqr4x_shift_n_add
leaq (%r14,%r10,2),%r12
.byte 0x67
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq -16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq -8(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi),%rax
movq %r12,-32(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,-24(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
mulq %rax
negq %r15
adcq %rax,%rbx
adcq %rdx,%r8
movq %rbx,-16(%rdi)
movq %r8,-8(%rdi)
.byte 102,72,15,126,213
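# __bn_sqr8x_reduction: Montgomery-reduces the double-width square in place,
# processing eight 64-bit modulus words per inner iteration.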
__bn_sqr8x_reduction:
xorq %rax,%rax
leaq (%r9,%rbp,1),%rcx
leaq 48+8(%rsp,%r9,2),%rdx
movq %rcx,0+8(%rsp)
leaq 48+8(%rsp,%r9,1),%rdi
movq %rdx,8+8(%rsp)
negq %r9
jmp L$8x_reduction_loop
.p2align 5
L$8x_reduction_loop:
leaq (%rdi,%r9,1),%rdi
.byte 0x66
movq 0(%rdi),%rbx
movq 8(%rdi),%r9
movq 16(%rdi),%r10
movq 24(%rdi),%r11
movq 32(%rdi),%r12
movq 40(%rdi),%r13
movq 48(%rdi),%r14
movq 56(%rdi),%r15
movq %rax,(%rdx)
leaq 64(%rdi),%rdi
.byte 0x67
movq %rbx,%r8
imulq 32+8(%rsp),%rbx
movq 0(%rbp),%rax
movl $8,%ecx
jmp L$8x_reduce
.p2align 5
L$8x_reduce:
mulq %rbx
movq 8(%rbp),%rax
negq %r8
movq %rdx,%r8
adcq $0,%r8
mulq %rbx
addq %rax,%r9
movq 16(%rbp),%rax
adcq $0,%rdx
addq %r9,%r8
movq %rbx,48-8+8(%rsp,%rcx,8)
movq %rdx,%r9
adcq $0,%r9
mulq %rbx
addq %rax,%r10
movq 24(%rbp),%rax
adcq $0,%rdx
addq %r10,%r9
movq 32+8(%rsp),%rsi
movq %rdx,%r10
adcq $0,%r10
mulq %rbx
addq %rax,%r11
movq 32(%rbp),%rax
adcq $0,%rdx
imulq %r8,%rsi
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
mulq %rbx
addq %rax,%r12
movq 40(%rbp),%rax
adcq $0,%rdx
addq %r12,%r11
movq %rdx,%r12
adcq $0,%r12
mulq %rbx
addq %rax,%r13
movq 48(%rbp),%rax
adcq $0,%rdx
addq %r13,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %rbx
addq %rax,%r14
movq 56(%rbp),%rax
adcq $0,%rdx
addq %r14,%r13
movq %rdx,%r14
adcq $0,%r14
mulq %rbx
movq %rsi,%rbx
addq %rax,%r15
movq 0(%rbp),%rax
adcq $0,%rdx
addq %r15,%r14
movq %rdx,%r15
adcq $0,%r15
decl %ecx
jnz L$8x_reduce
leaq 64(%rbp),%rbp
xorq %rax,%rax
movq 8+8(%rsp),%rdx
cmpq 0+8(%rsp),%rbp
jae L$8x_no_tail
.byte 0x66
addq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
sbbq %rsi,%rsi
movq 48+56+8(%rsp),%rbx
movl $8,%ecx
movq 0(%rbp),%rax
jmp L$8x_tail
.p2align 5
L$8x_tail:
mulq %rbx
addq %rax,%r8
movq 8(%rbp),%rax
movq %r8,(%rdi)
movq %rdx,%r8
adcq $0,%r8
mulq %rbx
addq %rax,%r9
movq 16(%rbp),%rax
adcq $0,%rdx
addq %r9,%r8
leaq 8(%rdi),%rdi
movq %rdx,%r9
adcq $0,%r9
mulq %rbx
addq %rax,%r10
movq 24(%rbp),%rax
adcq $0,%rdx
addq %r10,%r9
movq %rdx,%r10
adcq $0,%r10
mulq %rbx
addq %rax,%r11
movq 32(%rbp),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
mulq %rbx
addq %rax,%r12
movq 40(%rbp),%rax
adcq $0,%rdx
addq %r12,%r11
movq %rdx,%r12
adcq $0,%r12
mulq %rbx
addq %rax,%r13
movq 48(%rbp),%rax
adcq $0,%rdx
addq %r13,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %rbx
addq %rax,%r14
movq 56(%rbp),%rax
adcq $0,%rdx
addq %r14,%r13
movq %rdx,%r14
adcq $0,%r14
mulq %rbx
movq 48-16+8(%rsp,%rcx,8),%rbx
addq %rax,%r15
adcq $0,%rdx
addq %r15,%r14
movq 0(%rbp),%rax
movq %rdx,%r15
adcq $0,%r15
decl %ecx
jnz L$8x_tail
leaq 64(%rbp),%rbp
movq 8+8(%rsp),%rdx
cmpq 0+8(%rsp),%rbp
jae L$8x_tail_done
movq 48+56+8(%rsp),%rbx
negq %rsi
movq 0(%rbp),%rax
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
sbbq %rsi,%rsi
movl $8,%ecx
jmp L$8x_tail
.p2align 5
L$8x_tail_done:
xorq %rax,%rax
addq (%rdx),%r8
adcq $0,%r9
adcq $0,%r10
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
adcq $0,%rax
negq %rsi
L$8x_no_tail:
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
adcq $0,%rax
movq -8(%rbp),%rcx
xorq %rsi,%rsi
.byte 102,72,15,126,213
movq %r8,0(%rdi)
movq %r9,8(%rdi)
.byte 102,73,15,126,217
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq %r12,32(%rdi)
movq %r13,40(%rdi)
movq %r14,48(%rdi)
movq %r15,56(%rdi)
leaq 64(%rdi),%rdi
cmpq %rdx,%rdi
jb L$8x_reduction_loop
ret
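# __bn_post4x_internal: final conditional subtraction — adds the borrow-masked
# complement of the modulus (four words per iteration) and stores the canonical
# result to the output buffer.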
.p2align 5
__bn_post4x_internal:
movq 0(%rbp),%r12
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
.byte 102,72,15,126,207
negq %rax
.byte 102,72,15,126,206
sarq $3+2,%rcx
decq %r12
xorq %r10,%r10
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqr4x_sub_entry
.p2align 4
L$sqr4x_sub:
movq 0(%rbp),%r12
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
L$sqr4x_sub_entry:
leaq 32(%rbp),%rbp
notq %r12
notq %r13
notq %r14
notq %r15
andq %rax,%r12
andq %rax,%r13
andq %rax,%r14
andq %rax,%r15
negq %r10
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
adcq 16(%rbx),%r14
adcq 24(%rbx),%r15
movq %r12,0(%rdi)
leaq 32(%rbx),%rbx
movq %r13,8(%rdi)
sbbq %r10,%r10
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz L$sqr4x_sub
movq %r9,%r10
negq %r9
ret
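# bn_mulx4x_mont_gather5: MULX/ADX (BMI2 + ADX) variant of Montgomery
# multiplication with gather — sets up and probes the stack frame page by page,
# then hands off to mulx4x_internal.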
.globl _bn_mulx4x_mont_gather5
.private_extern _bn_mulx4x_mont_gather5
.p2align 5
_bn_mulx4x_mont_gather5:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mulx4x_prologue:
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$mulx4xsp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$mulx4xsp_done
L$mulx4xsp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$mulx4xsp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
jmp L$mulx4x_page_walk_done
L$mulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
L$mulx4x_page_walk_done:
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$mulx4x_body:
call mulx4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mulx4x_epilogue:
ret
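# mulx4x_internal: MULX/ADX Montgomery multiplication core. Selects the
# multiplier from the power table in constant time (pcmpeqd index masks with
# pand/por over every entry), then runs the 4-way interleaved multiply/reduce
# loops (L$mulx4x_1st, L$mulx4x_outer, L$mulx4x_inner).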
.p2align 5
mulx4x_internal:
movq %r9,8(%rsp)
movq %r9,%r10
negq %r9
shlq $5,%r9
negq %r10
leaq 128(%rdx,%r9,1),%r13
shrq $5+5,%r9
movd 8(%rax),%xmm5
subq $1,%r9
leaq L$inc(%rip),%rax
movq %r13,16+8(%rsp)
movq %r9,24+8(%rsp)
movq %rdi,56+8(%rsp)
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 88-112(%rsp,%r10,1),%r10
leaq 128(%rdx),%rdi
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
.byte 0x67
movdqa %xmm1,%xmm2
.byte 0x67
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
.byte 0x67
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%rdi),%xmm0
pand 80(%rdi),%xmm1
pand 96(%rdi),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%rdi),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%rdi),%xmm4
movdqa -112(%rdi),%xmm5
movdqa -96(%rdi),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%rdi),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%rdi),%xmm4
movdqa -48(%rdi),%xmm5
movdqa -32(%rdi),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%rdi),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%rdi),%xmm4
movdqa 16(%rdi),%xmm5
movdqa 32(%rdi),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%rdi),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
pxor %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%rdi),%rdi
.byte 102,72,15,126,194
leaq 64+32+8(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r12
addq %rax,%r11
mulxq 16(%rsi),%rax,%r13
adcq %rax,%r12
adcq $0,%r13
mulxq 24(%rsi),%rax,%r14
movq %r8,%r15
imulq 32+8(%rsp),%r8
xorq %rbp,%rbp
movq %r8,%rdx
movq %rdi,8+8(%rsp)
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq 24+8(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp L$mulx4x_1st
.p2align 5
L$mulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_1st
movq 8(%rsp),%rax
adcq %rbp,%r15
leaq (%rsi,%rax,1),%rsi
addq %r15,%r14
movq 8+8(%rsp),%rdi
adcq %rbp,%rbp
movq %r14,-8(%rbx)
jmp L$mulx4x_outer
.p2align 5
L$mulx4x_outer:
leaq 16-256(%rbx),%r10
pxor %xmm4,%xmm4
.byte 0x67,0x67
pxor %xmm5,%xmm5
movdqa -128(%rdi),%xmm0
movdqa -112(%rdi),%xmm1
movdqa -96(%rdi),%xmm2
pand 256(%r10),%xmm0
movdqa -80(%rdi),%xmm3
pand 272(%r10),%xmm1
por %xmm0,%xmm4
pand 288(%r10),%xmm2
por %xmm1,%xmm5
pand 304(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%rdi),%xmm0
movdqa -48(%rdi),%xmm1
movdqa -32(%rdi),%xmm2
pand 320(%r10),%xmm0
movdqa -16(%rdi),%xmm3
pand 336(%r10),%xmm1
por %xmm0,%xmm4
pand 352(%r10),%xmm2
por %xmm1,%xmm5
pand 368(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%rdi),%xmm0
movdqa 16(%rdi),%xmm1
movdqa 32(%rdi),%xmm2
pand 384(%r10),%xmm0
movdqa 48(%rdi),%xmm3
pand 400(%r10),%xmm1
por %xmm0,%xmm4
pand 416(%r10),%xmm2
por %xmm1,%xmm5
pand 432(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%rdi),%xmm0
movdqa 80(%rdi),%xmm1
movdqa 96(%rdi),%xmm2
pand 448(%r10),%xmm0
movdqa 112(%rdi),%xmm3
pand 464(%r10),%xmm1
por %xmm0,%xmm4
pand 480(%r10),%xmm2
por %xmm1,%xmm5
pand 496(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%rdi),%rdi
.byte 102,72,15,126,194
movq %rbp,(%rbx)
leaq 32(%rbx,%rax,1),%rbx
mulxq 0(%rsi),%r8,%r11
xorq %rbp,%rbp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
mulxq 24(%rsi),%rdx,%r14
adoxq -16(%rbx),%r12
adcxq %rdx,%r13
leaq (%rcx,%rax,1),%rcx
leaq 32(%rsi),%rsi
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
adoxq %rbp,%r14
movq %r8,%r15
imulq 32+8(%rsp),%r8
movq %r8,%rdx
xorq %rbp,%rbp
movq %rdi,8+8(%rsp)
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq 24+8(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r12
movq %r11,-24(%rbx)
adoxq %rbp,%r15
movq %r12,-16(%rbx)
leaq 32(%rcx),%rcx
jmp L$mulx4x_inner
.p2align 5
L$mulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
movq %r11,-32(%rbx)
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
leaq 32(%rcx),%rcx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_inner
movq 0+8(%rsp),%rax
adcq %rbp,%r15
subq 0(%rbx),%rdi
movq 8+8(%rsp),%rdi
movq 16+8(%rsp),%r10
adcq %r15,%r14
leaq (%rsi,%rax,1),%rsi
adcq %rbp,%rbp
movq %r14,-8(%rbx)
cmpq %r10,%rdi
jb L$mulx4x_outer
movq -8(%rcx),%r10
movq %rbp,%r8
movq (%rcx,%rax,1),%r12
leaq (%rcx,%rax,1),%rbp
movq %rax,%rcx
leaq (%rbx,%rax,1),%rdi
xorl %eax,%eax
xorq %r15,%r15
subq %r14,%r10
adcq %r15,%r15
orq %r15,%r8
sarq $3+2,%rcx
subq %r8,%rax
movq 56+8(%rsp),%rdx
decq %r12
movq 8(%rbp),%r13
xorq %r8,%r8
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqrx4x_sub_entry
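# bn_powerx5: MULX/ADX variant of bn_power5_nohw — five Montgomery squarings
# (__bn_sqrx8x_internal + __bn_postx4x_internal) followed by one Montgomery
# multiplication via mulx4x_internal.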
.globl _bn_powerx5
.private_extern _bn_powerx5
.p2align 5
_bn_powerx5:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$powerx5_prologue:
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$pwrx_sp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$pwrx_sp_done
.p2align 5
L$pwrx_sp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$pwrx_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwrx_page_walk
jmp L$pwrx_page_walk_done
L$pwrx_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwrx_page_walk
L$pwrx_page_walk_done:
movq %r9,%r10
negq %r9
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,72,15,110,209
.byte 102,73,15,110,218
.byte 102,72,15,110,226
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$powerx5_body:
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
movq %r10,%r9
movq %rsi,%rdi
.byte 102,72,15,126,209
.byte 102,72,15,126,226
movq 40(%rsp),%rax
call mulx4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$powerx5_epilogue:
ret
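# bn_sqrx8x_internal: MULX/ADX squaring core. Zeroes the scratch area, computes
# the double-width square, then falls through to __bn_sqrx8x_reduction.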
.globl _bn_sqrx8x_internal
.private_extern _bn_sqrx8x_internal
.private_extern _bn_sqrx8x_internal
.p2align 5
_bn_sqrx8x_internal:
__bn_sqrx8x_internal:
_CET_ENDBR
leaq 48+8(%rsp),%rdi
leaq (%rsi,%r9,1),%rbp
movq %r9,0+8(%rsp)
movq %rbp,8+8(%rsp)
jmp L$sqr8x_zero_start
.p2align 5
.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
L$sqrx8x_zero:
.byte 0x3e
movdqa %xmm0,0(%rdi)
movdqa %xmm0,16(%rdi)
movdqa %xmm0,32(%rdi)
movdqa %xmm0,48(%rdi)
L$sqr8x_zero_start:
movdqa %xmm0,64(%rdi)
movdqa %xmm0,80(%rdi)
movdqa %xmm0,96(%rdi)
movdqa %xmm0,112(%rdi)
leaq 128(%rdi),%rdi
subq $64,%r9
jnz L$sqrx8x_zero
movq 0(%rsi),%rdx
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
leaq 48+8(%rsp),%rdi
xorq %rbp,%rbp
jmp L$sqrx8x_outer_loop
.p2align 5
L$sqrx8x_outer_loop:
mulxq 8(%rsi),%r8,%rax
adcxq %r9,%r8
adoxq %rax,%r10
mulxq 16(%rsi),%r9,%rax
adcxq %r10,%r9
adoxq %rax,%r11
.byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00
adcxq %r11,%r10
adoxq %rax,%r12
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00
adcxq %r12,%r11
adoxq %rax,%r13
mulxq 40(%rsi),%r12,%rax
adcxq %r13,%r12
adoxq %rax,%r14
mulxq 48(%rsi),%r13,%rax
adcxq %r14,%r13
adoxq %r15,%rax
mulxq 56(%rsi),%r14,%r15
movq 8(%rsi),%rdx
adcxq %rax,%r14
adoxq %rbp,%r15
adcq 64(%rdi),%r15
movq %r8,8(%rdi)
movq %r9,16(%rdi)
sbbq %rcx,%rcx
xorq %rbp,%rbp
mulxq 16(%rsi),%r8,%rbx
mulxq 24(%rsi),%r9,%rax
adcxq %r10,%r8
adoxq %rbx,%r9
mulxq 32(%rsi),%r10,%rbx
adcxq %r11,%r9
adoxq %rax,%r10
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00
adcxq %r12,%r10
adoxq %rbx,%r11
.byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00
adcxq %r13,%r11
adoxq %r14,%r12
.byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00
movq 16(%rsi),%rdx
adcxq %rax,%r12
adoxq %rbx,%r13
adcxq %r15,%r13
adoxq %rbp,%r14
adcxq %rbp,%r14
movq %r8,24(%rdi)
movq %r9,32(%rdi)
mulxq 24(%rsi),%r8,%rbx
mulxq 32(%rsi),%r9,%rax
adcxq %r10,%r8
adoxq %rbx,%r9
mulxq 40(%rsi),%r10,%rbx
adcxq %r11,%r9
adoxq %rax,%r10
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00
adcxq %r12,%r10
adoxq %r13,%r11
.byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00
.byte 0x3e
movq 24(%rsi),%rdx
adcxq %rbx,%r11
adoxq %rax,%r12
adcxq %r14,%r12
movq %r8,40(%rdi)
movq %r9,48(%rdi)
mulxq 32(%rsi),%r8,%rax
adoxq %rbp,%r13
adcxq %rbp,%r13
mulxq 40(%rsi),%r9,%rbx
adcxq %r10,%r8
adoxq %rax,%r9
mulxq 48(%rsi),%r10,%rax
adcxq %r11,%r9
adoxq %r12,%r10
mulxq 56(%rsi),%r11,%r12
movq 32(%rsi),%rdx
movq 40(%rsi),%r14
adcxq %rbx,%r10
adoxq %rax,%r11
movq 48(%rsi),%r15
adcxq %r13,%r11
adoxq %rbp,%r12
adcxq %rbp,%r12
movq %r8,56(%rdi)
movq %r9,64(%rdi)
mulxq %r14,%r9,%rax
movq 56(%rsi),%r8
adcxq %r10,%r9
mulxq %r15,%r10,%rbx
adoxq %rax,%r10
adcxq %r11,%r10
mulxq %r8,%r11,%rax
movq %r14,%rdx
adoxq %rbx,%r11
adcxq %r12,%r11
adcxq %rbp,%rax
mulxq %r15,%r14,%rbx
mulxq %r8,%r12,%r13
movq %r15,%rdx
leaq 64(%rsi),%rsi
adcxq %r14,%r11
adoxq %rbx,%r12
adcxq %rax,%r12
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %r8,%r8,%r14
adcxq %r8,%r13
adcxq %rbp,%r14
cmpq 8+8(%rsp),%rsi
je L$sqrx8x_outer_break
negq %rcx
movq $-8,%rcx
movq %rbp,%r15
movq 64(%rdi),%r8
adcxq 72(%rdi),%r9
adcxq 80(%rdi),%r10
adcxq 88(%rdi),%r11
adcq 96(%rdi),%r12
adcq 104(%rdi),%r13
adcq 112(%rdi),%r14
adcq 120(%rdi),%r15
leaq (%rsi),%rbp
leaq 128(%rdi),%rdi
sbbq %rax,%rax
movq -64(%rsi),%rdx
movq %rax,16+8(%rsp)
movq %rdi,24+8(%rsp)
xorl %eax,%eax
jmp L$sqrx8x_loop
.p2align 5
L$sqrx8x_loop:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rax,%rbx
adoxq %r9,%r8
mulxq 8(%rbp),%rax,%r9
adcxq %rax,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rax,%r10
adcxq %rax,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
movq %rbx,(%rdi,%rcx,8)
movl $0,%ebx
adcxq %rax,%r13
adoxq %r15,%r14
.byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00
movq 8(%rsi,%rcx,8),%rdx
adcxq %rax,%r14
adoxq %rbx,%r15
adcxq %rbx,%r15
.byte 0x67
incq %rcx
jnz L$sqrx8x_loop
leaq 64(%rbp),%rbp
movq $-8,%rcx
cmpq 8+8(%rsp),%rbp
je L$sqrx8x_break
subq 16+8(%rsp),%rbx
.byte 0x66
movq -64(%rsi),%rdx
adcxq 0(%rdi),%r8
adcxq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
.byte 0x67
sbbq %rax,%rax
xorl %ebx,%ebx
movq %rax,16+8(%rsp)
jmp L$sqrx8x_loop
.p2align 5
L$sqrx8x_break:
xorq %rbp,%rbp
subq 16+8(%rsp),%rbx
adcxq %rbp,%r8
movq 24+8(%rsp),%rcx
adcxq %rbp,%r9
movq 0(%rsi),%rdx
adcq $0,%r10
movq %r8,0(%rdi)
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
cmpq %rcx,%rdi
je L$sqrx8x_outer_loop
movq %r9,8(%rdi)
movq 8(%rcx),%r9
movq %r10,16(%rdi)
movq 16(%rcx),%r10
movq %r11,24(%rdi)
movq 24(%rcx),%r11
movq %r12,32(%rdi)
movq 32(%rcx),%r12
movq %r13,40(%rdi)
movq 40(%rcx),%r13
movq %r14,48(%rdi)
movq 48(%rcx),%r14
movq %r15,56(%rdi)
movq 56(%rcx),%r15
movq %rcx,%rdi
jmp L$sqrx8x_outer_loop
.p2align 5
L$sqrx8x_outer_break:
movq %r9,72(%rdi)
.byte 102,72,15,126,217
movq %r10,80(%rdi)
movq %r11,88(%rdi)
movq %r12,96(%rdi)
movq %r13,104(%rdi)
movq %r14,112(%rdi)
leaq 48+8(%rsp),%rdi
movq (%rsi,%rcx,1),%rdx
movq 8(%rdi),%r11
xorq %r10,%r10
movq 0+8(%rsp),%r9
adoxq %r11,%r11
movq 16(%rdi),%r12
movq 24(%rdi),%r13
.p2align 5
L$sqrx4x_shift_n_add:
mulxq %rdx,%rax,%rbx
adoxq %r12,%r12
adcxq %r10,%rax
.byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00
.byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00
adoxq %r13,%r13
adcxq %r11,%rbx
movq 40(%rdi),%r11
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r10,%r10
adcxq %r12,%rax
movq 16(%rsi,%rcx,1),%rdx
movq 48(%rdi),%r12
adoxq %r11,%r11
adcxq %r13,%rbx
movq 56(%rdi),%r13
movq %rax,16(%rdi)
movq %rbx,24(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r12,%r12
adcxq %r10,%rax
movq 24(%rsi,%rcx,1),%rdx
leaq 32(%rcx),%rcx
movq 64(%rdi),%r10
adoxq %r13,%r13
adcxq %r11,%rbx
movq 72(%rdi),%r11
movq %rax,32(%rdi)
movq %rbx,40(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r10,%r10
adcxq %r12,%rax
jrcxz L$sqrx4x_shift_n_add_break
.byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00
adoxq %r11,%r11
adcxq %r13,%rbx
movq 80(%rdi),%r12
movq 88(%rdi),%r13
movq %rax,48(%rdi)
movq %rbx,56(%rdi)
leaq 64(%rdi),%rdi
nop
jmp L$sqrx4x_shift_n_add
.p2align 5
L$sqrx4x_shift_n_add_break:
adcxq %r13,%rbx
movq %rax,48(%rdi)
movq %rbx,56(%rdi)
leaq 64(%rdi),%rdi
.byte 102,72,15,126,213
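# __bn_sqrx8x_reduction: MULX/ADX Montgomery reduction of the double-width
# square, eight 64-bit modulus words per inner iteration.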
__bn_sqrx8x_reduction:
xorl %eax,%eax
movq 32+8(%rsp),%rbx
movq 48+8(%rsp),%rdx
leaq -64(%rbp,%r9,1),%rcx
movq %rcx,0+8(%rsp)
movq %rdi,8+8(%rsp)
leaq 48+8(%rsp),%rdi
jmp L$sqrx8x_reduction_loop
.p2align 5
L$sqrx8x_reduction_loop:
movq 8(%rdi),%r9
movq 16(%rdi),%r10
movq 24(%rdi),%r11
movq 32(%rdi),%r12
movq %rdx,%r8
imulq %rbx,%rdx
movq 40(%rdi),%r13
movq 48(%rdi),%r14
movq 56(%rdi),%r15
movq %rax,24+8(%rsp)
leaq 64(%rdi),%rdi
xorq %rsi,%rsi
movq $-8,%rcx
jmp L$sqrx8x_reduce
.p2align 5
L$sqrx8x_reduce:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rbx,%rax
adoxq %r9,%r8
mulxq 8(%rbp),%rbx,%r9
adcxq %rbx,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rbx,%r10
adcxq %rbx,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rbx,%r11
adcxq %rbx,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00
movq %rdx,%rax
movq %r8,%rdx
adcxq %rbx,%r11
adoxq %r13,%r12
mulxq 32+8(%rsp),%rbx,%rdx
movq %rax,%rdx
movq %rax,64+48+8(%rsp,%rcx,8)
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
adcxq %rax,%r13
adoxq %r15,%r14
mulxq 56(%rbp),%rax,%r15
movq %rbx,%rdx
adcxq %rax,%r14
adoxq %rsi,%r15
adcxq %rsi,%r15
.byte 0x67,0x67,0x67
incq %rcx
jnz L$sqrx8x_reduce
movq %rsi,%rax
cmpq 0+8(%rsp),%rbp
jae L$sqrx8x_no_tail
movq 48+8(%rsp),%rdx
addq 0(%rdi),%r8
leaq 64(%rbp),%rbp
movq $-8,%rcx
adcxq 8(%rdi),%r9
adcxq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
sbbq %rax,%rax
xorq %rsi,%rsi
movq %rax,16+8(%rsp)
jmp L$sqrx8x_tail
.p2align 5
L$sqrx8x_tail:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rax,%rbx
adoxq %r9,%r8
mulxq 8(%rbp),%rax,%r9
adcxq %rax,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rax,%r10
adcxq %rax,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
adcxq %rax,%r13
adoxq %r15,%r14
mulxq 56(%rbp),%rax,%r15
movq 72+48+8(%rsp,%rcx,8),%rdx
adcxq %rax,%r14
adoxq %rsi,%r15
movq %rbx,(%rdi,%rcx,8)
movq %r8,%rbx
adcxq %rsi,%r15
incq %rcx
jnz L$sqrx8x_tail
cmpq 0+8(%rsp),%rbp
jae L$sqrx8x_tail_done
subq 16+8(%rsp),%rsi
movq 48+8(%rsp),%rdx
leaq 64(%rbp),%rbp
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
sbbq %rax,%rax
subq $8,%rcx
xorq %rsi,%rsi
movq %rax,16+8(%rsp)
jmp L$sqrx8x_tail
.p2align 5
L$sqrx8x_tail_done:
xorq %rax,%rax
addq 24+8(%rsp),%r8
adcq $0,%r9
adcq $0,%r10
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
adcq $0,%rax
subq 16+8(%rsp),%rsi
L$sqrx8x_no_tail:
adcq 0(%rdi),%r8
.byte 102,72,15,126,217
adcq 8(%rdi),%r9
movq 56(%rbp),%rsi
.byte 102,72,15,126,213
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
adcq $0,%rax
movq 32+8(%rsp),%rbx
movq 64(%rdi,%rcx,1),%rdx
movq %r8,0(%rdi)
leaq 64(%rdi),%r8
movq %r9,8(%rdi)
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq %r12,32(%rdi)
movq %r13,40(%rdi)
movq %r14,48(%rdi)
movq %r15,56(%rdi)
leaq 64(%rdi,%rcx,1),%rdi
cmpq 8+8(%rsp),%r8
jb L$sqrx8x_reduction_loop
ret
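# __bn_postx4x_internal: MULX/ADX variant of the final conditional subtraction
# (borrow mask applied with andnq), writing the canonical result to the output.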
.p2align 5
__bn_postx4x_internal:
movq 0(%rbp),%r12
movq %rcx,%r10
movq %rcx,%r9
negq %rax
sarq $3+2,%rcx
.byte 102,72,15,126,202
.byte 102,72,15,126,206
decq %r12
movq 8(%rbp),%r13
xorq %r8,%r8
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqrx4x_sub_entry
.p2align 4
L$sqrx4x_sub:
movq 0(%rbp),%r12
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
L$sqrx4x_sub_entry:
andnq %rax,%r12,%r12
leaq 32(%rbp),%rbp
andnq %rax,%r13,%r13
andnq %rax,%r14,%r14
andnq %rax,%r15,%r15
negq %r8
adcq 0(%rdi),%r12
adcq 8(%rdi),%r13
adcq 16(%rdi),%r14
adcq 24(%rdi),%r15
movq %r12,0(%rdx)
leaq 32(%rdi),%rdi
movq %r13,8(%rdx)
sbbq %r8,%r8
movq %r14,16(%rdx)
movq %r15,24(%rdx)
leaq 32(%rdx),%rdx
incq %rcx
jnz L$sqrx4x_sub
negq %r9
ret
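# bn_scatter5: stores an n-word value into the power table at the requested
# index, one 64-bit word every 256 bytes (the scatter half of scatter/gather).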
.globl _bn_scatter5
.private_extern _bn_scatter5
.p2align 4
_bn_scatter5:
_CET_ENDBR
cmpl $0,%esi
jz L$scatter_epilogue
leaq (%rdx,%rcx,8),%rdx
L$scatter:
movq (%rdi),%rax
leaq 8(%rdi),%rdi
movq %rax,(%rdx)
leaq 256(%rdx),%rdx
subl $1,%esi
jnz L$scatter
L$scatter_epilogue:
ret
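# bn_gather5: constant-time table lookup — builds pcmpeqd masks from the
# requested index against the L$inc counter pattern, then ANDs/ORs every table
# row so the memory access pattern is independent of the index.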
.globl _bn_gather5
.private_extern _bn_gather5
.p2align 5
_bn_gather5:
L$SEH_begin_bn_gather5:
_CET_ENDBR
.byte 0x4c,0x8d,0x14,0x24
.byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00
leaq L$inc(%rip),%rax
andq $-16,%rsp
movd %ecx,%xmm5
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 128(%rdx),%r11
leaq 128(%rsp),%rax
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,-128(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,-112(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,-96(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,-80(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,-64(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,-48(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,-32(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,-16(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,0(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,16(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,32(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,48(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,64(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,80(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,96(%rax)
movdqa %xmm4,%xmm2
movdqa %xmm3,112(%rax)
jmp L$gather
.p2align 5
L$gather:
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r11),%xmm0
movdqa -112(%r11),%xmm1
movdqa -96(%r11),%xmm2
pand -128(%rax),%xmm0
movdqa -80(%r11),%xmm3
pand -112(%rax),%xmm1
por %xmm0,%xmm4
pand -96(%rax),%xmm2
por %xmm1,%xmm5
pand -80(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r11),%xmm0
movdqa -48(%r11),%xmm1
movdqa -32(%r11),%xmm2
pand -64(%rax),%xmm0
movdqa -16(%r11),%xmm3
pand -48(%rax),%xmm1
por %xmm0,%xmm4
pand -32(%rax),%xmm2
por %xmm1,%xmm5
pand -16(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r11),%xmm0
movdqa 16(%r11),%xmm1
movdqa 32(%r11),%xmm2
pand 0(%rax),%xmm0
movdqa 48(%r11),%xmm3
pand 16(%rax),%xmm1
por %xmm0,%xmm4
pand 32(%rax),%xmm2
por %xmm1,%xmm5
pand 48(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r11),%xmm0
movdqa 80(%r11),%xmm1
movdqa 96(%r11),%xmm2
pand 64(%rax),%xmm0
movdqa 112(%r11),%xmm3
pand 80(%rax),%xmm1
por %xmm0,%xmm4
pand 96(%rax),%xmm2
por %xmm1,%xmm5
pand 112(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
leaq 256(%r11),%r11
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
movq %xmm0,(%rdi)
leaq 8(%rdi),%rdi
subl $1,%esi
jnz L$gather
leaq (%r10),%rsp
ret
L$SEH_end_bn_gather5:
.section __DATA,__const
.p2align 6
L$inc:
.long 0,0, 1,1
.long 2,2, 2,2
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
#endif
|
Chigozieworldwide/Multi | 82,352 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/aesv8-gcm-armv8-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#if __ARM_MAX_ARCH__ >= 8
.arch armv8-a+crypto
.text
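// aes_gcm_enc_kernel: fused AES-CTR encryption and GHASH authentication using
// the ARMv8 Crypto extensions; the main loop keeps four counter blocks in
// flight, interleaving AES rounds with the GHASH polynomial multiplications.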
.globl aes_gcm_enc_kernel
.hidden aes_gcm_enc_kernel
.type aes_gcm_enc_kernel,%function
.align 4
aes_gcm_enc_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
add x4, x0, x1, lsr #3 // end_input_ptr
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
sub x5, x5, #1 // byte_len - 1
ldr q18, [x8, #0] // load rk0
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
ldr q25, [x8, #112] // load rk7
add x5, x5, x0
lsr x12, x11, #32
fmov d2, x10 // CTR block 2
orr w11, w11, w11
rev w12, w12 // rev_ctr32
fmov d1, x10 // CTR block 1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
add w12, w12, #1 // increment rev_ctr32
rev w9, w12 // CTR block 1
fmov d3, x10 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 1
add w12, w12, #1 // CTR block 1
ldr q19, [x8, #16] // load rk1
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
ldr q20, [x8, #32] // load rk2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
orr x9, x11, x9, lsl #32 // CTR block 3
fmov v3.d[1], x9 // CTR block 3
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q21, [x8, #48] // load rk3
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q24, [x8, #96] // load rk6
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q23, [x8, #80] // load rk5
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q22, [x8, #64] // load rk4
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
ldr q29, [x8, #176] // load rk11
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
ldr q26, [x8, #128] // load rk8
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
add w12, w12, #1 // CTR block 3
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
ldr q27, [x8, #144] // load rk9
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
ldr q28, [x8, #160] // load rk10
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
b.lt .Lenc_finish_first_blocks // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
b.eq .Lenc_finish_first_blocks // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
.Lenc_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v2.16b, v31.16b // AES block 2 - round N-1
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
aese v3.16b, v31.16b // AES block 3 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
b.ge .Lenc_tail // handle tail
ldp x19, x20, [x0, #16] // AES block 1 - load plaintext
rev w9, w12 // CTR block 4
ldp x6, x7, [x0, #0] // AES block 0 - load plaintext
ldp x23, x24, [x0, #48] // AES block 3 - load plaintext
ldp x21, x22, [x0, #32] // AES block 2 - load plaintext
add x0, x0, #64 // AES input_ptr update
eor x19, x19, x13 // AES block 1 - round N low
eor x20, x20, x14 // AES block 1 - round N high
fmov d5, x19 // AES block 1 - mov low
eor x6, x6, x13 // AES block 0 - round N low
eor x7, x7, x14 // AES block 0 - round N high
eor x24, x24, x14 // AES block 3 - round N high
fmov d4, x6 // AES block 0 - mov low
cmp x0, x5 // check if we have <= 8 blocks
fmov v4.d[1], x7 // AES block 0 - mov high
eor x23, x23, x13 // AES block 3 - round N low
eor x21, x21, x13 // AES block 2 - round N low
fmov v5.d[1], x20 // AES block 1 - mov high
fmov d6, x21 // AES block 2 - mov low
add w12, w12, #1 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov d7, x23 // AES block 3 - mov low
eor x22, x22, x14 // AES block 2 - round N high
fmov v6.d[1], x22 // AES block 2 - mov high
eor v4.16b, v4.16b, v0.16b // AES block 0 - result
fmov d0, x10 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
eor v5.16b, v5.16b, v1.16b // AES block 1 - result
fmov d1, x10 // CTR block 5
orr x9, x11, x9, lsl #32 // CTR block 5
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
st1 { v4.16b}, [x2], #16 // AES block 0 - store result
fmov v7.d[1], x24 // AES block 3 - mov high
orr x9, x11, x9, lsl #32 // CTR block 6
eor v6.16b, v6.16b, v2.16b // AES block 2 - result
st1 { v5.16b}, [x2], #16 // AES block 1 - store result
add w12, w12, #1 // CTR block 6
fmov d2, x10 // CTR block 6
fmov v2.d[1], x9 // CTR block 6
st1 { v6.16b}, [x2], #16 // AES block 2 - store result
rev w9, w12 // CTR block 7
orr x9, x11, x9, lsl #32 // CTR block 7
eor v7.16b, v7.16b, v3.16b // AES block 3 - result
st1 { v7.16b}, [x2], #16 // AES block 3 - store result
b.ge .Lenc_prepretail // do prepretail
.Lenc_main_loop: // main loop start
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d3, x10 // CTR block 4k+3
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
fmov v3.d[1], x9 // CTR block 4k+3
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
eor v4.16b, v4.16b, v11.16b // PRE 1
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x23, x23, x13 // AES block 4k+7 - round N low
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d10, v17.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
eor x22, x22, x14 // AES block 4k+6 - round N high
mov d8, v4.d[1] // GHASH block 4k - mid
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor x19, x19, x13 // AES block 4k+5 - round N low
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
eor x21, x21, x13 // AES block 4k+6 - round N low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
movi v8.8b, #0xc2
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
fmov d5, x19 // AES block 4k+5 - mov low
ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext
b.lt .Lenc_main_loop_continue // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq .Lenc_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
.Lenc_main_loop_continue:
shl d8, d8, #56 // mod_constant
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
add w12, w12, #1 // CTR block 4k+3
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
add x0, x0, #64 // AES input_ptr update
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
rev w9, w12 // CTR block 4k+8
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor x6, x6, x13 // AES block 4k+4 - round N low
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
eor x7, x7, x14 // AES block 4k+4 - round N high
fmov d4, x6 // AES block 4k+4 - mov low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid
eor x20, x20, x14 // AES block 4k+5 - round N high
eor x24, x24, x14 // AES block 4k+7 - round N high
add w12, w12, #1 // CTR block 4k+8
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
fmov d7, x23 // AES block 4k+7 - mov low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
fmov v5.d[1], x20 // AES block 4k+5 - mov high
fmov d6, x21 // AES block 4k+6 - mov low
cmp x0, x5 // .LOOP CONTROL
fmov v6.d[1], x22 // AES block 4k+6 - mov high
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
rev w9, w12 // CTR block 4k+9
add w12, w12, #1 // CTR block 4k+9
eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result
fmov d1, x10 // CTR block 4k+9
orr x9, x11, x9, lsl #32 // CTR block 4k+9
fmov v1.d[1], x9 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
rev w9, w12 // CTR block 4k+10
st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result
orr x9, x11, x9, lsl #32 // CTR block 4k+10
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
fmov v7.d[1], x24 // AES block 4k+7 - mov high
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result
fmov d2, x10 // CTR block 4k+10
st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result
fmov v2.d[1], x9 // CTR block 4k+10
rev w9, w12 // CTR block 4k+11
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
orr x9, x11, x9, lsl #32 // CTR block 4k+11
eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result
st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result
b.lt .Lenc_main_loop
.Lenc_prepretail: // PREPRETAIL
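// Here the GHASH for the last four ciphertext blocks produced by the main
// loop is computed while the AES rounds run on the next four counter blocks,
// whose keystream the tail below consumes for the final (possibly partial)
// blocks.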
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov d3, x10 // CTR block 4k+3
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
fmov v3.d[1], x9 // CTR block 4k+3
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
add w12, w12, #1 // CTR block 4k+3
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v10.16b, v10.16b, v9.16b // karatsuba tidy up
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
pmull v4.1q, v9.1d, v8.1d
ext v9.16b, v9.16b, v9.16b, #8
eor v10.16b, v10.16b, v11.16b
b.lt .Lenc_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
b.eq .Lenc_finish_prepretail // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
.Lenc_finish_prepretail:
eor v10.16b, v10.16b, v4.16b
eor v10.16b, v10.16b, v9.16b
pmull v4.1q, v10.1d, v8.1d
ext v10.16b, v10.16b, v10.16b, #8
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
eor v11.16b, v11.16b, v4.16b
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b
.Lenc_tail: // TAIL
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
cmp x5, #48
fmov d4, x6 // AES block 4k+4 - mov low
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result
b.gt .Lenc_blocks_more_than_3
cmp x5, #32
mov v3.16b, v2.16b
movi v11.8b, #0
movi v9.8b, #0
sub w12, w12, #1
mov v2.16b, v1.16b
movi v10.8b, #0
b.gt .Lenc_blocks_more_than_2
mov v3.16b, v1.16b
sub w12, w12, #1
cmp x5, #16
b.gt .Lenc_blocks_more_than_1
sub w12, w12, #1
b .Lenc_blocks_less_than_1
.Lenc_blocks_more_than_3: // blocks left > 3
st1 { v5.16b}, [x2], #16 // AES final-3 block - store result
ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-3 block
eor x6, x6, x13 // AES final-2 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor x7, x7, x14 // AES final-2 block - round N high
mov d22, v4.d[1] // GHASH final-3 block - mid
fmov d5, x6 // AES final-2 block - mov low
fmov v5.d[1], x7 // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
mov d10, v17.d[1] // GHASH final-3 block - mid
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor v5.16b, v5.16b, v1.16b // AES final-2 block - result
.Lenc_blocks_more_than_2: // blocks left > 2
st1 { v5.16b}, [x2], #16 // AES final-2 block - store result
ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-2 block
eor x6, x6, x13 // AES final-1 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
fmov d5, x6 // AES final-1 block - mov low
eor x7, x7, x14 // AES final-1 block - round N high
fmov v5.d[1], x7 // AES final-1 block - mov high
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
eor v5.16b, v5.16b, v2.16b // AES final-1 block - result
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
.Lenc_blocks_more_than_1: // blocks left > 1
st1 { v5.16b}, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ldp x6, x7, [x0], #16 // AES final block - load input low & high
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
eor x6, x6, x13 // AES final block - round N low
mov d22, v4.d[1] // GHASH final-1 block - mid
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor x7, x7, x14 // AES final block - round N high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
fmov d5, x6 // AES final block - mov low
fmov v5.d[1], x7 // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
eor v5.16b, v5.16b, v3.16b // AES final block - result
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
.Lenc_blocks_less_than_1: // blocks left <= 1
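// The last block may be partial: a byte mask derived from the remaining bit
// length zeroes the unused high bytes before the block is GHASHed, and the
// bytes already present at the destination beyond the message end are merged
// back in (bif) before the final 16-byte store.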
and x1, x1, #127 // bit_length %= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored
mvn x14, xzr // rkN_h = 0xffffffffffffffff
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x6, x13, x14, lt
csel x7, x14, xzr, lt
fmov d0, x6 // ctr0b is mask for last block
fmov v0.d[1], x7
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
mov d8, v4.d[1] // GHASH final block - mid
rev w9, w12
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
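// Final reduction: fold the low (v11), high (v9) and Karatsuba middle (v10)
// partial products back into a single 128-bit GHASH value. 0xc2 shifted into
// the top byte is the usual constant for reducing modulo the GHASH
// polynomial x^128 + x^7 + x^2 + x + 1.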
movi v8.8b, #0xc2
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
str w9, [x16, #12] // store the updated counter
st1 { v5.16b}, [x2] // store all 16B
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size aes_gcm_enc_kernel,.-aes_gcm_enc_kernel
.globl aes_gcm_dec_kernel
.hidden aes_gcm_dec_kernel
.type aes_gcm_dec_kernel,%function
.align 4
aes_gcm_dec_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ldr q26, [x8, #128] // load rk8
sub x5, x5, #1 // byte_len - 1
ldr q25, [x8, #112] // load rk7
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x4, x0, x1, lsr #3 // end_input_ptr
ldr q24, [x8, #96] // load rk6
lsr x12, x11, #32
ldr q23, [x8, #80] // load rk5
orr w11, w11, w11
ldr q21, [x8, #48] // load rk3
add x5, x5, x0
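// GCM increments only the final 32-bit word of the counter block; it is kept
// in host byte order in w12 (rev_ctr32) and byte-reversed back each time a
// fresh counter block is built.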
rev w12, w12 // rev_ctr32
add w12, w12, #1 // increment rev_ctr32
fmov d3, x10 // CTR block 3
rev w9, w12 // CTR block 1
add w12, w12, #1 // CTR block 1
fmov d1, x10 // CTR block 1
orr x9, x11, x9, lsl #32 // CTR block 1
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
fmov d2, x10 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 3
ldr q18, [x8, #0] // load rk0
fmov v3.d[1], x9 // CTR block 3
add w12, w12, #1 // CTR block 3
ldr q22, [x8, #64] // load rk4
ldr q19, [x8, #16] // load rk1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q20, [x8, #32] // load rk2
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q27, [x8, #144] // load rk9
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q28, [x8, #160] // load rk10
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
ldr q29, [x8, #176] // load rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
b.lt .Ldec_finish_first_blocks // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
b.eq .Ldec_finish_first_blocks // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
.Ldec_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
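// Repack the powers of H for Karatsuba: trn1/trn2 gather the high and low
// 64-bit halves of H^3|H^4 and H^1|H^2, and the eor forms the (hi ^ lo)
// values used by the "mid" pmull products in the GHASH computation.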
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v2.16b, v31.16b // AES block 2 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
aese v3.16b, v31.16b // AES block 3 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
b.ge .Ldec_tail // handle tail
ldr q4, [x0, #0] // AES block 0 - load ciphertext
ldr q5, [x0, #16] // AES block 1 - load ciphertext
rev w9, w12 // CTR block 4
eor v0.16b, v4.16b, v0.16b // AES block 0 - result
eor v1.16b, v5.16b, v1.16b // AES block 1 - result
rev64 v5.16b, v5.16b // GHASH block 1
ldr q7, [x0, #48] // AES block 3 - load ciphertext
mov x7, v0.d[1] // AES block 0 - mov high
mov x6, v0.d[0] // AES block 0 - mov low
rev64 v4.16b, v4.16b // GHASH block 0
add w12, w12, #1 // CTR block 4
fmov d0, x10 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
mov x19, v1.d[0] // AES block 1 - mov low
orr x9, x11, x9, lsl #32 // CTR block 5
mov x20, v1.d[1] // AES block 1 - mov high
eor x7, x7, x14 // AES block 0 - round N high
eor x6, x6, x13 // AES block 0 - round N low
stp x6, x7, [x2], #16 // AES block 0 - store result
fmov d1, x10 // CTR block 5
ldr q6, [x0, #32] // AES block 2 - load ciphertext
add x0, x0, #64 // AES input_ptr update
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
add w12, w12, #1 // CTR block 6
eor x19, x19, x13 // AES block 1 - round N low
orr x9, x11, x9, lsl #32 // CTR block 6
eor x20, x20, x14 // AES block 1 - round N high
stp x19, x20, [x2], #16 // AES block 1 - store result
eor v2.16b, v6.16b, v2.16b // AES block 2 - result
cmp x0, x5 // check if we have <= 8 blocks
b.ge .Ldec_prepretail // do prepretail
.Ldec_main_loop: // main loop start
mov x21, v2.d[0] // AES block 4k+2 - mov low
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
eor v4.16b, v4.16b, v11.16b // PRE 1
rev w9, w12 // CTR block 4k+7
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x23, v3.d[0] // AES block 4k+3 - mov low
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov v3.d[1], x9 // CTR block 4k+7
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor x22, x22, x14 // AES block 4k+2 - round N high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
rev64 v6.16b, v6.16b // GHASH block 4k+2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x21, x21, x13 // AES block 4k+2 - round N low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
rev64 v7.16b, v7.16b // GHASH block 4k+3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor x23, x23, x13 // AES block 4k+3 - round N low
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor x24, x24, x14 // AES block 4k+3 - round N high
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
add w12, w12, #1 // CTR block 4k+7
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
rev w9, w12 // CTR block 4k+8
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
add w12, w12, #1 // CTR block 4k+8
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
movi v8.8b, #0xc2
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
b.lt .Ldec_main_loop_continue // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq .Ldec_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
.Ldec_main_loop_continue:
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext
eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext
ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext
mov x7, v0.d[1] // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
add x0, x0, #64 // AES input_ptr update
mov x6, v0.d[0] // AES block 4k+4 - mov low
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result
rev w9, w12 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+9
cmp x0, x5 // .LOOP CONTROL
add w12, w12, #1 // CTR block 4k+9
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
mov x20, v1.d[1] // AES block 4k+5 - mov high
eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
mov x19, v1.d[0] // AES block 4k+5 - mov low
fmov d1, x10 // CTR block 4k+9
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
fmov v1.d[1], x9 // CTR block 4k+9
rev w9, w12 // CTR block 4k+10
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+10
rev64 v5.16b, v5.16b // GHASH block 4k+5
eor x20, x20, x14 // AES block 4k+5 - round N high
stp x6, x7, [x2], #16 // AES block 4k+4 - store result
eor x19, x19, x13 // AES block 4k+5 - round N low
stp x19, x20, [x2], #16 // AES block 4k+5 - store result
rev64 v4.16b, v4.16b // GHASH block 4k+4
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
b.lt .Ldec_main_loop
.Ldec_prepretail: // PREPRETAIL
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
mov x21, v2.d[0] // AES block 4k+2 - mov low
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
rev w9, w12 // CTR block 4k+7
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v6.16b, v6.16b // GHASH block 4k+2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
mov x23, v3.d[0] // AES block 4k+3 - mov low
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
fmov v3.d[1], x9 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
rev64 v7.16b, v7.16b // GHASH block 4k+3
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
b.lt .Ldec_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
b.eq .Ldec_finish_prepretail // branch if AES-192
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
.Ldec_finish_prepretail:
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor x22, x22, x14 // AES block 4k+2 - round N high
eor x23, x23, x13 // AES block 4k+3 - round N low
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
add w12, w12, #1 // CTR block 4k+7
eor x21, x21, x13 // AES block 4k+2 - round N low
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor x24, x24, x14 // AES block 4k+3 - round N high
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
.Ldec_tail: // TAIL
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext
eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result
mov x6, v0.d[0] // AES block 4k+4 - mov low
mov x7, v0.d[1] // AES block 4k+4 - mov high
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
cmp x5, #48
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
b.gt .Ldec_blocks_more_than_3
sub w12, w12, #1
mov v3.16b, v2.16b
movi v10.8b, #0
movi v11.8b, #0
cmp x5, #32
movi v9.8b, #0
mov v2.16b, v1.16b
b.gt .Ldec_blocks_more_than_2
sub w12, w12, #1
mov v3.16b, v1.16b
cmp x5, #16
b.gt .Ldec_blocks_more_than_1
sub w12, w12, #1
b .Ldec_blocks_less_than_1
.Ldec_blocks_more_than_3: // blocks left > 3
rev64 v4.16b, v5.16b // GHASH final-3 block
ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext
stp x6, x7, [x2], #16 // AES final-3 block - store result
mov d10, v17.d[1] // GHASH final-3 block - mid
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor v0.16b, v5.16b, v1.16b // AES final-2 block - result
mov d22, v4.d[1] // GHASH final-3 block - mid
mov x6, v0.d[0] // AES final-2 block - mov low
mov x7, v0.d[1] // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor x6, x6, x13 // AES final-2 block - round N low
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
eor x7, x7, x14 // AES final-2 block - round N high
.Ldec_blocks_more_than_2: // blocks left > 2
rev64 v4.16b, v5.16b // GHASH final-2 block
ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
stp x6, x7, [x2], #16 // AES final-2 block - store result
eor v0.16b, v5.16b, v2.16b // AES final-1 block - result
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
mov x6, v0.d[0] // AES final-1 block - mov low
mov x7, v0.d[1] // AES final-1 block - mov high
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
movi v8.8b, #0 // suppress further partial tag feed in
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
eor x6, x6, x13 // AES final-1 block - round N low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
eor x7, x7, x14 // AES final-1 block - round N high
.Ldec_blocks_more_than_1: // blocks left > 1
stp x6, x7, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
mov d22, v4.d[1] // GHASH final-1 block - mid
eor v0.16b, v5.16b, v3.16b // AES final block - result
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
mov x6, v0.d[0] // AES final block - mov low
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
mov x7, v0.d[1] // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
eor x6, x6, x13 // AES final block - round N low
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor x7, x7, x14 // AES final block - round N high
.Ldec_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x14, xzr // rkN_h = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
ldp x4, x5, [x2] // load the existing destination bytes that must be preserved
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x9, x13, x14, lt
csel x10, x14, xzr, lt
fmov d0, x9 // ctr0b is mask for last block
and x6, x6, x9
mov v0.d[1], x10
bic x4, x4, x9 // mask out low existing bytes
rev w9, w12
bic x5, x5, x10 // mask out high existing bytes
orr x6, x6, x4
and x7, x7, x10
orr x7, x7, x5
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
mov d8, v4.d[1] // GHASH final block - mid
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
stp x6, x7, [x2]
str w9, [x16, #12] // store the updated counter
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size aes_gcm_dec_kernel,.-aes_gcm_dec_kernel
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
Chigozieworldwide/Multi | 10,863 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/ghash-neon-armv8-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.text
.globl gcm_init_neon
.def gcm_init_neon
.type 32
.endef
.align 4
gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
.globl gcm_gmult_neon
.def gcm_gmult_neon
.type 32
.endef
.align 4
gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
.globl gcm_ghash_neon
.def gcm_ghash_neon
.type 32
.endef
.align 4
gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
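// Only the 8-bit polynomial multiply is available without the crypto
// extensions, so each 64x64-bit carryless product below is assembled from
// byte-rotated 8x8 partial products (the A1*B, A*B1, ... terms).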
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
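// The 256-bit product is reduced modulo the GHASH polynomial using only
// shifts and XORs, mirroring the two-phase reduction of the x86_64 code
// referenced above.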
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section .rodata
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Chigozieworldwide/Multi | 9,958 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/pregenerated/vpaes-x86-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
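// The constants below drive the SSSE3 vector-permutation AES (vpaes)
// implementation: S-box lookups are performed with pshufb against these
// tables, avoiding data-dependent memory accesses.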
.align 64
.L_vpaes_consts:
.long 218628480,235210255,168496130,67568393
.long 252381056,17041926,33884169,51187212
.long 252645135,252645135,252645135,252645135
.long 1512730624,3266504856,1377990664,3401244816
.long 830229760,1275146365,2969422977,3447763452
.long 3411033600,2979783055,338359620,2782886510
.long 4209124096,907596821,221174255,1006095553
.long 191964160,3799684038,3164090317,1589111125
.long 182528256,1777043520,2877432650,3265356744
.long 1874708224,3503451415,3305285752,363511674
.long 1606117888,3487855781,1093350906,2384367825
.long 197121,67569157,134941193,202313229
.long 67569157,134941193,202313229,197121
.long 134941193,202313229,197121,67569157
.long 202313229,197121,67569157,134941193
.long 33619971,100992007,168364043,235736079
.long 235736079,33619971,100992007,168364043
.long 168364043,235736079,33619971,100992007
.long 100992007,168364043,235736079,33619971
.long 50462976,117835012,185207048,252579084
.long 252314880,51251460,117574920,184942860
.long 184682752,252054788,50987272,118359308
.long 118099200,185467140,251790600,50727180
.long 2946363062,528716217,1300004225,1881839624
.long 1532713819,1532713819,1532713819,1532713819
.long 3602276352,4288629033,3737020424,4153884961
.long 1354558464,32357713,2958822624,3775749553
.long 1201988352,132424512,1572796698,503232858
.long 2213177600,1597421020,4103937655,675398315
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
.byte 118,101,114,115,105,116,121,41,0
.align 64
.hidden _vpaes_preheat
.type _vpaes_preheat,@function
.align 16
_vpaes_preheat:
addl (%esp),%ebp
movdqa -48(%ebp),%xmm7
movdqa -16(%ebp),%xmm6
ret
.size _vpaes_preheat,.-_vpaes_preheat
.hidden _vpaes_encrypt_core
.type _vpaes_encrypt_core,@function
.align 16
_vpaes_encrypt_core:
movl $16,%ecx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa (%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
movdqu (%edx),%xmm5
.byte 102,15,56,0,208
movdqa 16(%ebp),%xmm0
pxor %xmm5,%xmm2
psrld $4,%xmm1
addl $16,%edx
.byte 102,15,56,0,193
leal 192(%ebp),%ebx
pxor %xmm2,%xmm0
jmp .L000enc_entry
.align 16
.L001enc_loop:
movdqa 32(%ebp),%xmm4
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa 64(%ebp),%xmm5
pxor %xmm4,%xmm0
movdqa -64(%ebx,%ecx,1),%xmm1
.byte 102,15,56,0,234
movdqa 80(%ebp),%xmm2
movdqa (%ebx,%ecx,1),%xmm4
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addl $16,%edx
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addl $16,%ecx
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andl $48,%ecx
subl $1,%eax
pxor %xmm3,%xmm0
.L000enc_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm6,%xmm0
.byte 102,15,56,0,232
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm7,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm5
pxor %xmm1,%xmm3
jnz .L001enc_loop
movdqa 96(%ebp),%xmm4
movdqa 112(%ebp),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%ebx,%ecx,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
.hidden _vpaes_schedule_core
.type _vpaes_schedule_core,@function
.align 16
_vpaes_schedule_core:
addl (%esp),%ebp
movdqu (%esi),%xmm0
movdqa 320(%ebp),%xmm2
movdqa %xmm0,%xmm3
leal (%ebp),%ebx
movdqa %xmm2,4(%esp)
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
testl %edi,%edi
jnz .L002schedule_am_decrypting
movdqu %xmm0,(%edx)
jmp .L003schedule_go
.L002schedule_am_decrypting:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
movdqu %xmm3,(%edx)
xorl $48,%ecx
.L003schedule_go:
cmpl $192,%eax
ja .L004schedule_256
.L005schedule_128:
movl $10,%eax
.L006loop_schedule_128:
call _vpaes_schedule_round
decl %eax
jz .L007schedule_mangle_last
call _vpaes_schedule_mangle
jmp .L006loop_schedule_128
.align 16
.L004schedule_256:
movdqu 16(%esi),%xmm0
call _vpaes_schedule_transform
movl $7,%eax
.L008loop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decl %eax
jz .L007schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0
movdqa %xmm7,20(%esp)
movdqa %xmm6,%xmm7
call .L_vpaes_schedule_low_round
movdqa 20(%esp),%xmm7
jmp .L008loop_schedule_256
.align 16
.L007schedule_mangle_last:
leal 384(%ebp),%ebx
testl %edi,%edi
jnz .L009schedule_mangle_last_dec
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,193
leal 352(%ebp),%ebx
addl $32,%edx
.L009schedule_mangle_last_dec:
addl $-16,%edx
pxor 336(%ebp),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%edx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core
.hidden _vpaes_schedule_round
.type _vpaes_schedule_round,@function
.align 16
_vpaes_schedule_round:
movdqa 8(%esp),%xmm2
pxor %xmm1,%xmm1
.byte 102,15,58,15,202,15
.byte 102,15,58,15,210,15
pxor %xmm1,%xmm7
pshufd $255,%xmm0,%xmm0
.byte 102,15,58,15,192,1
movdqa %xmm2,8(%esp)
.L_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor 336(%ebp),%xmm7
movdqa -16(%ebp),%xmm4
movdqa -48(%ebp),%xmm5
movdqa %xmm4,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm4,%xmm0
movdqa -32(%ebp),%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm5,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm5,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm5,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa 32(%ebp),%xmm4
.byte 102,15,56,0,226
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round
.hidden _vpaes_schedule_transform
.type _vpaes_schedule_transform,@function
.align 16
_vpaes_schedule_transform:
movdqa -16(%ebp),%xmm2
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
movdqa (%ebx),%xmm2
.byte 102,15,56,0,208
movdqa 16(%ebx),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
.hidden _vpaes_schedule_mangle
.type _vpaes_schedule_mangle,@function
.align 16
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa 128(%ebp),%xmm5
testl %edi,%edi
jnz .L010schedule_mangle_dec
addl $16,%edx
pxor 336(%ebp),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
jmp .L011schedule_mangle_both
.align 16
.L010schedule_mangle_dec:
movdqa -16(%ebp),%xmm2
leal (%ebp),%esi
movdqa %xmm2,%xmm1
pandn %xmm4,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm4
movdqa (%esi),%xmm2
.byte 102,15,56,0,212
movdqa 16(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 32(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 48(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 64(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 80(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 96(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 112(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
addl $-16,%edx
.L011schedule_mangle_both:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
addl $-16,%ecx
andl $48,%ecx
movdqu %xmm3,(%edx)
ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,@function
.align 16
vpaes_set_encrypt_key:
.L_vpaes_set_encrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L012pic_for_function_hit
.L012pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+5-.L012pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
movl $48,%ecx
movl $0,%edi
leal .L_vpaes_consts+0x30-.L013pic_point,%ebp
call _vpaes_schedule_core
.L013pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,@function
.align 16
vpaes_encrypt:
.L_vpaes_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L014pic_for_function_hit
.L014pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+4-.L014pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
leal .L_vpaes_consts+0x30-.L015pic_point,%ebp
call _vpaes_preheat
.L015pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call _vpaes_encrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_encrypt,.-.L_vpaes_encrypt_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|