repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
Ali0Alsallami/my-android-app | 2,045 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.21/src/arch/powerpc64_openpower.s | /* Implementation of stack switching routines for OpenPOWER 64-bit ELF ABI
The specification can be found at
http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/ch_preface.html
This ABI is usually used by the ppc64le targets.
*/
#include "psm.h"
.text
.abiversion 2
.globl rust_psm_stack_direction
.p2align 4
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
/* Reports which way the stack grows; PowerPC stacks grow downwards. */
.cfi_startproc
li 3, STACK_DIRECTION_DESCENDING /* r3 is the ELFv2 integer return register */
blr
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc
.globl rust_psm_stack_pointer
.p2align 4
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
/* Returns the caller's current stack pointer. */
.cfi_startproc
mr 3, 1 /* r1 is the stack pointer in the ELFv2 ABI; copy it to the return register */
blr
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc
.globl rust_psm_replace_stack
.p2align 4
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
/* Permanently switches to the stack whose top is in r5 and tail-jumps to the
   callee in r4; the callee's argument is already in r3. Never returns. */
.cfi_startproc
addi 5, 5, -32 /* reserve the minimum 32-byte ELFv2 frame header below the new stack top */
mtctr 4
mr 12, 4 /* ELFv2: r12 must hold the callee's entry address so its global entry point can compute the TOC */
mr 1, 5 /* install the new stack pointer */
bctr /* tail-jump to the callee; r3 (its argument) is untouched */
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc
.globl rust_psm_on_stack
.p2align 4
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
/* Calls the callee in r5 (arguments already in r3/r4) on the stack whose top
   is in r6, then restores the original stack and returns to the caller. */
.cfi_startproc
mflr 0
std 0, -8(6) /* save the return address into the new frame */
std 2, -24(6) /* save the caller's TOC pointer (r2) */
sub 6, 6, 1 /* r6 = distance from old r1 to the new stack top */
addi 6, 6, -48 /* leave room for a 48-byte frame on the new stack */
stdux 1, 1, 6 /* store the old r1 at the base of the new frame and switch r1 to it */
.cfi_def_cfa r1, 48
.cfi_offset r1, -48
.cfi_offset r2, -24
.cfi_offset lr, -8
mr 12, 5 /* ELFv2: r12 = callee entry address for global-entry TOC setup */
mtctr 5
bctrl
ld 2, 24(1) /* restore the TOC pointer */
.cfi_restore r2
ld 0, 40(1)
mtlr 0
.cfi_restore lr
/* FIXME: after this instruction backtrace breaks until control returns to the caller */
ld 1, 0(1) /* restore the original stack pointer saved by stdux */
blr
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
|
alizeeshan1234/doppler-asm | 1,373 | src/doppler-asm/doppler-asm.s | .globl e
e:
.equ ADMIN_HEADER, 0x0008
.equ EXPECTED_ADMIN_HEADER, 0x01ff
.equ ADMIN_KEY_0, 0x0010
.equ ADMIN_KEY_1, 0x0018
.equ ADMIN_KEY_2, 0x0020
.equ ADMIN_KEY_3, 0x0028
.equ EXPECTED_ADMIN_KEY_0, 0x2222222222222222
.equ EXPECTED_ADMIN_KEY_1, 0x2222222222222222
.equ EXPECTED_ADMIN_KEY_2, 0x2222222222222222
.equ ORACLE_HEADER, 0x2868
.equ ORACLE_SEQUENCE, 0x28c0 // u64 (8 bytes)
.equ ORACLE_PRICE, 0x28c8 // u64 (8 bytes)
.equ INSTRUCTION_SEQUENCE, 0x50e0 // u64 (8 bytes)
.equ INSTRUCTION_PRICE, 0x50e8 // u64 (8 bytes)
ldxh r2, [r1+ADMIN_HEADER]
jne r2, EXPECTED_ADMIN_HEADER, abort
ldxdw r2, [r1+ADMIN_KEY_0]
ldxdw r3, [r1+ADMIN_KEY_1]
ldxdw r4, [r1+ADMIN_KEY_2]
ldxdw r0, [r1+ADMIN_KEY_3]
lddw r6, EXPECTED_ADMIN_KEY_0
lddw r7, EXPECTED_ADMIN_KEY_1
lddw r8, EXPECTED_ADMIN_KEY_2
// lddw r9, EXPECTED_ADMIN_KEY_3 // Skip this allocation to save 1 CU
jne r2, r6, abort
jne r3, r7, abort
jne r4, r8, abort
// jne r5, r9, abort // Replace 64-bit comparison with r0 arithmetic below
sub32 r0, 0x22222222 // 32-bit imm widens to 64 bits to cleanly subtract from r0, leaving us with 0x00
ldxdw r2, [r1+INSTRUCTION_SEQUENCE]
ldxdw r3, [r1+ORACLE_SEQUENCE]
jgt r2, r3, update
abort:
mov32 r0, 1
update:
stxdw [r1+ORACLE_SEQUENCE], r2
ldxdw r2, [r1+INSTRUCTION_PRICE]
stxdw [r1+ORACLE_PRICE], r2
exit |
almafa64/arduino-6502-emulator | 652 | test_codes/timer1_one_test.s | DDRA = $0203 ; port A input/output mode
DDRB = $0202 ; port B input/output mode
PORTA = $0200 ; port A data
PORTB = $0201 ; port B data
E = %00000100 ; led display enable
RW = %00000010 ; led display read/write
RS = %00000001 ; led display register select
T1CL = $0204 ; t1 clock low byte
T1CH = $0205 ; t1 clock high byte
ACR = $020B
IFR = $020D
.org $8000
begin:
dec DDRA
stz PORTA
stz ACR
ldx #%00100000 ; A5 pin
loop:
stx PORTA
jsr delay
stz PORTA
jsr delay
bra loop
delay:
lda #$50
sta T1CL
lda #$c3
sta T1CH
delay1:
bit IFR
bvc delay1
lda T1CL
rts
irqb:
rti
.org $fffa
.word $0000
.word begin
.word irqb |
almafa64/arduino-6502-emulator | 3,521 | test_codes/lcd_test.s | DDRA = $0203 ; port A input/output mode
DDRB = $0202 ; port B input/output mode
PORTA = $0200 ; port A data
PORTB = $0201 ; port B data
E = %00000100 ; led display enable
RW = %00000010 ; led display read/write
RS = %00000001 ; led display register select
.org $8000
; Initializes the LCD, uploads custom CGRAM glyphs, prints main_msg, then
; waits for one IRQB interrupt (irqb increments Y) and prints alt_msg.
begin:
dec DDRA ; DDRA resets to $00; dec -> $FF = all pins output on PORTA
dec DDRB ; set all pins to output on PORTB
sei ; mask IRQs during LCD setup
ldx #0 ; FIX: X was previously used uninitialized by the chars loop below
ldy #0 ; FIX: Y (the IRQ-seen flag tested at `loop`) was previously uninitialized
lda #%00111000 ; 8 bit mode, 2 line, 5x8 dot
jsr send_inst
lda #%00000110 ; increment + shift cursor, dont shift display
jsr send_inst
lda #%01000000 ; set CGRAM to $00
jsr send_inst
write_custom_chars: ; send custom characters
lda chars, x
bmi after_customs ; chars table is terminated by $FF (negative)
jsr send_char
inx
bra write_custom_chars
after_customs:
ldx #0
lda #%00001110 ; display on, cursor on, blink off
jsr send_inst
lda #%00000001 ; clear display
jsr send_inst
send_loop:
lda main_msg, x
beq pre_loop ; message is NUL-terminated
jsr send_char
inx
bra send_loop
pre_loop:
lda #%00001100 ; display on, cursor off, blink off
jsr send_inst
ldx #0
cli ; enable IRQB; irqb increments Y on each interrupt
loop:
cpy #1 ; wait for the first interrupt
bne loop
sei
lda #%00000001 ; clear display
jsr send_inst
send_loop2:
lda alt_msg, x
beq end
jsr send_char
inx
bra send_loop2
end:
stp ; halt the processor
; Blocks until the LCD's busy flag clears. Preserves A; temporarily switches
; PORTB to input so the status byte can be read.
lcd_wait:
pha ; save caller's A
inc DDRB ; DDRB is $FF (output) during normal use; inc wraps to $00 = all input
lcd_busy:
lda #RW
sta PORTA
lda #RW | E
sta PORTA ; raise E with RW=1 -> LCD drives the status byte onto PORTB
bit PORTB ; busy flag is bit 7 -> copied into N
bmi lcd_busy ; still busy, poll again
lda #RW
sta PORTA ; drop E
dec DDRB ; restore PORTB to output ($FF)
pla ; restore caller's A
rts
; Sends the instruction byte in A to the LCD (RS=0), waiting for ready first.
send_inst:
jsr lcd_wait
sta PORTB ; instruction byte on the data bus
stz PORTA ; RS=0, RW=0
lda #E
sta PORTA ; pulse E high...
stz PORTA ; ...and low to latch the instruction
rts
; Sends the data byte in A to the LCD (RS=1), waiting for ready first.
send_char:
jsr lcd_wait
sta PORTB ; character byte on the data bus
lda #RS
sta PORTA ; RS=1, RW=0
lda #RS | E
sta PORTA ; pulse E high...
lda #RS
sta PORTA ; ...and low to latch the character
rts
; IRQB handler: counts interrupts in Y (polled by the main `loop`).
irqb:
iny
rti
; Message and glyph data. Bytes $00-$07 in a string select the custom CGRAM
; glyphs uploaded from `chars` (Hungarian accented letters).
main_msg:
; there is a 24 character gap between the two lines
;.asciiz "xxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxx"
; 16 24 16
;.byte " "
;.byte %11110101 ;ü
;.byte "dv"
;.byte %11101111 ;ö
;.byte "z"
;.byte %11101111 ;ö
;.asciiz "llek otthonomban!"
;.byte " K"
;.byte %11101111 ;ö
;.byte "sz"
;.byte %11101111 ;ö
;.byte "n"
;.byte %11101111 ;ö
;.byte "m a felk"
;.byte %11101111 ;ö
;.byte "sz"
;.byte %11101111 ;ö
;.asciiz "nte'st!"
;.asciiz " Legyszives huzz ki! "
.byte " L"
.byte $3 ;é (CGRAM)
.byte "gysz"
.byte $4 ;í (CGRAM)
.byte "ves h"
.byte $6 ;ú (CGRAM)
.asciiz "zz ki! "
; 8 bytes per glyph = one 5x8 CGRAM character; $FF terminates the table.
chars:
.byte $05, $0A, $0E, $11, $11, $11, $0E, $00 ; ő ;alt -> 0x05, 0x0A, 0x00, 0x0E, 0x11, 0x11, 0x11, 0x0E
.byte $05, $0A, $11, $11, $11, $13, $0D, $00 ; ű ;alt -> 0x05, 0x0A, 0x00, 0x11, 0x11, 0x11, 0x13, 0x0D
.byte $02, $04, $0E, $01, $0F, $11, $0F, $00 ; á ;alt -> 0x02, 0x04, 0x00, 0x0E, 0x01, 0x0F, 0x11, 0x0F
.byte $02, $04, $0E, $11, $1F, $10, $0E, $00 ; é ;alt -> 0x02, 0x04, 0x00, 0x0E, 0x11, 0x1F, 0x10, 0x0E
.byte $02, $04, $00, $0C, $04, $04, $0E, $00 ; í ;alt -> 0x02, 0x04, 0x00, 0x0C, 0x04, 0x04, 0x04, 0x0E
.byte $02, $04, $0E, $11, $11, $11, $0E, $00 ; ó ;alt -> 0x02, 0x04, 0x00, 0x0E, 0x11, 0x11, 0x11, 0x0E
.byte $02, $04, $11, $11, $11, $13, $0D, $00 ; ú ;alt -> 0x02, 0x04, 0x00, 0x11, 0x11, 0x11, 0x13, 0x0D
.byte $FF ; end
alt_msg:
.byte " NE PISZK"
.byte $2 ;á (CGRAM)
.asciiz "LD AZT A GOMBOT!!!!"
; Hardware vectors.
.org $fffa
.word $0000 ; NMI vector (unused)
.word begin ; reset vector
.word irqb |
almafa64/arduino-6502-emulator | 1,067 | test_codes/timer1_free_test.s | DDRA = $0203 ; port A input/output mode
DDRB = $0202 ; port B input/output mode
PORTA = $0200 ; port A data
PORTB = $0201 ; port B data
E = %00000100 ; led display enable
RW = %00000010 ; led display read/write
RS = %00000001 ; led display register select
T1CL = $0204 ; t1 clock low byte
T1CH = $0205 ; t1 clock high byte
ACR = $020B
IFR = $020D
IER = $020E
ticks = $00
toggle_time = $04
.org $8000
begin:
dec DDRB
stz PORTB
stz ACR
stz toggle_time
jsr init_timer
loop:
jsr update_led
bra loop
; Toggles the LED when 25 or more ticks have elapsed since the last toggle.
; Uses only the low tick byte; mod-256 subtraction handles wraparound.
update_led:
sec
lda ticks
sbc toggle_time ; A = ticks - toggle_time (mod 256)
cmp #25 ; have 250ms elapsed (assumes ~10 ms/tick from init_timer — confirm clock)
bcc exit_update_led
lda #$01
eor PORTB
sta PORTB ; toggle led
lda ticks
sta toggle_time ; remember when we toggled
exit_update_led:
rts
; Zeroes the tick counter and starts Timer 1 in continuous (free-run)
; interrupt mode.
init_timer:
stz ticks + 0
stz ticks + 1
stz ticks + 2
stz ticks + 3
lda #%01000000 ; ACR bit 6 = T1 continuous interrupts, PB7 output disabled
sta ACR
lda #$0E ; T1 latch = $270E = 9998 -> interrupt every 10000 cycles
sta T1CL
lda #$27
sta T1CH ; writing the high byte loads the counter and starts T1
lda #%11000000 ; IER: bit 7 = set-bits, bit 6 = enable Timer 1 interrupt
sta IER
cli ; allow the timer interrupt
rts
; Timer 1 interrupt: acknowledge and bump the 32-bit tick counter.
; A is preserved (bit and inc-memory do not modify it); rti restores flags.
irqb:
bit T1CL ; reading T1CL clears the Timer 1 interrupt flag
inc ticks + 0 ; 32-bit increment with ripple carry through the bytes
bne end_irq
inc ticks + 1
bne end_irq
inc ticks + 2
bne end_irq
inc ticks + 3
end_irq:
rti
.org $fffa
.word $0000 ; NMI vector (unused)
.word begin ; reset vector
.word irqb |
alperenbekci/buildh3r-september | 9,203 | zkm/runtime/entrypoint/src/memset.s | // This is musl-libc memset commit 5613a1486e6a6fc3988be6561f41b07b2647d80f:
//
// src/string/memset.c
//
// This was compiled into assembly with:
//
// clang10 -target mips -O3 -S memset.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contibutors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.file "memset.c"
.globl memset # -- Begin function memset
.p2align 2
.type memset,@function
.set nomicromips
.set nomips16
.ent memset
# void *memset(void *dst, int c, size_t n) — MIPS o32, compiled by clang from
# musl's memset.c. Args: $4 = dst, $5 = c, $6 = n; returns dst in $2.
# NOTE: assembled with .set noreorder — the instruction AFTER each branch is a
# delay slot and executes whether or not the branch is taken.
memset: # @memset
.frame $fp,8,$ra
.mask 0xc0000000,-4
.fmask 0x00000000,0
.set noreorder
.set nomacro
.set noat
# %bb.0:
addiu $sp, $sp, -8
sw $ra, 4($sp) # 4-byte Folded Spill
sw $fp, 0($sp) # 4-byte Folded Spill
move $fp, $sp
beqz $6, $BBmemset0_9
nop
# %bb.1:
# Head/tail byte stores: fill the first and last few bytes so the aligned
# word loop below never has to handle ragged edges. $2 = dst + n (one past end).
addu $2, $6, $4
sltiu $1, $6, 3
sb $5, 0($4)
bnez $1, $BBmemset0_9
sb $5, -1($2)
# %bb.2:
sltiu $1, $6, 7
sb $5, 2($4)
sb $5, 1($4)
sb $5, -3($2)
bnez $1, $BBmemset0_9
sb $5, -2($2)
# %bb.3:
sltiu $1, $6, 9
sb $5, 3($4)
bnez $1, $BBmemset0_9
sb $5, -4($2)
# %bb.4:
# Build a 4-byte splat of c in $2; $3 = dst rounded up to 4-byte alignment,
# $5 = remaining length rounded down to a multiple of 4.
andi $2, $5, 255
negu $1, $4
sll $5, $2, 8
sll $7, $2, 16
andi $1, $1, 3
or $5, $5, $2
sll $2, $2, 24
addu $3, $4, $1
subu $1, $6, $1
or $5, $7, $5
or $2, $2, $5
addiu $5, $zero, -4
and $5, $1, $5
sw $2, 0($3)
addu $6, $3, $5
sltiu $1, $5, 9
bnez $1, $BBmemset0_9
sw $2, -4($6)
# %bb.5:
sltiu $1, $5, 25
sw $2, 8($3)
sw $2, 4($3)
sw $2, -8($6)
bnez $1, $BBmemset0_9
sw $2, -12($6)
# %bb.6:
andi $1, $3, 4
sw $2, 24($3)
sw $2, 20($3)
sw $2, 16($3)
sw $2, 12($3)
sw $2, -16($6)
sw $2, -20($6)
sw $2, -24($6)
sw $2, -28($6)
ori $6, $1, 24
subu $5, $5, $6
sltiu $1, $5, 32
bnez $1, $BBmemset0_9
nop
# %bb.7:
# Bulk loop: 32 bytes (8 word stores) per iteration.
addu $3, $3, $6
$BBmemset0_8: # =>This Inner Loop Header: Depth=1
addiu $5, $5, -32
sw $2, 24($3)
sw $2, 16($3)
sw $2, 8($3)
sw $2, 0($3)
sw $2, 28($3)
sw $2, 20($3)
sw $2, 12($3)
sw $2, 4($3)
sltiu $1, $5, 32
beqz $1, $BBmemset0_8
addiu $3, $3, 32
$BBmemset0_9:
# Epilogue: return the original dst pointer.
move $2, $4
move $sp, $fp
lw $fp, 0($sp) # 4-byte Folded Reload
lw $ra, 4($sp) # 4-byte Folded Reload
jr $ra
addiu $sp, $sp, 8
.set at
.set macro
.set reorder
.end memset
$memset_func_end0:
.size memset, ($memset_func_end0)-memset
# -- End function
.ident "clang version 10.0.0-4ubuntu1 "
.section ".note.GNU-stack","",@progbits
.addrsig |
alperenbekci/buildh3r-september | 9,951 | zkm/runtime/entrypoint/src/memcpy.s | // This is musl-libc commit 3b0a370020c4d5b80ff32a609e5322b7760f0dc4:
//
// src/string/memcpy.c
//
// This was compiled into assembly with:
//
// clang-10 -target mips -O3 -S memcpy.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contibutors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.file "memcpy.c"
.globl memccpy # -- Begin function memccpy
.p2align 2
.type memccpy,@function
.set nomicromips
.set nomips16
.ent memccpy
# void *memccpy(void *dst, const void *src, int c, size_t n) — MIPS o32,
# compiled by clang from musl. Copies at most $7 = n bytes from $5 = src to
# $4 = dst, stopping after the first byte equal to $6 = c; returns ($2) a
# pointer one past the copied c, or NULL if c was not found.
# NOTE(review): the file header says memcpy.c but the symbol is memccpy —
# presumably the header comment is stale; confirm against the repo.
# NOTE: .set noreorder — the instruction after every branch is a delay slot.
memccpy: # @memccpy
.frame $fp,8,$ra
.mask 0xc0000000,-4
.fmask 0x00000000,0
.set noreorder
.set nomacro
.set noat
# %bb.0:
addiu $sp, $sp, -8
sw $ra, 4($sp) # 4-byte Folded Spill
sw $fp, 0($sp) # 4-byte Folded Spill
move $fp, $sp
# If src and dst have different alignments (mod 4), fall back to the
# byte-at-a-time loop; $3 = c truncated to a byte.
xor $1, $5, $4
andi $1, $1, 3
beqz $1, $BBmemcpy0_7
andi $3, $6, 255
$BBmemcpy0_1:
# Byte-copy loop: copy until n runs out or a byte equal to c is stored.
beqz $7, $BBmemcpy0_5
nop
# %bb.2:
addiu $2, $4, 1
$BBmemcpy0_3: # =>This Inner Loop Header: Depth=1
lbu $1, 0($5)
beq $1, $3, $BBmemcpy0_6
sb $1, -1($2)
# %bb.4: # in Loop: Header=BBmemcpy0_3 Depth=1
addiu $2, $2, 1
addiu $7, $7, -1
bnez $7, $BBmemcpy0_3
addiu $5, $5, 1
$BBmemcpy0_5:
# c never found: return NULL.
addiu $2, $zero, 0
$BBmemcpy0_6:
# Common epilogue; $2 already holds the return value.
move $sp, $fp
lw $fp, 0($sp) # 4-byte Folded Reload
lw $ra, 4($sp) # 4-byte Folded Reload
jr $ra
addiu $sp, $sp, 8
$BBmemcpy0_7:
# Same alignment: copy leading bytes until src is word-aligned.
andi $6, $5, 3
beqz $7, $BBmemcpy0_14
sltu $2, $zero, $6
# %bb.8:
beqz $6, $BBmemcpy0_14
nop
# %bb.9:
addiu $2, $7, -1
addiu $6, $zero, 0
$BBmemcpy0_10: # =>This Inner Loop Header: Depth=1
addu $9, $5, $6
addu $8, $4, $6
lbu $1, 0($9)
beq $1, $3, $BBmemcpy0_22
sb $1, 0($8)
# %bb.11: # in Loop: Header=BBmemcpy0_10 Depth=1
addiu $1, $9, 1
addiu $8, $6, 1
beq $2, $6, $BBmemcpy0_13
andi $9, $1, 3
# %bb.12: # in Loop: Header=BBmemcpy0_10 Depth=1
bnez $9, $BBmemcpy0_10
move $6, $8
$BBmemcpy0_13:
sltu $2, $zero, $9
subu $7, $7, $8
addu $4, $4, $8
addu $5, $5, $8
$BBmemcpy0_14:
beqz $2, $BBmemcpy0_17
nop
# %bb.15:
bnez $7, $BBmemcpy0_6
addiu $2, $4, 1
# %bb.16:
j $BBmemcpy0_5
nop
$BBmemcpy0_17:
sltiu $1, $7, 4
bnez $1, $BBmemcpy0_1
nop
# %bb.18:
# Word-at-a-time loop. $6 = 4-byte splat of c; XORing a word with it zeroes
# any byte equal to c, detected with the classic (v + 0xFEFEFEFF) & ~v &
# 0x80808080 zero-byte test ($8 = 0xFEFEFEFF, $9 = 0x80808080).
sll $1, $3, 8
sll $2, $3, 16
or $1, $1, $3
or $1, $2, $1
sll $2, $3, 24
or $6, $2, $1
lui $1, 65278
andi $2, $7, 3
ori $8, $1, 65279
lui $1, 32896
ori $9, $1, 32896
$BBmemcpy0_19: # =>This Inner Loop Header: Depth=1
lw $10, 0($5)
xor $1, $10, $6
addu $11, $1, $8
not $1, $1
and $1, $1, $11
and $1, $1, $9
bnez $1, $BBmemcpy0_1
nop
# %bb.20: # in Loop: Header=BBmemcpy0_19 Depth=1
addiu $7, $7, -4
sw $10, 0($4)
addiu $4, $4, 4
sltiu $1, $7, 4
beqz $1, $BBmemcpy0_19
addiu $5, $5, 4
# %bb.21:
# Fewer than 4 bytes left: finish with the byte loop.
j $BBmemcpy0_1
move $7, $2
$BBmemcpy0_22:
# c found during the alignment loop: return dst + index + 1.
j $BBmemcpy0_6
addiu $2, $8, 1
.set at
.set macro
.set reorder
.end memccpy
$memcpy_func_end0:
.size memccpy, ($memcpy_func_end0)-memccpy
# -- End function
.ident "clang version 10.0.0-4ubuntu1 "
.section ".note.GNU-stack","",@progbits
.addrsig |
alperenbekci/buildh3r-september | 501 | zkm/go-runtime/zkm_runtime/syscall_mips.s | //go:build mips
// +build mips
TEXT ·SyscallWrite(SB), $0-24
MOVW $4004, R2 // #define SYS_write 4004
MOVW fd+0(FP), R4
MOVW write_buf+4(FP), R5
MOVW nbytes+16(FP), R6
SYSCALL
MOVW R2, ret+0(FP)
RET
TEXT ·SyscallHintLen(SB), $0-4
MOVW $0xF0, R2 // #define SYS_hint_len 0xF0
SYSCALL
MOVW R2, ret+0(FP)
RET
TEXT ·SyscallHintRead(SB), $0-16
MOVW $0xF1, R2 // #define SYS_hint_read 0xF1
MOVW ptr+0(FP), R4
MOVW len+12(FP), R5
SYSCALL
RET
|
alt-22/incubator-teaclave-sgx-sdk | 2,830 | sgx_unwind/libunwind/src/x86_64/setcontext.S | /* libunwind - a platform-independent unwind library
Copyright (C) 2007 Google, Inc
Contributed by Arun Sharma <arun.sharma@google.com>
Copyright (C) 2010 Konstantin Belousov <kib@freebsd.org>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "ucontext_i.h"
/* int _Ux86_64_setcontext (const ucontext_t *ucp)
Restores the machine context provided.
Unlike the libc implementation, doesn't clobber %rax
*/
.global _Ux86_64_setcontext
.type _Ux86_64_setcontext, @function
/* Restores the machine context in *%rdi (see header comment above): FP state
   first, then the general registers. Unlike libc's setcontext, %rax is
   restored too, and the saved RIP is reached via a pushed return address. */
_Ux86_64_setcontext:
#if defined __linux__
/* restore fp state */
mov UC_MCONTEXT_FPREGS_PTR(%rdi),%r8
fldenv (%r8)
ldmxcsr FPREGS_OFFSET_MXCSR(%r8)
#elif defined __FreeBSD__
/* restore fp state */
cmpq $UC_MCONTEXT_FPOWNED_FPU,UC_MCONTEXT_OWNEDFP(%rdi)
jne 1f
cmpq $UC_MCONTEXT_FPFMT_XMM,UC_MCONTEXT_FPFORMAT(%rdi)
jne 1f
fxrstor UC_MCONTEXT_FPSTATE(%rdi)
1:
#else
#error Port me
#endif
/* restore the rest of the state */
mov UC_MCONTEXT_GREGS_R8(%rdi),%r8
mov UC_MCONTEXT_GREGS_R9(%rdi),%r9
mov UC_MCONTEXT_GREGS_RBX(%rdi),%rbx
mov UC_MCONTEXT_GREGS_RBP(%rdi),%rbp
mov UC_MCONTEXT_GREGS_R12(%rdi),%r12
mov UC_MCONTEXT_GREGS_R13(%rdi),%r13
mov UC_MCONTEXT_GREGS_R14(%rdi),%r14
mov UC_MCONTEXT_GREGS_R15(%rdi),%r15
mov UC_MCONTEXT_GREGS_RSI(%rdi),%rsi
mov UC_MCONTEXT_GREGS_RDX(%rdi),%rdx
mov UC_MCONTEXT_GREGS_RAX(%rdi),%rax
mov UC_MCONTEXT_GREGS_RCX(%rdi),%rcx
mov UC_MCONTEXT_GREGS_RSP(%rdi),%rsp
/* push the return address on the stack */
/* %rcx is borrowed as scratch for the saved RIP, then reloaded; %rdi is
   restored last because it still points at the ucontext until then. */
mov UC_MCONTEXT_GREGS_RIP(%rdi),%rcx
push %rcx
mov UC_MCONTEXT_GREGS_RCX(%rdi),%rcx
mov UC_MCONTEXT_GREGS_RDI(%rdi),%rdi
retq
.size _Ux86_64_setcontext, . - _Ux86_64_setcontext
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
|
alt-22/incubator-teaclave-sgx-sdk | 1,438 | sgx_unwind/libunwind/src/x86_64/siglongjmp.S | /* libunwind - a platform-independent unwind library
Copyright (C) 2004 Hewlett-Packard Co
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
.globl _UI_siglongjmp_cont
.type _UI_siglongjmp_cont, @function
/* Continuation point used by libunwind's siglongjmp support: simply returns
   to the address already arranged on the stack by the resume machinery. */
_UI_siglongjmp_cont:
retq
.size _UI_siglongjmp_cont, . - _UI_siglongjmp_cont
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
|
alt-22/incubator-teaclave-sgx-sdk | 4,427 | sgx_unwind/libunwind/src/x86_64/getcontext.S | /* libunwind - a platform-independent unwind library
Copyright (C) 2008 Google, Inc
Contributed by Paul Pluzhnikov <ppluzhnikov@google.com>
Copyright (C) 2010 Konstantin Belousov <kib@freebsd.org>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "ucontext_i.h"
/* int _Ux86_64_getcontext (ucontext_t *ucp)
Saves the machine context in UCP necessary for libunwind.
Unlike the libc implementation, we don't save the signal mask
and hence avoid the cost of a system call per unwind.
*/
/* int _Ux86_64_getcontext (ucontext_t *ucp)
   ABI:  System V AMD64.  In: %rdi = ucp.  Out: %rax = 0 (always).
   Clobbers: %rax, flags, and %r8 on the Linux path.
   Stores the callee-saved registers, the argument registers, and the
   caller's RSP/RIP into ucp->uc_mcontext (offsets come from ucontext_i.h).
   The signal mask is deliberately NOT saved (see the header comment). */
.global _Ux86_64_getcontext
.type _Ux86_64_getcontext, @function
_Ux86_64_getcontext:
.cfi_startproc
/* Callee saved: RBX, RBP, R12-R15 */
movq %r12, UC_MCONTEXT_GREGS_R12(%rdi)
movq %r13, UC_MCONTEXT_GREGS_R13(%rdi)
movq %r14, UC_MCONTEXT_GREGS_R14(%rdi)
movq %r15, UC_MCONTEXT_GREGS_R15(%rdi)
movq %rbp, UC_MCONTEXT_GREGS_RBP(%rdi)
movq %rbx, UC_MCONTEXT_GREGS_RBX(%rdi)
/* Save argument registers (not strictly needed, but setcontext
restores them, so don't restore garbage). */
movq %r8, UC_MCONTEXT_GREGS_R8(%rdi)
movq %r9, UC_MCONTEXT_GREGS_R9(%rdi)
movq %rdi, UC_MCONTEXT_GREGS_RDI(%rdi)
movq %rsi, UC_MCONTEXT_GREGS_RSI(%rdi)
movq %rdx, UC_MCONTEXT_GREGS_RDX(%rdi)
movq %rax, UC_MCONTEXT_GREGS_RAX(%rdi)
movq %rcx, UC_MCONTEXT_GREGS_RCX(%rdi)
#if defined __linux__
/* Save fp state (not needed, except for setcontext not
restoring garbage). */
leaq UC_MCONTEXT_FPREGS_MEM(%rdi),%r8
movq %r8, UC_MCONTEXT_FPREGS_PTR(%rdi)
fnstenv (%r8)
stmxcsr FPREGS_OFFSET_MXCSR(%r8)
#elif defined __FreeBSD__
fxsave UC_MCONTEXT_FPSTATE(%rdi)
movq $UC_MCONTEXT_FPOWNED_FPU,UC_MCONTEXT_OWNEDFP(%rdi)
movq $UC_MCONTEXT_FPFMT_XMM,UC_MCONTEXT_FPFORMAT(%rdi)
/* Save rflags and segment registers, so that sigreturn(2)
does not complain. */
pushfq
.cfi_adjust_cfa_offset 8
popq UC_MCONTEXT_RFLAGS(%rdi)
.cfi_adjust_cfa_offset -8
movl $0, UC_MCONTEXT_FLAGS(%rdi)
movw %cs, UC_MCONTEXT_CS(%rdi)
movw %ss, UC_MCONTEXT_SS(%rdi)
#if 0
/* Setting the flags to 0 above disables restore of segment
registers from the context */
movw %ds, UC_MCONTEXT_DS(%rdi)
movw %es, UC_MCONTEXT_ES(%rdi)
movw %fs, UC_MCONTEXT_FS(%rdi)
movw %gs, UC_MCONTEXT_GS(%rdi)
#endif
movq $UC_MCONTEXT_MC_LEN_VAL, UC_MCONTEXT_MC_LEN(%rdi)
#else
#error Port me
#endif
/* Record the CALLER's stack pointer and return address as RSP/RIP,
   i.e. the context as it will be after this function returns. */
leaq 8(%rsp), %rax /* exclude this call. */
movq %rax, UC_MCONTEXT_GREGS_RSP(%rdi)
movq 0(%rsp), %rax
movq %rax, UC_MCONTEXT_GREGS_RIP(%rdi)
xorq %rax, %rax /* return 0 */
retq
.cfi_endproc
.size _Ux86_64_getcontext, . - _Ux86_64_getcontext
/* int _Ux86_64_getcontext_trace (ucontext_t *ucp)
Saves limited machine context in UCP necessary for libunwind.
Unlike _Ux86_64_getcontext, saves only the parts needed for
fast trace. If fast trace fails, caller will have to get the
full context.
*/
/* int _Ux86_64_getcontext_trace (ucontext_t *ucp)
   In: %rdi = ucp.  Out: %rax = 0.  Clobbers: %rax, flags.
   Fast-path variant of _Ux86_64_getcontext: stores only RBP, RBX and the
   caller's RSP/RIP -- the minimum needed for a frame-chain ("fast trace")
   walk.  If fast trace fails, the caller must capture the full context. */
.global _Ux86_64_getcontext_trace
.hidden _Ux86_64_getcontext_trace
.type _Ux86_64_getcontext_trace, @function
_Ux86_64_getcontext_trace:
.cfi_startproc
/* Save only RBP, RBX, RSP, RIP - exclude this call. */
movq %rbp, UC_MCONTEXT_GREGS_RBP(%rdi)
movq %rbx, UC_MCONTEXT_GREGS_RBX(%rdi)
leaq 8(%rsp), %rax /* caller's RSP = ours minus the return-address slot */
movq %rax, UC_MCONTEXT_GREGS_RSP(%rdi)
movq 0(%rsp), %rax /* caller's RIP = our return address */
movq %rax, UC_MCONTEXT_GREGS_RIP(%rdi)
xorq %rax, %rax /* return 0 */
retq
.cfi_endproc
.size _Ux86_64_getcontext_trace, . - _Ux86_64_getcontext_trace
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
|
alt-22/incubator-teaclave-sgx-sdk | 1,520 | sgx_unwind/libunwind/src/x86_64/longjmp.S | /* libunwind - a platform-independent unwind library
Copyright (C) 2004-2005 Hewlett-Packard Co
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/* _UI_longjmp_cont: continuation stub for the longjmp resume path.
   Expects (per the inline comments): %rax = target instruction pointer,
   %rdx = value longjmp should appear to return.  Transfers control to
   the target IP by pushing it and executing ret. */
.globl _UI_longjmp_cont
.type _UI_longjmp_cont, @function
_UI_longjmp_cont:
push %rax /* push target IP as return address */
mov %rdx, %rax /* set up return-value */
retq /* "return" to the pushed target IP */
.size _UI_longjmp_cont, .-_UI_longjmp_cont
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
|
alt-22/incubator-teaclave-sgx-sdk | 15,163 | samplecode/decryption_enclave/enclave/bellerophon/crypto/pcl_ghash-x86_64.s | /*
* Copyright (C) 2011-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* ====================================================================
* Copyright (c) 1998-2017 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
/*
* Content from openssl-1.1.0e/crypto/modes/ghash-x86_64.s
* which is auto-generated by openssl-1.1.0e/crypto/modes/asm/ghash-x86_64.pl
*/
.text
/*
 * void pcl_gcm_init_clmul(Htable (%rdi), const H (%rsi))
 * Reads the 16-byte GHASH key at (%rsi) and writes precomputed table
 * entries to offsets 0,16,32,48,64,80 of (%rdi), later consumed by
 * pcl_gcm_gmult_clmul / pcl_gcm_ghash_clmul below.
 * Uses only caller-saved xmm registers; no stack usage.
 * Encoding notes (auto-generated OpenSSL output, kept byte-identical):
 *   ".byte 102,15,58,68,..."  = pclmulqdq (carry-less multiply)
 *   ".byte 102,15,58,15,..."  = palignr
 *   ".byte 0xf3,0xc3"         = rep ret
 */
.globl pcl_gcm_init_clmul
.type pcl_gcm_init_clmul,@function
.align 16
pcl_gcm_init_clmul:
.L_init_clmul:
/* Load H and convert it to the multiplier form: H <<= 1 with conditional
   reduction by .L0x1c2_polynomial (the pcmpgtd extracts the carry sign). */
movdqu (%rsi),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand .L0x1c2_polynomial(%rip),%xmm5
pxor %xmm5,%xmm2
/* Square H: Karatsuba carry-less multiply (3x pclmulqdq) followed by the
   shift-based reduction sequence.  The same mul+reduce pattern repeats
   inline twice more below to build higher powers of H. */
pshufd $78,%xmm2,%xmm6
movdqa %xmm2,%xmm0
pxor %xmm2,%xmm6
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
/* reduction */
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
/* Store the first table entries: H at 0(%rdi), H^2 at 16(%rdi), and the
   folded Karatsuba helper (palignr-combined) at 32(%rdi). */
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,0(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%rdi)
/* Next power: multiply the running value by H again (same pattern). */
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
/* Keep this power in xmm5, then multiply by H once more. */
movdqa %xmm0,%xmm5
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
/* Store the remaining entries at 48/64/80(%rdi) (value, next power, and
   the palignr-combined Karatsuba helper). */
pshufd $78,%xmm5,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm5,%xmm3
movdqu %xmm5,48(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,64(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,80(%rdi)
.byte 0xf3,0xc3
.size pcl_gcm_init_clmul,.-pcl_gcm_init_clmul
/*
 * void pcl_gcm_gmult_clmul(Xi (%rdi), const Htable (%rsi))
 * Multiplies the 16-byte state at (%rdi) by the hash key, in place,
 * using the table precomputed by pcl_gcm_init_clmul (entries at 0 and
 * 32 of %rsi).  The state is byte-swapped via .Lbswap_mask on load and
 * again on store.  Uses only caller-saved xmm registers; no stack usage.
 * ".byte 102,15,56,0,..." = pshufb; ".byte 102,15,58,68,..." = pclmulqdq;
 * ".byte 0xf3,0xc3" = rep ret.
 */
.globl pcl_gcm_gmult_clmul
.type pcl_gcm_gmult_clmul,@function
.align 16
pcl_gcm_gmult_clmul:
.L_gmult_clmul:
movdqu (%rdi),%xmm0
movdqa .Lbswap_mask(%rip),%xmm5
movdqu (%rsi),%xmm2
movdqu 32(%rsi),%xmm4
.byte 102,15,56,0,197
/* Karatsuba carry-less multiply: 3x pclmulqdq plus xor folding. */
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
/* Shift-based reduction of the 256-bit product back to 128 bits. */
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
/* Byte-swap back and store the updated state. */
.byte 102,15,56,0,197
movdqu %xmm0,(%rdi)
.byte 0xf3,0xc3
.size pcl_gcm_gmult_clmul,.-pcl_gcm_gmult_clmul
/*
 * void pcl_gcm_ghash_clmul(Xi (%rdi), const Htable (%rsi),
 *                          const inp (%rdx), len (%rcx))
 * Folds len bytes of input at (%rdx) into the 16-byte GHASH state at
 * (%rdi), using the power-of-H table written by pcl_gcm_init_clmul.
 * Structure: 4 blocks per iteration in .Lmod4_loop (when len allows),
 * 2 blocks per iteration in .Lmod_loop, a final single block in
 * .Lodd_tail, finishing at .Ldone.  Input blocks are byte-swapped via
 * .Lbswap_mask (kept in %xmm10).  Only caller-saved xmm regs are used.
 * The "# Commenting out Silvermont optimizations" lines preserve, as
 * comments, the CPU-dispatch the upstream OpenSSL generator emitted.
 * ".byte ..." sequences encode pshufb/pclmulqdq; ".byte 0xf3,0xc3" = rep ret.
 */
.globl pcl_gcm_ghash_clmul
.type pcl_gcm_ghash_clmul,@function
.align 32
pcl_gcm_ghash_clmul:
.L_ghash_clmul:
movdqa .Lbswap_mask(%rip),%xmm10
movdqu (%rdi),%xmm0
movdqu (%rsi),%xmm2
movdqu 32(%rsi),%xmm7
.byte 102,65,15,56,0,194
subq $0x10,%rcx
jz .Lodd_tail
movdqu 16(%rsi),%xmm6
# Commenting out Silvermont optimizations: movl OPENSSL_ia32cap_P+4(%rip),%eax
cmpq $0x30,%rcx
jb .Lskip4x
# Commenting out Silvermont optimizations: andl $71303168,%eax
# Commenting out Silvermont optimizations: cmpl $4194304,%eax
# Commenting out Silvermont optimizations: je .Lskip4x
/* 4-blocks-at-a-time path: load higher powers of H and prime the
   pipeline with the first 4 input blocks. */
subq $0x30,%rcx
movq $0xA040608020C0E000,%rax
movdqu 48(%rsi),%xmm14
movdqu 64(%rsi),%xmm15
movdqu 48(%rdx),%xmm3
movdqu 32(%rdx),%xmm11
.byte 102,65,15,56,0,218
.byte 102,69,15,56,0,218
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm11,%xmm12
.byte 102,68,15,58,68,222,0
.byte 102,68,15,58,68,238,17
.byte 102,68,15,58,68,231,16
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
xorps %xmm12,%xmm4
movdqu 16(%rdx),%xmm11
movdqu 0(%rdx),%xmm8
.byte 102,69,15,56,0,218
.byte 102,69,15,56,0,194
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm8,%xmm0
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,69,15,58,68,238,17
.byte 102,68,15,58,68,231,0
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jc .Ltail4x
jmp .Lmod4_loop
.align 32
/* Main loop: process 64 bytes per iteration, interleaving the multiply
   of the accumulated state with the reduction of the previous result. */
.Lmod4_loop:
.byte 102,65,15,58,68,199,0
xorps %xmm12,%xmm4
movdqu 48(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,65,15,58,68,207,17
xorps %xmm3,%xmm0
movdqu 32(%rdx),%xmm3
movdqa %xmm11,%xmm13
.byte 102,68,15,58,68,199,16
pshufd $78,%xmm11,%xmm12
xorps %xmm5,%xmm1
pxor %xmm11,%xmm12
.byte 102,65,15,56,0,218
movups 32(%rsi),%xmm7
xorps %xmm4,%xmm8
.byte 102,68,15,58,68,218,0
pshufd $78,%xmm3,%xmm4
pxor %xmm0,%xmm8
movdqa %xmm3,%xmm5
pxor %xmm1,%xmm8
pxor %xmm3,%xmm4
movdqa %xmm8,%xmm9
.byte 102,68,15,58,68,234,17
pslldq $8,%xmm8
psrldq $8,%xmm9
pxor %xmm8,%xmm0
movdqa .L7_mask(%rip),%xmm8
pxor %xmm9,%xmm1
.byte 102,76,15,110,200
pand %xmm0,%xmm8
.byte 102,69,15,56,0,200
pxor %xmm0,%xmm9
.byte 102,68,15,58,68,231,0
psllq $57,%xmm9
movdqa %xmm9,%xmm8
pslldq $8,%xmm9
.byte 102,15,58,68,222,0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
movdqu 0(%rdx),%xmm8
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,238,17
xorps %xmm11,%xmm3
movdqu 16(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,15,58,68,231,16
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
.byte 102,69,15,56,0,194
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
movdqa %xmm11,%xmm13
pxor %xmm12,%xmm4
pshufd $78,%xmm11,%xmm12
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm1
.byte 102,69,15,58,68,238,17
xorps %xmm11,%xmm3
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,68,15,58,68,231,0
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jnc .Lmod4_loop
/* Drain the pipeline: finish the outstanding 4-block multiply/reduce. */
.Ltail4x:
.byte 102,65,15,58,68,199,0
.byte 102,65,15,58,68,207,17
.byte 102,68,15,58,68,199,16
xorps %xmm12,%xmm4
xorps %xmm3,%xmm0
xorps %xmm5,%xmm1
pxor %xmm0,%xmm1
pxor %xmm4,%xmm8
pxor %xmm1,%xmm8
pxor %xmm0,%xmm1
movdqa %xmm8,%xmm9
psrldq $8,%xmm8
pslldq $8,%xmm9
pxor %xmm8,%xmm1
pxor %xmm9,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
addq $0x40,%rcx
jz .Ldone
movdqu 32(%rsi),%xmm7
subq $0x10,%rcx
jz .Lodd_tail
/* Fewer than 4 blocks remain (or input was short): 2-block path. */
.Lskip4x:
movdqu (%rdx),%xmm8
movdqu 16(%rdx),%xmm3
.byte 102,69,15,56,0,194
.byte 102,65,15,56,0,218
pxor %xmm8,%xmm0
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
leaq 32(%rdx),%rdx
nop
subq $0x20,%rcx
jbe .Leven_tail
nop
jmp .Lmod_loop
.align 32
/* 2-blocks-per-iteration loop with interleaved reduction. */
.Lmod_loop:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
movdqu (%rdx),%xmm9
pxor %xmm0,%xmm8
.byte 102,69,15,56,0,202
movdqu 16(%rdx),%xmm3
pxor %xmm1,%xmm8
pxor %xmm9,%xmm1
pxor %xmm8,%xmm4
.byte 102,65,15,56,0,218
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm3,%xmm5
movdqa %xmm0,%xmm9
movdqa %xmm0,%xmm8
psllq $5,%xmm0
pxor %xmm0,%xmm8
.byte 102,15,58,68,218,0
psllq $1,%xmm0
pxor %xmm8,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm8
pslldq $8,%xmm0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pshufd $78,%xmm5,%xmm4
pxor %xmm8,%xmm1
pxor %xmm5,%xmm4
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,234,17
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
pxor %xmm9,%xmm0
leaq 32(%rdx),%rdx
psrlq $1,%xmm0
.byte 102,15,58,68,231,0
pxor %xmm1,%xmm0
subq $0x20,%rcx
ja .Lmod_loop
/* Final 2-block multiply/reduce (no more input to prefetch). */
.Leven_tail:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
pxor %xmm0,%xmm8
pxor %xmm1,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testq %rcx,%rcx
jnz .Ldone
/* One last 16-byte block. */
.Lodd_tail:
movdqu (%rdx),%xmm8
.byte 102,69,15,56,0,194
pxor %xmm8,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,223,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
/* Byte-swap the state back and write it out. */
.Ldone:
.byte 102,65,15,56,0,194
movdqu %xmm0,(%rdi)
.byte 0xf3,0xc3
.size pcl_gcm_ghash_clmul,.-pcl_gcm_ghash_clmul
/* 64-byte-aligned constant pool for the GHASH routines above. */
.align 64
.Lbswap_mask:
/* pshufb mask that reverses all 16 bytes (endianness swap). */
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
/* Reduction constant used by pcl_gcm_init_clmul (high byte 0xc2). */
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
/* Low-3-bits mask, loaded in .Lmod4_loop. */
.long 7,0,7,0
.L7_mask_poly:
/* NOTE(review): not referenced in this file; kept to match the
   upstream OpenSSL generator output. */
.long 7,0,450,0
|
alt-22/incubator-teaclave-sgx-sdk | 18,264 | samplecode/decryption_enclave/enclave/bellerophon/crypto/pcl_vpaes-x86_64.s | /*
* Copyright (C) 2011-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* ====================================================================
* Copyright (c) 1998-2017 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
/*
* Content from openssl-1.1.0e/crypto/aes/vpaes-x86_64.s
* which is auto-generated by openssl-1.1.0e/crypto/aes/asm/vpaes-x86_64.pl
*/
.text
/*
 * _pcl_vpaes_encrypt_core (internal)
 * In:  %xmm0 = one 16-byte block, %rdx = key schedule
 *      (round count at 240(%rdx)); %xmm9..%xmm15 must already hold the
 *      .Lk_* constants loaded by _pcl_vpaes_preheat -- note this routine
 *      also reads %xmm12..%xmm15, which preheat is expected to fill.
 * Out: %xmm0 = encrypted block.
 * Clobbers: %rax, %r9, %r10, %r11, %xmm1..%xmm5, flags.
 * ".byte 102,15,56,0,..." encodes pshufb; ".byte 0xf3,0xc3" is rep ret.
 */
.type _pcl_vpaes_encrypt_core,@function
.align 16
_pcl_vpaes_encrypt_core:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa .Lk_ipt(%rip),%xmm2
pandn %xmm0,%xmm1
movdqu (%r9),%xmm5
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa .Lk_ipt+16(%rip),%xmm0
.byte 102,15,56,0,193
pxor %xmm5,%xmm2
addq $16,%r9
pxor %xmm2,%xmm0
leaq .Lk_mc_backward(%rip),%r10
jmp .Lenc_entry
.align 16
/* One middle round: table lookups (pshufb) + round-key mix; %r11 cycles
   through 0x00/0x10/0x20/0x30 to index the mc_backward/forward tables. */
.Lenc_loop:
movdqa %xmm13,%xmm4
movdqa %xmm12,%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa %xmm15,%xmm5
pxor %xmm4,%xmm0
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
movdqa (%r11,%r10,1),%xmm4
movdqa %xmm14,%xmm2
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addq $16,%r9
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
/* Shared round entry: split the state into low/high nibbles and run the
   inversion lookups; falls through to .Lenc_loop while rounds remain. */
.Lenc_entry:
movdqa %xmm9,%xmm1
movdqa %xmm11,%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,232
movdqa %xmm10,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm10,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm10,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm10,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
jnz .Lenc_loop
/* Last round: output transform via the -96/-80(%r10) tables. */
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
.byte 0xf3,0xc3
.size _pcl_vpaes_encrypt_core,.-_pcl_vpaes_encrypt_core
/*
 * _pcl_vpaes_decrypt_core (internal)
 * In:  %xmm0 = one 16-byte block, %rdx = key schedule
 *      (round count at 240(%rdx)); %xmm9..%xmm11 must hold the constants
 *      loaded by _pcl_vpaes_preheat.
 * Out: %xmm0 = decrypted block.
 * Clobbers: %rax, %r9, %r10, %r11, %xmm1..%xmm5, flags.
 * Same table-lookup (pshufb) structure as the encrypt core, using the
 * .Lk_dipt/.Lk_dsbd decryption tables.
 */
.type _pcl_vpaes_decrypt_core,@function
.align 16
_pcl_vpaes_decrypt_core:
movq %rdx,%r9
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa .Lk_dipt(%rip),%xmm2
pandn %xmm0,%xmm1
movq %rax,%r11
psrld $4,%xmm1
movdqu (%r9),%xmm5
shlq $4,%r11
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa .Lk_dipt+16(%rip),%xmm0
xorq $0x30,%r11
leaq .Lk_dsbd(%rip),%r10
.byte 102,15,56,0,193
andq $0x30,%r11
pxor %xmm5,%xmm2
movdqa .Lk_mc_forward+48(%rip),%xmm5
pxor %xmm2,%xmm0
addq $16,%r9
addq %r10,%r11
jmp .Ldec_entry
.align 16
/* One middle round: four pairs of table lookups from the dsbd-relative
   tables at -32..80(%r10), then rotate %xmm5 (palignr) for the next row mix. */
.Ldec_loop:
movdqa -32(%r10),%xmm4
movdqa -16(%r10),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 0(%r10),%xmm4
pxor %xmm1,%xmm0
movdqa 16(%r10),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 32(%r10),%xmm4
pxor %xmm1,%xmm0
movdqa 48(%r10),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 64(%r10),%xmm4
pxor %xmm1,%xmm0
movdqa 80(%r10),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
addq $16,%r9
.byte 102,15,58,15,237,12
pxor %xmm1,%xmm0
subq $1,%rax
/* Shared round entry: nibble split + inversion lookups; falls through
   to .Ldec_loop while rounds remain. */
.Ldec_entry:
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
movdqa %xmm11,%xmm2
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa %xmm10,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm10,%xmm4
pxor %xmm2,%xmm3
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
movdqa %xmm10,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%r9),%xmm0
pxor %xmm1,%xmm3
jnz .Ldec_loop
/* Last round: output transform; -352(%r11) indexes the .Lk_sr-style
   rotation selected from the round count computed above. */
movdqa 96(%r10),%xmm4
.byte 102,15,56,0,226
pxor %xmm0,%xmm4
movdqa 112(%r10),%xmm0
movdqa -352(%r11),%xmm2
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
.byte 102,15,56,0,194
.byte 0xf3,0xc3
.size _pcl_vpaes_decrypt_core,.-_pcl_vpaes_decrypt_core
/*
 * _pcl_vpaes_schedule_core (internal)
 * Builds a full AES key schedule.
 * In:  %rdi = user key, %esi = key size in bits (128/192/256 decided by
 *      the cmpl $192 below), %rdx = output schedule, %rcx = nonzero for
 *      a decryption schedule, %r8 = rotation index into .Lk_sr.
 * Clobbers: %rsi, %rdx, %r8, %r10, %r11, %xmm0..%xmm8, flags
 *      (calls _pcl_vpaes_preheat, which loads %xmm9..%xmm11 etc.).
 * Clears %xmm0..%xmm7 before returning (key material hygiene).
 */
.type _pcl_vpaes_schedule_core,@function
.align 16
_pcl_vpaes_schedule_core:
call _pcl_vpaes_preheat
movdqa .Lk_rcon(%rip),%xmm8
movdqu (%rdi),%xmm0
/* Input transform of the user key; keep the raw key in %xmm3. */
movdqa %xmm0,%xmm3
leaq .Lk_ipt(%rip),%r11
call _pcl_vpaes_schedule_transform
movdqa %xmm0,%xmm7
leaq .Lk_sr(%rip),%r10
testq %rcx,%rcx
jnz .Lschedule_am_decrypting
/* Encryption schedule: first round key is the transformed user key. */
movdqu %xmm0,(%rdx)
jmp .Lschedule_go
.Lschedule_am_decrypting:
/* Decryption schedule: store the byte-rotated raw key instead and flip
   the rotation index. */
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217
movdqu %xmm3,(%rdx)
xorq $0x30,%r8
.Lschedule_go:
cmpl $192,%esi
ja .Lschedule_256
je .Lschedule_192
/* 128-bit keys: 10 rounds of round+mangle. */
.Lschedule_128:
movl $10,%esi
.Loop_schedule_128:
call _pcl_vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
call _pcl_vpaes_schedule_mangle
jmp .Loop_schedule_128
.align 16
/* 192-bit keys: load the high 8 key bytes and alternate round/smear
   steps 4 times (each pass produces round keys for 1.5 blocks). */
.Lschedule_192:
movdqu 8(%rdi),%xmm0
call _pcl_vpaes_schedule_transform
movdqa %xmm0,%xmm6
pxor %xmm4,%xmm4
movhlps %xmm4,%xmm6
movl $4,%esi
.Loop_schedule_192:
call _pcl_vpaes_schedule_round
.byte 102,15,58,15,198,8
call _pcl_vpaes_schedule_mangle
call _pcl_vpaes_schedule_192_smear
call _pcl_vpaes_schedule_mangle
call _pcl_vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
call _pcl_vpaes_schedule_mangle
call _pcl_vpaes_schedule_192_smear
jmp .Loop_schedule_192
.align 16
/* 256-bit keys: load the high 16 key bytes; 7 iterations alternating a
   full round with a "low round" on the other half of the key. */
.Lschedule_256:
movdqu 16(%rdi),%xmm0
call _pcl_vpaes_schedule_transform
movl $7,%esi
.Loop_schedule_256:
call _pcl_vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _pcl_vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
call _pcl_vpaes_schedule_mangle
pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7
call _pcl_vpaes_schedule_low_round
movdqa %xmm5,%xmm7
jmp .Loop_schedule_256
.align 16
/* Final round key: apply the output transform (.Lk_opt for encryption,
   .Lk_deskew for decryption) and store it, then wipe registers. */
.Lschedule_mangle_last:
leaq .Lk_deskew(%rip),%r11
testq %rcx,%rcx
jnz .Lschedule_mangle_last_dec
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,193
leaq .Lk_opt(%rip),%r11
addq $32,%rdx
.Lschedule_mangle_last_dec:
addq $-16,%rdx
pxor .Lk_s63(%rip),%xmm0
call _pcl_vpaes_schedule_transform
movdqu %xmm0,(%rdx)
/* Zero xmm0-xmm7 so no key material is left behind. */
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
.byte 0xf3,0xc3
.size _pcl_vpaes_schedule_core,.-_pcl_vpaes_schedule_core
/*
 * _pcl_vpaes_schedule_192_smear (internal, 192-bit path helper)
 * Mixes the partial key halves kept across iterations:
 * folds a broadcast of %xmm6's low part and %xmm7's high dword into
 * %xmm6, copies the result to %xmm0, and clears %xmm6's high qword
 * (movhlps from the zeroed %xmm1) for the next iteration.
 * Clobbers: %xmm0, %xmm1, %xmm6.
 */
.type _pcl_vpaes_schedule_192_smear,@function
.align 16
_pcl_vpaes_schedule_192_smear:
pshufd $0x80,%xmm6,%xmm1
pshufd $0xFE,%xmm7,%xmm0
pxor %xmm1,%xmm6
pxor %xmm1,%xmm1
pxor %xmm0,%xmm6
movdqa %xmm6,%xmm0
movhlps %xmm1,%xmm6 /* clear the high 64 bits of %xmm6 */
.byte 0xf3,0xc3
.size _pcl_vpaes_schedule_192_smear,.-_pcl_vpaes_schedule_192_smear
/*
 * _pcl_vpaes_schedule_round (internal)
 * One full key-schedule round: rotate the round constant (%xmm8),
 * rotate/broadcast the input word, then fall through into the shared
 * tail below.
 * _pcl_vpaes_schedule_low_round (second entry point, no rcon/rotate):
 * smears %xmm7, applies the .Lk_s63 bias, runs the S-box lookups on
 * %xmm0, and leaves the new round key in %xmm7 (copy in %xmm0).
 * In:  %xmm0 = input word, %xmm7 = previous round key, %xmm8 = rcon,
 *      %xmm9..%xmm13 = preheat constants.
 * Clobbers: %xmm1..%xmm4.
 * ".byte 102,65,15,58,15,..." / "102,69,..." encode palignr with
 * REX-extended registers; "102,15,56,0,..." is pshufb.
 */
.type _pcl_vpaes_schedule_round,@function
.align 16
_pcl_vpaes_schedule_round:
pxor %xmm1,%xmm1
.byte 102,65,15,58,15,200,15
.byte 102,69,15,58,15,192,15
pxor %xmm1,%xmm7
pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1
_pcl_vpaes_schedule_low_round:
/* Smear %xmm7 (xor-accumulate shifted copies), then bias with .Lk_s63. */
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor .Lk_s63(%rip),%xmm7
/* S-box substitution of %xmm0 via nibble-split pshufb lookups. */
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa %xmm11,%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm10,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa %xmm13,%xmm4
.byte 102,15,56,0,226
movdqa %xmm12,%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
/* Combine with the smeared previous key; new key in %xmm7 and %xmm0. */
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
.byte 0xf3,0xc3
.size _pcl_vpaes_schedule_round,.-_pcl_vpaes_schedule_round
/*
 * _pcl_vpaes_schedule_transform (internal)
 * Applies the 2x16-byte linear transform table at (%r11)/16(%r11) to
 * %xmm0: split into low/high nibbles (mask %xmm9), pshufb each half
 * through its table, and xor the results.
 * In:  %xmm0 = value, %r11 = table base (.Lk_ipt/.Lk_opt/.Lk_deskew),
 *      %xmm9 = 0x0F nibble mask (from preheat).
 * Out: %xmm0 transformed.  Clobbers: %xmm1, %xmm2.
 */
.type _pcl_vpaes_schedule_transform,@function
.align 16
_pcl_vpaes_schedule_transform:
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1 /* %xmm1 = high nibbles */
pand %xmm9,%xmm0 /* %xmm0 = low nibbles */
movdqa (%r11),%xmm2
.byte 102,15,56,0,208
movdqa 16(%r11),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
.byte 0xf3,0xc3
.size _pcl_vpaes_schedule_transform,.-_pcl_vpaes_schedule_transform
/*
 * _pcl_vpaes_schedule_mangle (internal)
 * Writes the current round key (%xmm0, preserved) to the schedule in
 * output form and advances the write pointer.
 * Encryption (%rcx == 0): bias with .Lk_s63, three mc_forward pshufb
 *   accumulation steps, pointer moves FORWARD (addq $16,%rdx).
 * Decryption (%rcx != 0): run the key through the .Lk_dksd inverse
 *   MixColumns table chain, pointer moves BACKWARD (addq $-16,%rdx).
 * Both paths finish by rotating the result through (%r8,%r10) (.Lk_sr)
 * and stepping %r8 to the next rotation (andq $0x30 keeps it in range).
 * Clobbers: %xmm1..%xmm5, %r8, %r11, %rdx advanced, flags.
 */
.type _pcl_vpaes_schedule_mangle,@function
.align 16
_pcl_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa .Lk_mc_forward(%rip),%xmm5
testq %rcx,%rcx
jnz .Lschedule_mangle_dec
/* Encrypting: forward-mangle via three rotate-and-xor steps. */
addq $16,%rdx
pxor .Lk_s63(%rip),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
jmp .Lschedule_mangle_both
.align 16
.Lschedule_mangle_dec:
/* Decrypting: nibble-split, then chain through the 8 dksd sub-tables. */
leaq .Lk_dksd(%rip),%r11
movdqa %xmm9,%xmm1
pandn %xmm4,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm4
movdqa 0(%r11),%xmm2
.byte 102,15,56,0,212
movdqa 16(%r11),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 32(%r11),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 48(%r11),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 64(%r11),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 80(%r11),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 96(%r11),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 112(%r11),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
addq $-16,%rdx
.Lschedule_mangle_both:
/* Apply the .Lk_sr byte rotation, advance the rotation index, store. */
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217
addq $-16,%r8
andq $0x30,%r8
movdqu %xmm3,(%rdx)
.byte 0xf3,0xc3
.size _pcl_vpaes_schedule_mangle,.-_pcl_vpaes_schedule_mangle
/*
 * int pcl_vpaes_set_encrypt_key(userKey (%rdi), bits (%esi), key (%rdx))
 * Stores the round count (bits/32 + 5, i.e. 10/12/14) at 240(%rdx),
 * then builds a forward schedule via _pcl_vpaes_schedule_core with
 * %rcx = 0 (encrypt) and %r8d = 0x30 (initial .Lk_sr rotation).
 * Returns 0 in %eax.
 */
.globl pcl_vpaes_set_encrypt_key
.type pcl_vpaes_set_encrypt_key,@function
.align 16
pcl_vpaes_set_encrypt_key:
movl %esi,%eax
shrl $5,%eax
addl $5,%eax /* rounds = bits/32 + 5 */
movl %eax,240(%rdx)
movl $0,%ecx /* encryption schedule */
movl $0x30,%r8d
call _pcl_vpaes_schedule_core
xorl %eax,%eax /* return 0 */
.byte 0xf3,0xc3
.size pcl_vpaes_set_encrypt_key,.-pcl_vpaes_set_encrypt_key
#-----------------------------------------------------------------------
# pcl_vpaes_set_decrypt_key — decrypt-direction counterpart of
# pcl_vpaes_set_encrypt_key (same presumed (userKey, bits, key) args).
# The schedule is generated backwards, so %rdx is advanced to the end of
# the schedule before calling _pcl_vpaes_schedule_core.
# Always returns 0.
#-----------------------------------------------------------------------
.globl pcl_vpaes_set_decrypt_key
.type pcl_vpaes_set_decrypt_key,@function
.align 16
pcl_vpaes_set_decrypt_key:
movl %esi,%eax
shrl $5,%eax
addl $5,%eax # rounds = bits/32 + 5
movl %eax,240(%rdx) # key->rounds
shll $4,%eax # rounds * 16 bytes
leaq 16(%rdx,%rax,1),%rdx # point past the last round key (filled backwards)
movl $1,%ecx # direction flag: 1 = decrypt
movl %esi,%r8d
shrl $1,%r8d
andl $32,%r8d
xorl $32,%r8d # initial ShiftRows index = ((bits >> 1) & 32) ^ 32
call _pcl_vpaes_schedule_core
xorl %eax,%eax # return 0
.byte 0xf3,0xc3 # rep ret
.size pcl_vpaes_set_decrypt_key,.-pcl_vpaes_set_decrypt_key
#-----------------------------------------------------------------------
# pcl_vpaes_encrypt — encrypt a single 16-byte block.
# Presumed (const u8 *in /*%rdi*/, u8 *out /*%rsi*/, const AES_KEY *key
# /*%rdx*/) per AES_encrypt — confirm. The key is consumed by
# _pcl_vpaes_encrypt_core (defined elsewhere in this file).
# Unaligned in/out are fine (movdqu).
#-----------------------------------------------------------------------
.globl pcl_vpaes_encrypt
.type pcl_vpaes_encrypt,@function
.align 16
pcl_vpaes_encrypt:
movdqu (%rdi),%xmm0 # load plaintext block
call _pcl_vpaes_preheat # load shared table constants into %xmm9-%xmm15
call _pcl_vpaes_encrypt_core
movdqu %xmm0,(%rsi) # store ciphertext block
.byte 0xf3,0xc3 # rep ret
.size pcl_vpaes_encrypt,.-pcl_vpaes_encrypt
#-----------------------------------------------------------------------
# pcl_vpaes_decrypt — decrypt a single 16-byte block.
# Same presumed (in, out, key) signature as pcl_vpaes_encrypt; the work
# is done by _pcl_vpaes_decrypt_core (defined elsewhere in this file).
#-----------------------------------------------------------------------
.globl pcl_vpaes_decrypt
.type pcl_vpaes_decrypt,@function
.align 16
pcl_vpaes_decrypt:
movdqu (%rdi),%xmm0 # load ciphertext block
call _pcl_vpaes_preheat # load shared table constants into %xmm9-%xmm15
call _pcl_vpaes_decrypt_core
movdqu %xmm0,(%rsi) # store plaintext block
.byte 0xf3,0xc3 # rep ret
.size pcl_vpaes_decrypt,.-pcl_vpaes_decrypt
#-----------------------------------------------------------------------
# pcl_vpaes_cbc_encrypt — CBC mode over whole 16-byte blocks.
# Presumed (in /*%rdi*/, out /*%rsi*/, len /*%rdx*/, key /*%rcx*/,
# ivec /*%r8*/, enc /*%r9d*/) per AES_cbc_encrypt — confirm.
# len < 16 returns without touching the IV; a trailing partial block is
# not processed. The updated chain value is written back to *ivec.
#-----------------------------------------------------------------------
.globl pcl_vpaes_cbc_encrypt
.type pcl_vpaes_cbc_encrypt,@function
.align 16
pcl_vpaes_cbc_encrypt:
xchgq %rcx,%rdx # %rcx = length, %rdx = key schedule
subq $16,%rcx
jc .Lcbc_abort # fewer than 16 bytes: nothing to do
movdqu (%r8),%xmm6 # %xmm6 = IV / running chain value
subq %rdi,%rsi # %rsi = out - in; output addr is (%rsi,%rdi)
call _pcl_vpaes_preheat # load shared table constants
cmpl $0,%r9d
je .Lcbc_dec_loop
jmp .Lcbc_enc_loop
.align 16
.Lcbc_enc_loop:
movdqu (%rdi),%xmm0
pxor %xmm6,%xmm0 # XOR plaintext with chain value
call _pcl_vpaes_encrypt_core
movdqa %xmm0,%xmm6 # ciphertext becomes the next chain value
movdqu %xmm0,(%rsi,%rdi,1)
leaq 16(%rdi),%rdi
subq $16,%rcx
jnc .Lcbc_enc_loop
jmp .Lcbc_done
.align 16
.Lcbc_dec_loop:
movdqu (%rdi),%xmm0
movdqa %xmm0,%xmm7 # keep ciphertext: it is the next chain value
call _pcl_vpaes_decrypt_core
pxor %xmm6,%xmm0 # undo CBC chaining
movdqa %xmm7,%xmm6
movdqu %xmm0,(%rsi,%rdi,1)
leaq 16(%rdi),%rdi
subq $16,%rcx
jnc .Lcbc_dec_loop
.Lcbc_done:
movdqu %xmm6,(%r8) # write back the updated IV
.Lcbc_abort:
.byte 0xf3,0xc3 # rep ret
.size pcl_vpaes_cbc_encrypt,.-pcl_vpaes_cbc_encrypt
#-----------------------------------------------------------------------
# _pcl_vpaes_preheat — load the frequently used constant tables:
#   %r10 -> .Lk_s0F; %xmm10/%xmm11 = the two .Lk_inv halves;
#   %xmm9 = 0x0F nibble mask; %xmm13/%xmm12 = .Lk_sb1 halves;
#   %xmm15/%xmm14 = .Lk_sb2 halves.
# Clobb: %r10, %xmm9-%xmm15
#-----------------------------------------------------------------------
.type _pcl_vpaes_preheat,@function
.align 16
_pcl_vpaes_preheat:
leaq .Lk_s0F(%rip),%r10 # all offsets below are relative to .Lk_s0F
movdqa -32(%r10),%xmm10 # .Lk_inv (first half)
movdqa -16(%r10),%xmm11 # .Lk_inv (second half)
movdqa 0(%r10),%xmm9 # 0x0F mask
movdqa 48(%r10),%xmm13 # .Lk_sb1 (first half)
movdqa 64(%r10),%xmm12 # .Lk_sb1 (second half)
movdqa 80(%r10),%xmm15 # .Lk_sb2 (first half)
movdqa 96(%r10),%xmm14 # .Lk_sb2 (second half)
.byte 0xf3,0xc3 # rep ret
.size _pcl_vpaes_preheat,.-_pcl_vpaes_preheat
#-----------------------------------------------------------------------
# _pcl_vpaes_consts — lookup tables for vector-permutation AES.
# Label names follow the vpaes reference implementation (Hamburg,
# "Accelerating AES with Vector Permute Instructions"); the table
# semantics below are taken from those names, not re-derived — confirm
# against the reference if modifying any value.
#-----------------------------------------------------------------------
.type _pcl_vpaes_consts,@object
.align 64
_pcl_vpaes_consts:
.Lk_inv: # GF(2^4) inversion tables
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_s0F: # low-nibble mask (loaded into %xmm9)
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
.Lk_ipt: # input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sb1: # sbox output table 1
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.Lk_sb2: # sbox output table 2
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.Lk_sbo: # sbox final output transform
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_mc_forward: # MixColumns forward rotations (one per round mod 4)
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward: # MixColumns backward rotations
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr: # ShiftRows permutations, indexed via %r8/%r10 in schedule_mangle
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
.Lk_rcon: # key-schedule round constants (vpaes encoding)
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_s63: # 0x5B in every byte; XOR-ed into enc round keys in schedule_mangle
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
.Lk_opt: # output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew: # deskew table for the final schedule entry
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.Lk_dksd: # decrypt key-schedule stage tables (consumed in order
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 # dksd -> dksb -> dkse -> dks9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E # by _pcl_vpaes_schedule_mangle)
.Lk_dksb:
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
.Lk_dipt: # decryption input transform
.quad 0x0F505B040B545F00, 0x154A411E114E451A
.quad 0x86E383E660056500, 0x12771772F491F194
.Lk_dsb9: # decryption sbox stages (9, d, b, e)
.quad 0x851C03539A86D600, 0xCAD51F504F994CC9
.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:
.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:
.quad 0xD022649296B44200, 0x602646F6B0F2D404
.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:
.quad 0x46F2929626D4D000, 0x2242600464B4F6B0
.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
.Lk_dsbo: # decryption sbox output transform
.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
# ASCII banner: "Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 64
.size _pcl_vpaes_consts,.-_pcl_vpaes_consts
|
AmitPr/RISCuit | 3,112 | riscv/dhrystone/strcmp.S | /* Copyright (c) 2017 SiFive Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the FreeBSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses.
*/
/* Machine-word parameters selected by the RISC-V XLEN:
   PTRLOG = log2(bytes per register), SZREG = bytes per register,
   REG_S / REG_L = store / load of one register-sized word. */
#if __riscv_xlen == 64
# define PTRLOG 3
# define SZREG 8
# define REG_S sd
# define REG_L ld
#elif __riscv_xlen == 32
# define PTRLOG 2
# define SZREG 4
# define REG_S sw
# define REG_L lw
#else
# error __riscv_xlen must equal 32 or 64
#endif
/* The word-at-a-time zero-byte trick and the "lowest chunk = lowest
   address" assumption below only hold for little-endian order. */
#if BYTE_ORDER != LITTLE_ENDIAN
# error
#endif
#-----------------------------------------------------------------------
# int strcmp(const char *s1 /* a0 */, const char *s2 /* a1 */)
# Returns <0 / 0 / >0 in a0.
#
# If both pointers are register-aligned, compares SZREG bytes per step
# using the zero-byte test ((x & m) + m) | x | m != -1 with m =
# 0x7f..7f, where x is a word of s1 (little-endian only). Otherwise, or
# once a difference/terminator is found, falls back to a byte loop.
# Clobbers a2-a5, t0-t2.
#-----------------------------------------------------------------------
.text
.globl strcmp
.type strcmp, @function
strcmp:
or a4, a0, a1
li t2, -1 # sentinel for the zero-byte test
and a4, a4, SZREG-1
bnez a4, .Lmisaligned # either pointer unaligned: byte loop
#if SZREG == 4
li a5, 0x7f7f7f7f
#else
ld a5, mask # a5 = 0x7f7f7f7f7f7f7f7f (see `mask` below)
#endif
# check_one_word i n: compare word i of an n-word unrolled group.
# Branches to .Lnull<i> if the s1 word contains a zero byte, to
# .Lmismatch if the words differ; the last word of the group advances
# both pointers by n words and branches back to .Lloop.
.macro check_one_word i n
REG_L a2, \i*SZREG(a0)
REG_L a3, \i*SZREG(a1)
and t0, a2, a5
or t1, a2, a5
add t0, t0, a5
or t0, t0, t1 # t0 == -1 iff a2 has no zero byte
bne t0, t2, .Lnull\i
.if \i+1-\n
bne a2, a3, .Lmismatch
.else
add a0, a0, \n*SZREG
add a1, a1, \n*SZREG
beq a2, a3, .Lloop
# fall through to .Lmismatch
.endif
.endm
# foundnull i n: landing pad for "word i of s1 contains a zero byte".
# Rewinds both pointers to word i; if the two words were equal the
# strings match through the terminator (return 0), otherwise the byte
# loop at .Lmisaligned locates the differing byte. Note: .Lnull0 is
# deliberately emitted inside the i==1 expansion, *after* the pointer
# adjustment, so the zero-offset case shares the compare/return tail.
.macro foundnull i n
.ifne \i
.Lnull\i:
add a0, a0, \i*SZREG
add a1, a1, \i*SZREG
.ifeq \i-1
.Lnull0:
.endif
bne a2, a3, .Lmisaligned
li a0, 0
ret
.endif
.endm
.Lloop:
# examine full words at a time, favoring strings of a couple dozen chars
#if __riscv_xlen == 32
check_one_word 0 5
check_one_word 1 5
check_one_word 2 5
check_one_word 3 5
check_one_word 4 5
#else
check_one_word 0 3
check_one_word 1 3
check_one_word 2 3
#endif
# backwards branch to .Lloop contained above
.Lmismatch:
# words don't match, but a2 has no null byte.
# Find the lowest-addressed differing 16-bit chunk (lowest address =
# least-significant bits on little-endian), then order the two bytes
# within that chunk.
#if __riscv_xlen == 64
sll a4, a2, 48
sll a5, a3, 48
bne a4, a5, .Lmismatch_upper
sll a4, a2, 32
sll a5, a3, 32
bne a4, a5, .Lmismatch_upper
#endif
sll a4, a2, 16
sll a5, a3, 16
bne a4, a5, .Lmismatch_upper
srl a4, a2, 8*SZREG-16 # topmost 16-bit chunk differs
srl a5, a3, 8*SZREG-16
sub a0, a4, a5
and a1, a0, 0xff # low bytes equal? then the high byte decides
bnez a1, 1f
ret
.Lmismatch_upper:
srl a4, a4, 8*SZREG-16 # bring the differing chunk down to bits 15:0
srl a5, a5, 8*SZREG-16
sub a0, a4, a5
and a1, a0, 0xff
bnez a1, 1f
ret
1:and a4, a4, 0xff # low (first) bytes differ: compare just those
and a5, a5, 0xff
sub a0, a4, a5
ret
.Lmisaligned:
# misaligned
# Plain byte-by-byte compare; also the landing pad once a word-level
# difference or terminator has been narrowed down.
lbu a2, 0(a0)
lbu a3, 0(a1)
add a0, a0, 1
add a1, a1, 1
bne a2, a3, 1f
bnez a2, .Lmisaligned
1:
sub a0, a2, a3
ret
# cases in which a null byte was detected
#if __riscv_xlen == 32
foundnull 0 5
foundnull 1 5
foundnull 2 5
foundnull 3 5
foundnull 4 5
#else
foundnull 0 3
foundnull 1 3
foundnull 2 3
#endif
.size strcmp, .-strcmp
#if SZREG == 8
/* 64-bit 0x7f..7f constant for the zero-byte detection trick; kept in
   .srodata (mergeable, 8-byte entities) so `ld a5, mask` above can
   reach it. Only needed on RV64 — RV32 materializes the mask with li. */
.section .srodata.cst8,"aM",@progbits,8
.align 3
mask:
.dword 0x7f7f7f7f7f7f7f7f
#endif
AmmarMorched/STM32_EnergyMonitor_IoT | 22,854 | stm projet/Projetstm_consommation/Core/Startup/startup_stm32f407vgtx.s | /**
******************************************************************************
* @file startup_stm32f407xx.s
* @author MCD Application Team
* @brief STM32F407xx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
/* Reset_Handler: entry point after reset. Sets SP, copies .data from
   flash to SRAM, zeroes .bss, runs SystemInit and the C++/static
   constructors, then calls main(). Registers: r0 = .data dst base,
   r1 = .data dst end, r2 = src (then .bss cursor), r3 = offset/zero,
   r4 = scratch (then .bss end). */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* NOTE(review): if main() ever returns, this branches to whatever lr
   holds; there is no explicit hang loop here — confirm intended. */
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
/* Default_Handler: catch-all for any interrupt/exception without a user
   override (all vector entries below are weak-aliased to it). Spins
   forever so the system state is preserved for a debugger. */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
/* g_pfnVectors: Cortex-M4 vector table for STM32F407 — initial SP,
   reset vector, system exceptions, then the 82 external IRQ vectors.
   Placed in .isr_vector so the linker script can locate it at the
   start of flash (address 0x0800_0000 / alias 0x0). */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size is evaluated *before* the label, so it
   records size 0. Matches ST's shipped startup files; harmless, but
   the directive normally belongs after the table. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word CAN1_TX_IRQHandler /* CAN1 TX */
.word CAN1_RX0_IRQHandler /* CAN1 RX0 */
.word CAN1_RX1_IRQHandler /* CAN1 RX1 */
.word CAN1_SCE_IRQHandler /* CAN1 SCE */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FSMC_IRQHandler /* FSMC */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word ETH_IRQHandler /* Ethernet */
.word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */
.word CAN2_TX_IRQHandler /* CAN2 TX */
.word CAN2_RX0_IRQHandler /* CAN2 RX0 */
.word CAN2_RX1_IRQHandler /* CAN2 RX1 */
.word CAN2_SCE_IRQHandler /* CAN2 SCE */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_IRQHandler /* DCMI */
.word 0 /* CRYP crypto */
.word HASH_RNG_IRQHandler /* Hash and Rng */
.word FPU_IRQHandler /* FPU */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler referenced by the vector table is declared weak and
   aliased to Default_Handler; defining a function with the same name
   anywhere in the application transparently overrides the alias. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak CAN1_TX_IRQHandler
.thumb_set CAN1_TX_IRQHandler,Default_Handler
.weak CAN1_RX0_IRQHandler
.thumb_set CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FSMC_IRQHandler
.thumb_set FSMC_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak ETH_IRQHandler
.thumb_set ETH_IRQHandler,Default_Handler
.weak ETH_WKUP_IRQHandler
.thumb_set ETH_WKUP_IRQHandler,Default_Handler
.weak CAN2_TX_IRQHandler
.thumb_set CAN2_TX_IRQHandler,Default_Handler
.weak CAN2_RX0_IRQHandler
.thumb_set CAN2_RX0_IRQHandler,Default_Handler
.weak CAN2_RX1_IRQHandler
.thumb_set CAN2_RX1_IRQHandler,Default_Handler
.weak CAN2_SCE_IRQHandler
.thumb_set CAN2_SCE_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_IRQHandler
.thumb_set DCMI_IRQHandler,Default_Handler
.weak HASH_RNG_IRQHandler
.thumb_set HASH_RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
|
anak1st/rCore | 676 | os/src/task/switch.S | .altmacro
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore kernel stack of next task
ld sp, 8(a1)
ret
|
anak1st/rCore | 1,640 | os/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
AnastasiosChatzikyriakou09/ACh_Thesis | 3,496 | Dummy/GA_SRRIP-main/GeST/assembly_compilation_ARM/main_original.s | /*
Copyright 2019 ARM Ltd. and University of Cyprus
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
.data
/* NOTE(review): msg/len are not referenced by the visible code;
   presumably kept for generated variants that print — confirm. */
msg:
.ascii "\n"
len = . - msg
.text
.align 2
.global main
.type main, %function
/* GeST stress-test harness (AArch64). Initializes every integer and
   FP/SIMD register to a fixed pattern, points x10/x12 at a 512-byte
   region below sp, then spins in Start:, where generated loop code is
   spliced in. The loop is infinite by default; the commented counter
   code enables a fixed iteration count via x28. */
main:
#reg init
LDR x0,=0xAAAAAAAA
LDR x1,=0x55555555
LDR x2,=0x00000000
LDR x3,=0x00000000
LDR x4,=0x55555555
LDR x5,=0x33333333
LDR x6,=0xFFFFFFFF
LDR x7,=0xFFFFFFFE
LDR x8,=0x00000001
LDR x9,=0xCCCCCCCC
LDR x10,=0xAAAAAAAA
LDR x11,=0x55555555
LDR x12,=0x00000000
LDR x13,=0x00000000
LDR x14,=0x55555555
LDR x15,=0x33333333
LDR x16,=0xFFFFFFFF
LDR x17,=0xFFFFFFFE
LDR x18,=0x00000001
LDR x19,=0xCCCCCCCC
LDR x20,=0xAAAAAAAA
LDR x21,=0x55555555
LDR x22,=0x00000000
LDR x23,=0x00000000
LDR x24,=0x55555555
LDR x25,=0x33333333
LDR x26,=0xFFFFFFFF
LDR x27,=0xFFFFFFFE
LDR x28,=0x00000001
LDR x29,=0xCCCCCCCC
LDR x30,=0xCCCCCCCC
LDR D0,=0xAAAAAAAAAAAAAAAA
LDR D1,=0xFFFFFFFFFFFFFFFF
LDR D2,=0x5555555555555555
LDR D3,=0x3333333333333333
LDR D4,=0xCCCCCCCCCCCCCCCC
LDR D5,=0x0000000100000001
LDR D6,=0xFFFFFFFEFFFFFFFE
LDR D7,=0x0000000000000000
LDR D8,=0xAAAAAAAA55555555
LDR D9,=0x55555555AAAAAAAA
LDR D10,=0xAAAAAAAAAAAAAAAA
LDR D11,=0xFFFFFFFFFFFFFFFF
LDR D12,=0x5555555555555555
LDR D13,=0x3333333333333333
LDR D14,=0xCCCCCCCCCCCCCCCC
LDR D15,=0x0000000100000001
LDR D16,=0xFFFFFFFEFFFFFFFE
LDR D17,=0x0000000000000000
LDR D18,=0xAAAAAAAA55555555
LDR D19,=0x55555555AAAAAAAA
LDR D20,=0xAAAAAAAAAAAAAAAA
LDR D21,=0xFFFFFFFFFFFFFFFF
LDR D22,=0x5555555555555555
LDR D23,=0x3333333333333333
LDR D24,=0xCCCCCCCCCCCCCCCC
LDR D25,=0x0000000100000001
LDR D26,=0xFFFFFFFEFFFFFFFE
LDR D27,=0x0000000000000000
LDR D28,=0xAAAAAAAA55555555
LDR D29,=0x55555555AAAAAAAA
LDR D30,=0x55555555AAAAAAAA
LDR x12,=0x200
#0x200 is 512 bytes
SUB x12,sp,x12
MOV x10,x12
MOV x11,10
#x12 will hold the end address
/* NOTE(review): x12 = sp - 0x200, i.e. the *start* of the 512-byte
   region; the "end address" comment above looks stale — confirm. */
LDR x28, =0x3B9ACA00/*1 billion iterations. x28 to be used for iteration counter if running for fixed iterations*/
Start:
#loop_code
b Start
/* everything after the unconditional branch is unreachable unless the
   fixed-iteration lines below are uncommented (and `b Start` removed) */
#SUB x28,x28,1 #uncomment for running for fixed iterations
#CBNZ x28,Start
ret
.size main, .-main
.ident "GCC: (APM-8.0.10-le) 4.9.3 20150218 (prerelease)"
.section .note.GNU-stack,"",%progbits
AnastasiosChatzikyriakou09/ACh_Thesis | 2,903 | Dummy/GA_SRRIP-main/GeST/assembly_compilation_x86_gcc/main_original.s | /*
Copyright 2019 ARM Ltd. and University of Cyprus
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
.file "main.s"
.data
.align 32
# 256-bit pattern loaded into every ymm register below.
.simdvalue:
.long 0xaaaaaaaa
.long 0x55555555
.long 0x33333333
.long 0xcccccccc
.long 0xaaaaaaaa
.long 0x55555555
.long 0x33333333
.long 0xcccccccc
.text
#-----------------------------------------------------------------------
# int main(void) — GeST stress-test harness (x86-64, AVX).
# Initializes the integer, x87 and ymm register state to fixed patterns,
# then spins in .L2, where generated loop code is spliced in. The loop
# is infinite by default; the commented rcx counter code enables a
# fixed iteration count.
# Fixes vs. the 32-bit original this was ported from:
#   - `movl %esp, %ebp` truncated the 64-bit stack pointer (rbp's upper
#     32 bits were zeroed, so `leave` would fault on stacks above 4 GiB)
#     -> now `movq %rsp, %rbp`.
#   - CFI used 32-bit DWARF register numbers (ebp=5, esp=4) and the
#     wrong CFA offset after `pushq` -> now x86-64 numbering (rbp=6,
#     rsp=7) with CFA offset 16.
#-----------------------------------------------------------------------
.globl main
main:
.LFB0:
.cfi_startproc
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
#reg init
mov $0x55555555, %rax
mov $0x33333333, %rbx
mov $0x22222222, %rdx
mov $0x44444444, %rsi
mov $0x77777777, %rdi
# seven loads fill 7 of the 8 x87 stack slots with pi
fldpi
fldpi
fldpi
fldpi
fldpi
fldpi
fldpi
vmovdqa .simdvalue(%rip), %ymm0
vmovdqa .simdvalue(%rip), %ymm1
vmovdqa .simdvalue(%rip), %ymm2
vmovdqa .simdvalue(%rip), %ymm3
vmovdqa .simdvalue(%rip), %ymm4
vmovdqa .simdvalue(%rip), %ymm5
vmovdqa .simdvalue(%rip), %ymm6
vmovdqa .simdvalue(%rip), %ymm7
vmovdqa .simdvalue(%rip), %ymm8
vmovdqa .simdvalue(%rip), %ymm9
vmovdqa .simdvalue(%rip), %ymm10
vmovdqa .simdvalue(%rip), %ymm11
vmovdqa .simdvalue(%rip), %ymm12
vmovdqa .simdvalue(%rip), %ymm13
vmovdqa .simdvalue(%rip), %ymm14
vmovdqa .simdvalue(%rip), %ymm15
mov $50000000, %rcx #leave for i--
#subq $304, %rsp
.L2:
#loop_code
#sub $1,%rcx #remove this and below comment for fixed iterations
#cmp $0, %rcx
jmp .L2
leave
.cfi_restore 6
.cfi_def_cfa 7, 8
ret
.cfi_endproc
.LFE0:
.ident "GCC: (GNU) 6.4.0"
anchees/labo | 468 | lab_00/hello.s | .file "hello.c"
.text
.section .rodata
.LC0:
.string "Hello world!"
.text
.globl main
.type main, @function
main:
.LFB0:
.cfi_startproc
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
leaq .LC0(%rip), %rax
movq %rax, %rdi
call puts@PLT
movl $0, %eax
popq %rbp
.cfi_def_cfa 7, 8
ret
.cfi_endproc
.LFE0:
.size main, .-main
.ident "GCC: (Debian 12.2.0-14) 12.2.0"
.section .note.GNU-stack,"",@progbits
|
andogq/kernel | 1,325 | arch/aarch64/src/boot/boot.s | // Load the relative address (to the PC) of a value.
// The symbol must be within +/- 4Gb of the PC.
.macro ADR_REL register, symbol
// An offset to the page into the upper part of the register
adrp \register, \symbol
// Load the lower bits of the symbol address, which are the offset into the page.
add \register, \register, #:lo12:\symbol
.endm
// Early boot path: park unless EL2 on the boot core, clear BSS, set up
// the boot stack, then enter _start_rust.
// The {CONST_*} placeholders are presumably substituted (e.g. by Rust
// format!/global_asm!) before this text reaches the assembler — confirm.
1:
// Core check: only boot if in EL2
mrs x0, CurrentEL
cmp x0, {CONST_CURRENTEL_EL2}
b.ne 9f // v Loop
// Core check: park if on non-boot core
mrs x1, MPIDR_EL1
and x1, x1, {CONST_CORE_ID_MASK}
cmp x1, {CONST_BOOT_CORE_ID}
b.ne 9f // v Loop
// Initialise BSS
ADR_REL x0, __bss_start
ADR_REL x1, __bss_end_exclusive
// Constantly loop to clear out BSS memory
// NOTE(review): this `1:` shadows the entry label `1:` above (which is
// never referenced); `b 1b` below binds to this nearer definition.
1:
// If the pointers meet each other, then BSS has been cleared.
cmp x0, x1
b.eq 2f
// Zero out the current pointer location, and increment the pointer by 16 to the next position.
stp xzr, xzr, [x0], #16
b 1b
2: // BSS complete, prepare for Rust
// Set up the stack pointer
ADR_REL x0, __boot_core_stack_end_exclusive
mov sp, x0
b _start_rust
9: // <- Loop
// Park the core: wait-for-event, then re-check forever.
wfe
b 9b // ^ Loop
Andrew-LC/myos | 4,552 | src/boot.s | /* Declare constants for the multiboot header. */
.set ALIGN, 1<<0 /* align loaded modules on page boundaries */
.set MEMINFO, 1<<1 /* provide memory map */
.set FLAGS, ALIGN | MEMINFO /* this is the Multiboot 'flag' field */
.set MAGIC, 0x1BADB002 /* 'magic number' lets bootloader find the header */
.set CHECKSUM, -(MAGIC + FLAGS) /* checksum of above, to prove we are multiboot */
/*
Declare a multiboot header that marks the program as a kernel. These are magic
values that are documented in the multiboot standard. The bootloader will
search for this signature in the first 8 KiB of the kernel file, aligned at a
32-bit boundary. The signature is in its own section so the header can be
forced to be within the first 8 KiB of the kernel file.
*/
.section .multiboot
.align 4
.long MAGIC
.long FLAGS
.long CHECKSUM
/*
The multiboot standard does not define the value of the stack pointer register
(esp) and it is up to the kernel to provide a stack. This allocates room for a
small stack by creating a symbol at the bottom of it, then allocating 16384
bytes for it, and finally creating a symbol at the top. The stack grows
downwards on x86. The stack is in its own section so it can be marked nobits,
which means the kernel file is smaller because it does not contain an
uninitialized stack. The stack on x86 must be 16-byte aligned according to the
System V ABI standard and de-facto extensions. The compiler will assume the
stack is properly aligned and failure to align the stack will result in
undefined behavior.
*/
.section .bss
.align 16
stack_bottom:
.skip 4096 * 4 # 16 KiB
stack_top:
/*
The linker script specifies _start as the entry point to the kernel and the
bootloader will jump to this position once the kernel has been loaded. It
doesn't make sense to return from this function as the bootloader is gone.
*/
.section .text
.global _start
.type _start, @function

/*
 * Kernel entry point.  The multiboot bootloader jumps here in 32-bit
 * protected mode with interrupts and paging disabled.
 *
 * In:  %eax = multiboot magic value, %ebx = physical address of the
 *      multiboot information structure (per the Multiboot spec).
 *
 * This file is assembled with GAS in its default AT&T syntax, so
 * registers take a '%' prefix, immediates a '$' prefix, and operand
 * order is src, dst.  The previous code (`mov stack_top, esp`,
 * `push eax`, ...) omitted the prefixes, which makes the assembler
 * treat `esp`/`eax`/`ebx` as undefined symbols and never actually
 * loads the stack pointer.
 */
_start:
	/*
	 * Set up the stack: point %esp at the top of the area reserved in
	 * .bss (the stack grows downwards on x86).  Nothing in C can run
	 * before this.
	 */
	mov $stack_top, %esp

	/*
	 * Keep the call below 16-byte aligned: stack_top is 16-byte
	 * aligned and the two argument pushes add 8 bytes, so pad by 8.
	 */
	sub $8, %esp

	/*
	 * Pass the multiboot registers to the C kernel.  cdecl pushes
	 * arguments right-to-left, so after these pushes %ebx (info
	 * pointer) is the first argument and %eax (magic) the second --
	 * same order as before.
	 */
	push %eax
	push %ebx
	call kernel_main

	/*
	 * If kernel_main ever returns, hang forever: mask interrupts,
	 * halt, and re-halt if an NMI/SMM wakes the CPU.
	 */
	cli
1:	hlt
	jmp 1b

/* Size of _start, useful for debugging and call tracing. */
.size _start, . - _start
|
andromanged/camer | 8,619 | src/asm/keccakf1600_x86-64-elf.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.type __KeccakF1600,@function
.align 32
# Internal Keccak-f[1600] core: runs all 24 rounds.
# Non-standard internal ABI (not C-callable):
#   %rdi = current state + 100  (bias keeps all 25 lanes within disp8
#          range: lane A[i] lives at 8*i-100(%rdi))
#   %rsi = scratch state + 100  (%rdi/%rsi are exchanged every round)
#   %r15 = cursor into the `iotas` round-constant table
# Six lanes are expected (and left) bit-complemented -- the "lazy NOT"
# trick that saves NOT instructions in the chi step.
# Clobbers: rax, rbx, rcx, rdx, rbp, r8-r14, flags.
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
# Preload row 4 (lanes A[20..24]); the round loop folds the remaining
# rows into these registers to form the theta column parities C[0..4].
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.align 32
.Loop:
# --- Theta: C[x] = A[x,0] ^ A[x,1] ^ A[x,2] ^ A[x,3] ^ A[x,4] ---
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# --- D[x] = C[x-1] ^ rol64(C[x+1], 1) ---
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# --- Rho + Pi + Chi (+ Iota on lane [0,0]): build the five output rows
# --- into the scratch state; the computation is software-pipelined, so
# --- loads for the next row overlap stores of the current one.
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9                 # iota: xor the round constant
leaq 8(%r15),%r15               # advance the constant cursor
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
# --- Last output row; its lanes stay live in rax..rbp as the seed of
# --- the next round's theta parities.
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi                 # ping-pong the two state buffers; 24
                                # rounds (even) leave them as on entry
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15                 # `iotas` sits 64 bytes into a 256-byte-
jnz .Loop                       # aligned block: low byte of %r15 hits 0
                                # exactly after 24 rounds (64+24*8=256)
leaq -192(%r15),%r15            # rewind the constant cursor for reuse
.byte 0xf3,0xc3
.cfi_endproc
.size __KeccakF1600,.-__KeccakF1600
.globl KeccakF1600
.type KeccakF1600,@function
.align 32
# void KeccakF1600(uint64_t A[25])
# System V AMD64 ABI.  Applies the Keccak-f[1600] permutation in place
# to the 25-lane state at %rdi.  The inner core keeps six lanes
# bit-complemented ("lazy NOT"), so they are complemented on entry and
# un-complemented on exit here.
KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
# Save every callee-saved GPR the core clobbers.
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
# Bias the state pointer by +100 (disp8 addressing window for all 25
# lanes) and carve out a 200-byte scratch state on the stack.
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
# Complement the six "lazy NOT" lanes for the core.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15           # %r15 = round-constant cursor
leaq 100(%rsp),%rsi             # %rsi = biased scratch state
call __KeccakF1600
# Undo the lane complementation and the pointer bias.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
# Epilogue: %r11 -> saved-register block (200 + 6*8 = 248 above %rsp);
# restore registers and drop the frame in one go.
leaq 248(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.size KeccakF1600,.-KeccakF1600
.globl SHA3_absorb
.type SHA3_absorb,@function
.align 32
# size_t SHA3_absorb(uint64_t A[25], const unsigned char *inp,
#                    size_t len, size_t bsz)
# System V AMD64 ABI.  XORs full bsz-byte blocks of inp into the state,
# permuting after each block, while len >= bsz.  Returns the number of
# unprocessed tail bytes (< bsz) in %rax.
# NOTE(review): inp appears to be assumed 8-byte readable per lane
# (movq loads) -- confirm callers guarantee bsz is a multiple of 8.
SHA3_absorb:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
# Bias the state pointer; 232 bytes = 200-byte scratch state + three
# 8-byte spill slots (inp, len, bsz) that survive the core calls.
leaq 100(%rdi),%rdi
subq $232,%rsp
.cfi_adjust_cfa_offset 232
movq %rsi,%r9                   # %r9 = input cursor (frees %rsi)
leaq 100(%rsp),%rsi             # %rsi = biased scratch state
# Complement the six "lazy NOT" lanes (XOR-ing data preserves this).
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)         # spill bsz
.Loop_absorb:
cmpq %rcx,%rdx                  # while (len >= bsz)
jc .Ldone_absorb
shrq $3,%rcx                    # lanes per block
leaq -100(%rdi),%r8             # %r8 = un-biased state cursor
.Lblock_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax                 # state lane ^= input lane
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz .Lblock_absorb
movq %r9,200-100(%rsi)          # spill inp/len across the permutation
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp .Loop_absorb
.align 32
.Ldone_absorb:
movq %rdx,%rax                  # return leftover byte count
# Un-complement the "lazy NOT" lanes before handing the state back.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
# Epilogue: saved-register block sits 232 + 48 = 280 above %rsp.
leaq 280(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.size SHA3_absorb,.-SHA3_absorb
.globl SHA3_squeeze
.type SHA3_squeeze,@function
.align 32
# void SHA3_squeeze(uint64_t A[25], unsigned char *out, size_t len,
#                   size_t bsz)
# System V AMD64 ABI.  Copies len bytes of keystream out of the state,
# invoking the public KeccakF1600 (which handles the lane complement)
# whenever a full bsz-byte block has been consumed.
SHA3_squeeze:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-32
subq $32,%rsp                   # scratch; also keeps %rsp 16-aligned
.cfi_adjust_cfa_offset 32       # at the call below (8+24+32 = 64)
shrq $3,%rcx                    # %rcx = lanes remaining in this block
movq %rdi,%r8                   # %r8  = state read cursor
movq %rsi,%r12                  # %r12 = output cursor
movq %rdx,%r13                  # %r13 = bytes remaining
movq %rcx,%r14                  # %r14 = lanes per block (reload value)
jmp .Loop_squeeze
.align 32
.Loop_squeeze:
cmpq $8,%r13
jb .Ltail_squeeze               # < 8 bytes left: byte-copy the tail
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)                # copy one whole lane
leaq 8(%r12),%r12
subq $8,%r13
jz .Ldone_squeeze
subq $1,%rcx
jnz .Loop_squeeze
# Block exhausted: permute and restart at the top of the state.
# (KeccakF1600 preserves %rdi; the mov into %rcx is redundant here --
# NOTE(review): it mirrors the Win64 variant of this generated file.)
movq %rdi,%rcx
call KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp .Loop_squeeze
.Ltail_squeeze:
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4                 # rep movsb: copy the final %rcx bytes
.Ldone_squeeze:
# Epilogue: saved regs live above the 32-byte scratch area.
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp                   # 32 scratch + 3*8 pushes
.cfi_adjust_cfa_offset -56
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.byte 0xf3,0xc3
.cfi_endproc
.size SHA3_squeeze,.-SHA3_squeeze
.align 256
.quad 0,0,0,0,0,0,0,0
.type iotas,@object
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.size iotas,.-iotas
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.section .note.gnu.property,"a",@note
.long 4,2f-1f,5
.byte 0x47,0x4E,0x55,0
1: .long 0xc0000002,4,3
.align 8
2:
|
andromanged/camer | 10,572 | src/asm/keccakf1600_x86-64-win64.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.def __KeccakF1600; .scl 3; .type 32; .endef
.p2align 5
__KeccakF1600:
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.p2align 5
.Loop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15
jnz .Loop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.globl KeccakF1600
.def KeccakF1600; .scl 2; .type 32; .endef
.p2align 5
KeccakF1600:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_KeccakF1600:
movq %rcx,%rdi
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
leaq 100(%rdi),%rdi
subq $200,%rsp
.LSEH_body_KeccakF1600:
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
leaq 248(%rsp),%r11
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_KeccakF1600:
mov 8(%r11),%rdi
mov 16(%r11),%rsi
.byte 0xf3,0xc3
.LSEH_end_KeccakF1600:
.globl SHA3_absorb
.def SHA3_absorb; .scl 2; .type 32; .endef
.p2align 5
SHA3_absorb:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_SHA3_absorb:
movq %rcx,%rdi
movq %rdx,%rsi
movq %r8,%rdx
movq %r9,%rcx
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
leaq 100(%rdi),%rdi
subq $232,%rsp
.LSEH_body_SHA3_absorb:
movq %rsi,%r9
leaq 100(%rsp),%rsi
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)
.Loop_absorb:
cmpq %rcx,%rdx
jc .Ldone_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
.Lblock_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz .Lblock_absorb
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp .Loop_absorb
.p2align 5
.Ldone_absorb:
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq 280(%rsp),%r11
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_SHA3_absorb:
mov 8(%r11),%rdi
mov 16(%r11),%rsi
.byte 0xf3,0xc3
.LSEH_end_SHA3_absorb:
.globl SHA3_squeeze
.def SHA3_squeeze; .scl 2; .type 32; .endef
.p2align 5
SHA3_squeeze:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_SHA3_squeeze:
movq %rcx,%rdi
movq %rdx,%rsi
movq %r8,%rdx
movq %r9,%rcx
pushq %r12
pushq %r13
pushq %r14
subq $32,%rsp
.LSEH_body_SHA3_squeeze:
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp .Loop_squeeze
.p2align 5
.Loop_squeeze:
cmpq $8,%r13
jb .Ltail_squeeze
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz .Ldone_squeeze
subq $1,%rcx
jnz .Loop_squeeze
movq %rdi,%rcx
call KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp .Loop_squeeze
.Ltail_squeeze:
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4
.Ldone_squeeze:
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.LSEH_epilogue_SHA3_squeeze:
mov 8(%rsp),%rdi
mov 16(%rsp),%rsi
.byte 0xf3,0xc3
.LSEH_end_SHA3_squeeze:
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.section .pdata
.p2align 2
.rva .LSEH_begin_KeccakF1600
.rva .LSEH_body_KeccakF1600
.rva .LSEH_info_KeccakF1600_prologue
.rva .LSEH_body_KeccakF1600
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_info_KeccakF1600_body
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_end_KeccakF1600
.rva .LSEH_info_KeccakF1600_epilogue
.rva .LSEH_begin_SHA3_absorb
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_prologue
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_body
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_end_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_epilogue
.rva .LSEH_begin_SHA3_squeeze
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_prologue
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_body
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_end_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_epilogue
.section .xdata
.p2align 3
.LSEH_info_KeccakF1600_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_KeccakF1600_body:
.byte 1,0,18,0
.byte 0x00,0xf4,0x19,0x00
.byte 0x00,0xe4,0x1a,0x00
.byte 0x00,0xd4,0x1b,0x00
.byte 0x00,0xc4,0x1c,0x00
.byte 0x00,0x54,0x1d,0x00
.byte 0x00,0x34,0x1e,0x00
.byte 0x00,0x74,0x20,0x00
.byte 0x00,0x64,0x21,0x00
.byte 0x00,0x01,0x1f,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_KeccakF1600_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_absorb_body:
.byte 1,0,18,0
.byte 0x00,0xf4,0x1d,0x00
.byte 0x00,0xe4,0x1e,0x00
.byte 0x00,0xd4,0x1f,0x00
.byte 0x00,0xc4,0x20,0x00
.byte 0x00,0x54,0x21,0x00
.byte 0x00,0x34,0x22,0x00
.byte 0x00,0x74,0x24,0x00
.byte 0x00,0x64,0x25,0x00
.byte 0x00,0x01,0x23,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_squeeze_body:
.byte 1,0,11,0
.byte 0x00,0xe4,0x04,0x00
.byte 0x00,0xd4,0x05,0x00
.byte 0x00,0xc4,0x06,0x00
.byte 0x00,0x74,0x08,0x00
.byte 0x00,0x64,0x09,0x00
.byte 0x00,0x62
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_epilogue:
.byte 1,0,4,0
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0x00,0x00,0x00
|
andromanged/camer | 8,238 | src/asm/keccakf1600_x86-64-osx.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.p2align 5
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp L$oop
.p2align 5
L$oop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15
jnz L$oop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.cfi_endproc
.globl _KeccakF1600
.p2align 5
_KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
leaq 248(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.globl _SHA3_absorb
.p2align 5
_SHA3_absorb:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $232,%rsp
.cfi_adjust_cfa_offset 232
movq %rsi,%r9
leaq 100(%rsp),%rsi
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)
L$oop_absorb:
cmpq %rcx,%rdx
jc L$done_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
L$block_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz L$block_absorb
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp L$oop_absorb
.p2align 5
L$done_absorb:
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq 280(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.globl _SHA3_squeeze
.p2align 5
_SHA3_squeeze:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-32
subq $32,%rsp
.cfi_adjust_cfa_offset 32
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp L$oop_squeeze
.p2align 5
L$oop_squeeze:
cmpq $8,%r13
jb L$tail_squeeze
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz L$done_squeeze
subq $1,%rcx
jnz L$oop_squeeze
movq %rdi,%rcx
call _KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp L$oop_squeeze
L$tail_squeeze:
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4
L$done_squeeze:
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.cfi_adjust_cfa_offset -56
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.byte 0xf3,0xc3
.cfi_endproc
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
andromanged/camer | 10,572 | src/asm/keccakf1600_x86-64-mingw64.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
#-----------------------------------------------------------------------
# __KeccakF1600 — inner Keccak-f[1600] permutation (24 rounds).
# Private helper (storage class 3 = static); NOT a normal ABI function:
#   In:  %rdi = A_flat + 100 (state pointer biased by 100 so every lane
#              of the 25x8-byte state is reachable with a disp8 offset),
#        %rsi = scratch state + 100 (same bias),
#        %r15 = pointer to the iotas round-constant table.
#   Out: permuted state; %rdi/%rsi are swapped each round via xchgq, and
#        since 24 rounds is even they end up back as on entry.
#        %r15 is rewound to the start of iotas before returning.
#   Clobbers: rax,rbx,rcx,rdx,rbp,r8-r14, flags.
# NOTE(review): callers keep six lanes stored bitwise-complemented (see
# the notq runs in the public wrappers) — a standard lane-complementing
# trick to reduce NOTs inside chi; verify against the CRYPTOGAMS source.
#-----------------------------------------------------------------------
.def __KeccakF1600; .scl 3; .type 32; .endef
.p2align 5
__KeccakF1600:
.byte 0xf3,0x0f,0x1e,0xfa
# Load the fifth plane A[20..24] into rax,rbx,rcx,rdx,rbp; these five
# registers carry lanes across rounds.
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.p2align 5
.Loop:
# ---- theta: accumulate the five column parities into rax..rbp ----
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# D[x] = C[x-1] ^ rol64(C[x+1], 1); r13 keeps a pre-rotate copy of C.
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# ---- rho + pi + chi + iota, first output plane (written to rsi) ----
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9                         # iota: xor round constant, then
leaq 8(%r15),%r15                       # advance the constant pointer
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
# ---- second output plane ----
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
# ---- third output plane ----
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
# ---- fourth output plane ----
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
# ---- fifth output plane; result stays in rax..rbp for the next round.
# xchgq swaps the source/destination state pointers for ping-pong use.
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
# iotas is laid out so that after 24 rounds (24*8 = 192 bytes past a
# 256-byte-aligned-minus-64 start) the low byte of %r15 becomes zero.
testq $255,%r15
jnz .Loop
leaq -192(%r15),%r15                    # rewind %r15 to iotas[0]
.byte 0xf3,0xc3                         # rep ret
#-----------------------------------------------------------------------
# void KeccakF1600(uint64_t A[25])
# ABI:  Microsoft x64 (mingw64): argument in %rcx; home slots at
#       8(%rsp)/16(%rsp) are used to preserve %rdi/%rsi, which are
#       callee-saved on Windows.
# The six notq runs convert the externally-visible state into the
# internally-used complemented-lane representation and back afterwards.
# Stack: 6 pushes (48 bytes) + 200-byte scratch state; %r11 = caller rsp.
#-----------------------------------------------------------------------
.globl KeccakF1600
.def KeccakF1600; .scl 2; .type 32; .endef
.p2align 5
KeccakF1600:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)                       # spill Win64 callee-saved rdi
movq %rsi,16(%rsp)                      # spill Win64 callee-saved rsi
movq %rsp,%r11
.LSEH_begin_KeccakF1600:
movq %rcx,%rdi                          # A
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
leaq 100(%rdi),%rdi                     # bias state pointer by 100
subq $200,%rsp                          # scratch copy of the state
.LSEH_body_KeccakF1600:
# Enter complemented-lane representation (lanes 1,2,8,12,17,20).
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15                   # round-constant pointer
leaq 100(%rsp),%rsi                     # biased scratch-state pointer
call __KeccakF1600
# Leave complemented-lane representation.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi                    # undo the bias
leaq 248(%rsp),%r11                     # 248 = 200 + 6*8: caller rsp
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_KeccakF1600:
mov 8(%r11),%rdi                        # restore from home slots
mov 16(%r11),%rsi
.byte 0xf3,0xc3                         # rep ret
.LSEH_end_KeccakF1600:
#-----------------------------------------------------------------------
# size_t SHA3_absorb(uint64_t A[25], const unsigned char *inp,
#                    size_t len, size_t bsz)
# ABI:  Microsoft x64: rcx=A, rdx=inp, r8=len, r9=bsz; remapped to the
#       rdi/rsi/rdx/rcx working set below.
# XORs full bsz-byte blocks of inp into the state and permutes after
# each block; returns (in %rax) the number of leftover bytes < bsz.
# Stack layout (relative to the biased %rsi = rsp+100): scratch state at
# 0..199, saved inp at 200, len at 208, bsz at 216.
# NOTE(review): len and bsz are assumed to be multiples of 8 (the block
# loop copies 8 bytes at a time) — confirm against callers.
#-----------------------------------------------------------------------
.globl SHA3_absorb
.def SHA3_absorb; .scl 2; .type 32; .endef
.p2align 5
SHA3_absorb:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)                       # spill Win64 callee-saved rdi
movq %rsi,16(%rsp)                      # spill Win64 callee-saved rsi
movq %rsp,%r11
.LSEH_begin_SHA3_absorb:
movq %rcx,%rdi                          # A
movq %rdx,%rsi                          # inp
movq %r8,%rdx                           # len
movq %r9,%rcx                           # bsz
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
leaq 100(%rdi),%rdi                     # bias state pointer by 100
subq $232,%rsp                          # 200-byte scratch + 3 spill slots
.LSEH_body_SHA3_absorb:
movq %rsi,%r9                           # r9 = input cursor
leaq 100(%rsp),%rsi                     # biased scratch-state pointer
# Enter complemented-lane representation.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)                 # save bsz across permutations
.Loop_absorb:
cmpq %rcx,%rdx                          # while (len >= bsz)
jc .Ldone_absorb
shrq $3,%rcx                            # lanes per block = bsz/8
leaq -100(%rdi),%r8                     # r8 = &A[0]
.Lblock_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax                         # A[i] ^= next input lane
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz .Lblock_absorb
movq %r9,200-100(%rsi)                  # save cursor/len across the call
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp .Loop_absorb
.p2align 5
.Ldone_absorb:
movq %rdx,%rax                          # return leftover byte count
# Leave complemented-lane representation.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq 280(%rsp),%r11                     # 280 = 232 + 6*8: caller rsp
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_SHA3_absorb:
mov 8(%r11),%rdi                        # restore from home slots
mov 16(%r11),%rsi
.byte 0xf3,0xc3                         # rep ret
.LSEH_end_SHA3_absorb:
#-----------------------------------------------------------------------
# void SHA3_squeeze(uint64_t A[25], unsigned char *out,
#                   size_t len, size_t bsz)
# ABI:  Microsoft x64: rcx=A, rdx=out, r8=len, r9=bsz.
# Copies len bytes of keystream out of the state, re-permuting (via the
# public KeccakF1600, which handles the lane complementing) every time
# a full bsz-byte block has been consumed.
# Register roles: r8=read cursor, r12=out, r13=bytes left,
#                 r14=bsz/8 (reload value), rcx=lanes left in block.
#-----------------------------------------------------------------------
.globl SHA3_squeeze
.def SHA3_squeeze; .scl 2; .type 32; .endef
.p2align 5
SHA3_squeeze:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)                       # spill Win64 callee-saved rdi
movq %rsi,16(%rsp)                      # spill Win64 callee-saved rsi
movq %rsp,%r11
.LSEH_begin_SHA3_squeeze:
movq %rcx,%rdi                          # A
movq %rdx,%rsi                          # out
movq %r8,%rdx                           # len
movq %r9,%rcx                           # bsz
pushq %r12
pushq %r13
pushq %r14
subq $32,%rsp                           # shadow space for KeccakF1600
.LSEH_body_SHA3_squeeze:
shrq $3,%rcx                            # lanes per block
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp .Loop_squeeze
.p2align 5
.Loop_squeeze:
cmpq $8,%r13                            # less than one lane left?
jb .Ltail_squeeze
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz .Ldone_squeeze
subq $1,%rcx                            # block exhausted?
jnz .Loop_squeeze
movq %rdi,%rcx                          # Win64 arg for KeccakF1600
call KeccakF1600
movq %rdi,%r8                           # restart reading at A[0]
movq %r14,%rcx
jmp .Loop_squeeze
.Ltail_squeeze:
movq %r8,%rsi                           # copy the final partial lane
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4                         # rep movsb
.Ldone_squeeze:
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp                           # 32 + 3*8
.LSEH_epilogue_SHA3_squeeze:
mov 8(%rsp),%rdi                        # restore from home slots
mov 16(%rsp),%rsi
.byte 0xf3,0xc3                         # rep ret
.LSEH_end_SHA3_squeeze:
# Pad to a 256-byte boundary plus 64 zero bytes so that iotas+192
# (24 rounds * 8 bytes) is 256-byte aligned; __KeccakF1600 ends its
# round loop when the low byte of the iota pointer reaches zero.
.p2align 8
.quad 0,0,0,0,0,0,0,0
# Keccak-f[1600] round constants (iota step), one per round, 24 rounds.
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
# ASCII identification string:
# "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
# Windows x64 SEH function table (.pdata): one RUNTIME_FUNCTION entry
# (begin RVA, end RVA, unwind-info RVA) per region of each public
# function — prologue, body, and epilogue each get their own entry.
.section .pdata
.p2align 2
.rva .LSEH_begin_KeccakF1600
.rva .LSEH_body_KeccakF1600
.rva .LSEH_info_KeccakF1600_prologue
.rva .LSEH_body_KeccakF1600
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_info_KeccakF1600_body
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_end_KeccakF1600
.rva .LSEH_info_KeccakF1600_epilogue
.rva .LSEH_begin_SHA3_absorb
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_prologue
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_body
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_end_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_epilogue
.rva .LSEH_begin_SHA3_squeeze
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_prologue
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_body
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_end_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_epilogue
# Windows x64 SEH unwind information (.xdata), hand-encoded UNWIND_INFO
# byte tables referenced by the .pdata entries above. Each record starts
# with (version/flags, prologue size, unwind-code count, frame reg/off)
# followed by UNWIND_CODE pairs describing nonvolatile-register saves
# and the stack allocation, so the kernel can unwind through these
# frames. Do not edit by hand without re-deriving the encodings.
.section .xdata
.p2align 3
.LSEH_info_KeccakF1600_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_KeccakF1600_body:
.byte 1,0,18,0
.byte 0x00,0xf4,0x19,0x00
.byte 0x00,0xe4,0x1a,0x00
.byte 0x00,0xd4,0x1b,0x00
.byte 0x00,0xc4,0x1c,0x00
.byte 0x00,0x54,0x1d,0x00
.byte 0x00,0x34,0x1e,0x00
.byte 0x00,0x74,0x20,0x00
.byte 0x00,0x64,0x21,0x00
.byte 0x00,0x01,0x1f,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_KeccakF1600_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_absorb_body:
.byte 1,0,18,0
.byte 0x00,0xf4,0x1d,0x00
.byte 0x00,0xe4,0x1e,0x00
.byte 0x00,0xd4,0x1f,0x00
.byte 0x00,0xc4,0x20,0x00
.byte 0x00,0x54,0x21,0x00
.byte 0x00,0x34,0x22,0x00
.byte 0x00,0x74,0x24,0x00
.byte 0x00,0x64,0x25,0x00
.byte 0x00,0x01,0x23,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_squeeze_body:
.byte 1,0,11,0
.byte 0x00,0xe4,0x04,0x00
.byte 0x00,0xd4,0x05,0x00
.byte 0x00,0xc4,0x06,0x00
.byte 0x00,0x74,0x08,0x00
.byte 0x00,0x64,0x09,0x00
.byte 0x00,0x62
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_epilogue:
.byte 1,0,4,0
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0x00,0x00,0x00
|
Aqua-218/HikariOS | 6,486 | src/boot/bootloader.S | ; HikariOS Stage0 UEFI Bootloader (ELF Loader)
; Author: SeleniaProject
; License: MIT OR Apache-2.0
; This file is assembled with NASM to generate a PE32+ EFI image.
; All comments are in English as requested.
BITS 64
DEFAULT REL

%define EFI_SUCCESS 0
%define EFI_LOAD_ERROR 0x8000000000000001
%define EFI_BUFFER_TOO_SMALL 0x8000000000000005
%define EFI_UNSUPPORTED 0x8000000000000003

; -----------------------------------------------------------------------------
; EFI_SYSTEM_TABLE structure offsets (UEFI Spec v2.10, Section 4.3), x64.
; Layout: Hdr (24) | FirmwareVendor (8) | FirmwareRevision (4 + 4 pad)
;         | ConsoleInHandle (8) | ConIn (8) | ConsoleOutHandle (8)
;         | ConOut (8) | StandardErrorHandle (8) | StdErr (8)
;         | RuntimeServices (8) | BootServices (8) | ...
; FIX: ConOut is at offset 64 (not 80) and BootServices at 96 (not 112);
; the previous values pointed at StandardErrorHandle and past RuntimeServices.
; -----------------------------------------------------------------------------
%define EFI_ST_SIGNATURE 0 ; 8 bytes
%define EFI_ST_REVISION 8 ; 4 bytes
%define EFI_ST_CONOUT 64 ; SimpleTextOutputProtocol*
%define EFI_ST_BOOT_SERVICES 96 ; EFI_BOOT_SERVICES*

; -----------------------------------------------------------------------------
; EFI_BOOT_SERVICES function table offsets (UEFI Spec v2.10, Section 4.4).
; Header is 24*sizeof(UINTN) bytes; function-pointer indices after it:
;   RaiseTPL(0) RestoreTPL(1) AllocatePages(2) FreePages(3) GetMemoryMap(4)
;   AllocatePool(5) FreePool(6) CreateEvent(7) ... LoadImage(22)
;   StartImage(23) Exit(24) UnloadImage(25) ExitBootServices(26)
; FIX: LoadImage is slot 22 and StartImage slot 23; the old +7/+8 values
; actually pointed at CreateEvent and SetTimer.
; -----------------------------------------------------------------------------
%define EFI_BS_HDR_SIZE (24*8)
%define EFI_BS_ALLOCATE_POOL (EFI_BS_HDR_SIZE + 5*8)
%define EFI_BS_FREE_POOL (EFI_BS_HDR_SIZE + 6*8)
%define EFI_BS_LOAD_IMAGE (EFI_BS_HDR_SIZE + 22*8)
%define EFI_BS_START_IMAGE (EFI_BS_HDR_SIZE + 23*8)
%define EFI_BS_EXIT_BOOT_SERVICES (EFI_BS_HDR_SIZE + 26*8)

; -----------------------------------------------------------------------------
; Globals
; -----------------------------------------------------------------------------
GLOBAL efi_main
GLOBAL EfiMain ; Some toolchains expect this symbol name

SECTION .text
ALIGN 16
; -----------------------------------------------------------------------------
; UEFI entry point
; rdi = EFI_HANDLE ImageHandle
; rsi = EFI_SYSTEM_TABLE* SystemTable
; -----------------------------------------------------------------------------
;-----------------------------------------------------------------------
; EFI_STATUS efi_main(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable)
; ABI:   UEFI uses the Microsoft x64 calling convention:
;        rcx = ImageHandle, rdx = SystemTable (NOT rdi/rsi).
;        FIX: the previous version read its arguments from rdi/rsi.
; Frame: [rbp-16] ImageHandle, [rbp-24] SystemTable,
;        [rbp-32] loaded image handle, [rbp-40] StartImage ExitDataSize.
;        0x80 local area keeps the 32-byte shadow space free at every
;        call and rsp 16-aligned (ret addr + push rbp + 0x80).
; Never returns to the firmware on the happy path (hangs after
; StartImage), so clobbering nonvolatile r12/r15/rbx is tolerated here.
;-----------------------------------------------------------------------
efi_main:
EfiMain:
        push    rbp
        mov     rbp, rsp
        sub     rsp, 0x80               ; shadow space, locals, alignment
        ; Stash arguments (MS x64: rcx/rdx).
        mov     [rbp - 16], rcx         ; ImageHandle
        mov     [rbp - 24], rdx         ; SystemTable
        mov     r15, rdx                ; keep SystemTable handy
        ; -----------------------------------------------------------------
        ; Print banner: ConOut->OutputString(ConOut, banner)
        ; OutputString is at offset 8 in SIMPLE_TEXT_OUTPUT_PROTOCOL and
        ; expects a CHAR16* string.
        ; -----------------------------------------------------------------
        mov     rax, [r15 + EFI_ST_CONOUT]
        test    rax, rax
        jz      .skip_banner
        mov     rcx, rax                ; this = ConOut
        lea     rdx, [rel banner]
        mov     rax, [rax + 8]          ; ->OutputString
        call    rax
.skip_banner:
        mov     rbx, [r15 + EFI_ST_BOOT_SERVICES]
        ; -----------------------------------------------------------------
        ; LoadImage(BootPolicy=FALSE, Parent, DevicePath=NULL,
        ;           SourceBuffer=kernel_bin, SourceSize, &NewHandle)
        ; 5th/6th args go on the stack above the 32-byte shadow space.
        ; -----------------------------------------------------------------
        xor     rcx, rcx                ; BootPolicy = FALSE
        mov     rdx, [rbp - 16]         ; ParentImageHandle
        xor     r8, r8                  ; DevicePath = NULL
        lea     r9, [rel kernel_bin]    ; SourceBuffer
        mov     rax, kernel_bin_end
        sub     rax, kernel_bin
        mov     [rsp + 0x20], rax       ; SourceSize
        lea     rax, [rbp - 32]
        mov     [rsp + 0x28], rax       ; &NewHandle
        mov     rax, [rbx + EFI_BS_LOAD_IMAGE]
        call    rax
        cmp     rax, EFI_SUCCESS
        jne     .load_failed
        mov     r12, [rbp - 32]         ; freshly loaded image handle
        ; -----------------------------------------------------------------
        ; StartImage(ImageHandle, &ExitDataSize, &ExitData=NULL)
        ; FIX: ImageHandle goes in rcx (1st arg); the previous version
        ; passed it in r8 and zeroed rcx/rdx instead.
        ; -----------------------------------------------------------------
        mov     rcx, r12                ; ImageHandle
        lea     rdx, [rbp - 40]         ; &ExitDataSize (scratch slot)
        xor     r8, r8                  ; ExitData = NULL
        mov     rax, [rbx + EFI_BS_START_IMAGE]
        call    rax
        cmp     rax, EFI_SUCCESS
        jne     .start_failed
.hang:
        hlt
        jmp     .hang
        ; -----------------------------------------------------------------
        ; Error paths: load the message into rdx (2nd OutputString arg),
        ; then share one print-and-hang tail.
        ; FIX: the old code clobbered the string pointer in rdx with the
        ; OutputString function pointer before calling it.
        ; -----------------------------------------------------------------
.load_failed:
        lea     rdx, [rel load_fail_msg]
        jmp     .print_and_hang
.start_failed:
        lea     rdx, [rel start_fail_msg]
.print_and_hang:
        mov     rax, [r15 + EFI_ST_CONOUT]
        test    rax, rax
        jz      .hang
        mov     rcx, rax                ; this = ConOut
        mov     rax, [rax + 8]          ; ->OutputString
        call    rax
        jmp     .hang
; -----------------------------------------------------------------------------
; Read-only data
; -----------------------------------------------------------------------------
SECTION .rdata
ALIGN 4
; FIX: EFI SIMPLE_TEXT_OUTPUT_PROTOCOL.OutputString expects CHAR16*
; (UCS-2) strings; these were previously 8-bit ASCII, which prints
; garbage on real firmware. __utf16__ emits each character as a word.
banner          dw __utf16__('HikariOS Stage0: Bootloader started'), 13, 10, 0
load_fail_msg   dw __utf16__('ERROR: LoadImage() failed'), 13, 10, 0
start_fail_msg  dw __utf16__('ERROR: StartImage() failed'), 13, 10, 0

; -----------------------------------------------------------------------------
; Embedded kernel ELF payload will be linked here with objcopy or llvm-objcopy.
; The linker/objcopy step appends the kernel binary to the bootloader, making
; Stage0 completely self-contained. Tools like `build.rs` or the xtask helper
; script are responsible for performing this binary concatenation.
; -----------------------------------------------------------------------------
SECTION .rodata
ALIGN 8
kernel_bin:
        ; Placeholder zero-byte to satisfy label; real data injected post-build.
        db 0
kernel_bin_end:

SECTION .bss
ALIGN 8
; No uninitialised data needed at the moment.
Aqua-218/HikariOS | 380 | bootloader/src/bootloader.S | ; Stage0 UEFI PE entry stub
; This assembly shim simply jumps to the Rust entry symbol.
; Stage0 UEFI PE entry shim: translate the firmware's Microsoft x64
; calling convention into System V and tail-jump to the Rust entry.
BITS 64
default rel

global efi_main
section .text
efi_main:
        ; MS x64 in:  RCX = EFI_HANDLE (ImageHandle)
        ;             RDX = EFI_SYSTEM_TABLE* (SystemTable)
        mov rdi, rcx ; first argument per System V ABI
        mov rsi, rdx ; second argument per System V ABI
        extern rust_efi_main
        ; Tail-call: rust_efi_main returns directly to the firmware.
        ; NOTE(review): rust_efi_main must therefore be declared with the
        ; SysV calling convention on the Rust side — confirm.
        jmp rust_efi_main
Arcadia-Y/ACore | 496 | os/src/time/timer_trap.s | # handle timer interrupts in M-mode and delegate it to S-mode
# M-mode timer-interrupt trampoline: re-arm mtimecmp and delegate the
# tick to S-mode by raising the supervisor timer-interrupt-pending bit.
# mscratch points to a per-hart scratch area laid out as:
#   [0..2]*8  three temporary save slots (t0-t2)
#   [3]*8     address of this hart's mtimecmp register
#   [4]*8     tick interval to add on each interrupt
.section .text.trap
.globl _timer_trap
.align 2
_timer_trap:
csrrw sp, mscratch, sp                  # sp <-> scratch area pointer
sd t0, 0(sp)
sd t1, 1*8(sp)
sd t2, 2*8(sp)
# set mtimecmp: *mtimecmp += interval
ld t0, 3*8(sp) # mtimecmp
ld t1, 4*8(sp) # time interval
ld t2, 0(t0)
add t2, t2, t1
sd t2, 0(t0)
# setup sip: 32 = bit 5 = STIP, so S-mode sees a supervisor timer interrupt
li t0, 32
csrw sip, t0
ld t0, 0(sp)                            # restore temporaries
ld t1, 1*8(sp)
ld t2, 2*8(sp)
csrrw sp, mscratch, sp                  # restore original sp
mret
|
Arcadia-Y/ACore | 559 | os/src/task/switch.S | .altmacro
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
__switch:
# store current context
sd sp, 8(a0)
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# load next context
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
ld sp, 8(a1)
ret
|
Arcadia-Y/ACore | 1,361 | os/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
# User->kernel trap entry (mapped in the trampoline page of every space).
# sscratch holds the user-space TrapContext address; slots: x1..x31 at
# n*8, sstatus @ 32*8, sepc @ 33*8, kernel_satp @ 34*8, trap_handler
# @ 35*8, kernel_sp @ 36*8.
__alltraps:
    csrrw sp, sscratch, sp
    # now sscratch -> user_stack, sp -> TrapContext
    # store registers except x0/sp/tp
    sd x1, 1*8(sp)
    # FIX: x3 must go to slot 3; it previously used 1*8 and clobbered the
    # just-saved x1 (ra) while leaving slot 3 stale for __restore.
    sd x3, 3*8(sp)
    .set n, 5
    .rept 27
    SAVE_GP %n
    .set n, n+1
    .endr
    # store sstatus, sepc
    csrr t0, sstatus
    sd t0, 32*8(sp)
    csrr t0, sepc
    sd t0, 33*8(sp)
    # store original sp (user stack pointer, swapped into sscratch above)
    csrr t0, sscratch
    sd t0, 2*8(sp)
    # load kernel_satp, trap_handler, kernel_sp
    ld t0, 34*8(sp)
    ld t1, 35*8(sp)
    ld sp, 36*8(sp)
    # switch to kernel space (fence around the satp write to flush TLB)
    sfence.vma
    csrw satp, t0
    sfence.vma
    # jump to trap_handler
    jr t1
# Kernel->user return path.
# a0: TrapContext in user space, a1: user space satp
__restore:
# switch to user space (fence around the satp write to flush TLB)
sfence.vma
csrw satp, a1
sfence.vma
# set sp, sscratch to TrapContext so the next __alltraps can find it
csrw sscratch, a0
mv sp, a0
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack (slot 2 holds the saved user sp)
ld sp, 2*8(sp)
sret
|
argha-saha/riscv-assembler | 14 | test_asm_files/rv32m/mul.s | mul a0, t0, t1 |
argha-saha/riscv-assembler | 39 | test_asm_files/csr/csrrw.s | li x6, 0xA
csrrw x0, mstatus, x6
ebreak |
argha-saha/riscv-assembler | 15,146 | test_asm_files/programs/one_k.s | start: addi t0, x0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
one_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
two_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
three_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
four_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
five_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
six_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
seven_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
eight_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
nine_hundred: addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
addi t0, t0, 1
one_thousand: addi t0, t0, 1 |
argha-saha/riscv-assembler | 46 | test_asm_files/pseudo_instructions/jr.s | auipc ra, 0
addi ra, ra, 12
jr ra
done:
jr ra |
argha-saha/riscv-assembler | 43 | test_asm_files/pseudo_instructions/jal.s | jal func
ebreak
func:
addi a0, x0, 100
ret |
argha-saha/riscv-assembler | 27 | test_asm_files/pseudo_instructions/negw.s | addi t0, x0, 16
negw a0, t0 |
argha-saha/riscv-assembler | 26 | test_asm_files/pseudo_instructions/sgtz.s | addi t0, x0, 4
sgtz a0, t0 |
argha-saha/riscv-assembler | 202 | test_asm_files/pseudo_instructions/li.s | li a0, 100 # Just uses addi
li a1, 1000000 # Expands into lui and addi
beq x0, x0, taken
addi a2, a0, 10
li a3, 0x12345678 # Expands into lui and addi
taken:
addi t0, x0, 16
addi t1, t0, 16 |
argha-saha/riscv-assembler | 17 | test_asm_files/pseudo_instructions/addiw.s | addiw a0, x0, 100 |
argha-saha/riscv-assembler | 65 | test_asm_files/pseudo_instructions/ret.s | start:
jal ra, func
j fail
pass:
j pass
fail:
j fail
func:
ret |
argha-saha/riscv-assembler | 27 | test_asm_files/pseudo_instructions/not.s | addi t0, x0, 100
not a0, t0 |
argha-saha/riscv-assembler | 27 | test_asm_files/pseudo_instructions/neg.s | addi t0, x0, 100
neg a0, t0 |
argha-saha/riscv-assembler | 86 | test_asm_files/pseudo_instructions/j.s | addi t0, x0, 4
j targ
snez a0, t0
addi s0, x0, 8
targ:
addi t1, t0, 12
and t1, t1, t0 |
argha-saha/riscv-assembler | 26 | test_asm_files/pseudo_instructions/seqz.s | addi t0, x0, 4
seqz a0, t0 |
argha-saha/riscv-assembler | 26 | test_asm_files/pseudo_instructions/sltz.s | addi t0, x0, 4
sltz a0, t0 |
argha-saha/riscv-assembler | 25 | test_asm_files/pseudo_instructions/mv.s | addi t0, x0, 32
mv a0, t0 |
argha-saha/riscv-assembler | 113 | test_asm_files/pseudo_instructions/beqz.s | addi a0, x0, 8
addi a1, x0, 8
beqz a0, taken
addi a2, x0, 4
addi a3, a2, 2
taken:
addi a4, x0, 5
addi a5, x0, 10 |
argha-saha/riscv-assembler | 7 | test_asm_files/pseudo_instructions/nop.s | nop
nop |
argha-saha/riscv-assembler | 15 | test_asm_files/pseudo_instructions/subw.s | subw a0, t1, t0 |
argha-saha/riscv-assembler | 29 | test_asm_files/pseudo_instructions/sextw.s | addi t0, x0, 16
sext.w a0, t0 |
argha-saha/riscv-assembler | 26 | test_asm_files/pseudo_instructions/snez.s | addi t0, x0, 4
snez a0, t0 |
argha-saha/riscv-assembler | 116 | test_asm_files/base/addi.s | addi a0, x0, 8
addi a1, x0, 8
beq a0, a1, taken
addi a2, x0, 4
addi a3, a2, 2
taken:
addi a4, x0, 5
addi a5, x0, 10 |
argha-saha/riscv-assembler | 127 | test_asm_files/base/auipc.s | addi t0, t0, 1
addi t0, t0, 1
auipc a0, taken
addi t0, t0, 1
addi t0, t0, 1
taken:
addi t1, t0, 1
addi t1, t0, 1
addi t1, t0, 1 |
argha-saha/riscv-assembler | 125 | test_asm_files/base/lui.s | addi t0, t0, 1
addi t0, t0, 1
lui a0, taken
addi t0, t0, 1
addi t0, t0, 1
taken:
addi t1, t0, 1
addi t1, t0, 1
addi t1, t0, 1 |
Aria-iu/Simple_OS | 222 | kernel/src/entry.S | .section .text.entry
.globl _start
_start:
la sp , boot_stack_top
call simpl_os_main
.section .bss.stack
.globl boot_stack
boot_stack:
.space 4096 * 16
.globl boot_stack_top
boot_stack_top: |
Aria-iu/Simple_OS | 1,831 | kernel/src/link_app.S | .align 3
.section .data
.global _num_app
_num_app:
.quad 7
.quad app_0_start
.quad app_1_start
.quad app_2_start
.quad app_3_start
.quad app_4_start
.quad app_5_start
.quad app_6_start
.quad app_6_end
.global _app_names
_app_names:
.string "00hello_world"
.string "01store_fault"
.string "02power"
.string "03priv_inst"
.string "04priv_csr"
.string "05get_time"
.string "06test_datain"
.section .data
.global app_0_start
.global app_0_end
.align 3
app_0_start:
.incbin "../apps/target/riscv64gc-unknown-none-elf/release/00hello_world.bin"
app_0_end:
.section .data
.global app_1_start
.global app_1_end
.align 3
app_1_start:
.incbin "../apps/target/riscv64gc-unknown-none-elf/release/01store_fault.bin"
app_1_end:
.section .data
.global app_2_start
.global app_2_end
.align 3
app_2_start:
.incbin "../apps/target/riscv64gc-unknown-none-elf/release/02power.bin"
app_2_end:
.section .data
.global app_3_start
.global app_3_end
.align 3
app_3_start:
.incbin "../apps/target/riscv64gc-unknown-none-elf/release/03priv_inst.bin"
app_3_end:
.section .data
.global app_4_start
.global app_4_end
.align 3
app_4_start:
.incbin "../apps/target/riscv64gc-unknown-none-elf/release/04priv_csr.bin"
app_4_end:
.section .data
.global app_5_start
.global app_5_end
.align 3
app_5_start:
.incbin "../apps/target/riscv64gc-unknown-none-elf/release/05get_time.bin"
app_5_end:
.section .data
.global app_6_start
.global app_6_end
.align 3
app_6_start:
.incbin "../apps/target/riscv64gc-unknown-none-elf/release/06test_datain.bin"
app_6_end:
|
Aria-iu/Simple_OS | 1,588 | kernel/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
__restore:
# case1: start running app by __restore
# case2: back to U after handling trap
mv sp, a0
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpuse registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret |
aschombe/microRISC | 202 | examples/test.s | .text
mov r1, r2
add r1, r2, zr
// this is a comment
// adr r3, age
// test: // this is a comment
// add r1, r1, r1
// sub r2, r1, r2 // this is a comment
// .data
// age: 0
// height: 50
// weight: 60 |
Asecave/BEPL-T3X16 | 80 | c_compiler/Programs/test_out.s | set x0 10
out x0 0
set x0 11
out x0 2
set x0 12
out x0 4
set x0 13
out x0 6
halt |
Asecave/BEPL-T3X16 | 861 | c_compiler/Programs/test_all_instructions.s | # load and store
set x0 10
set x1 1
store x1 x0
load x2 x0
out x2 0
# add
set x1 2
set x2 1
add x2 x2 x1
set x1 7
add x2 x2 x1
out x2 1
set x1 -9
add x2 x2 x1
out x2 1
# jump and return
set x0 32
ssp x0
set x0 jump_test
set x1 -1
set x2 0
set x3 1
set x4 6
jal x0 x1 < x2
out x4 2
jal x0 x2 < x1
out x4 2
jal x0 x2 < x3
out x4 2
jal x0 x1 = x1
out x4 2
jal x0 x1 = x2
out x4 2
jal x0 x1 = x3
out x4 2
jal x0 x3 > x2
out x4 2
jal x0 x2 > x1
out x4 2
jal x0 x1 > x2
out x4 2
set x0 jump_end
j x0
:jump_test
add x4 x4 x1
ret
:jump_end
out x4 2
# add immediate
set x0 5
addi x0 5
out x0 3
addi x0 -9
out x0 3
# subtract
set x0 5
set x1 10
sub x0 x0 x1
out x0 4
set x1 -6
sub x0 x0 x1
out x0 4
# and
set x0 -1
set x1 1
and x0 x0 x1
out x0 5
# xor
set x0 85
xor x0 x0 x0
set x1 1
xor x0 x1 x0
out x0 6
# shift
set x0 8
set x1 3
sft x0 x0 >> x1
out x0 7
halt
|
Asecave/BEPL-T3X16 | 692 | c_compiler/Programs/find_primes.s | # n = 1;
# d = 1;
# loop {
# if (n % d == 0) {
# if (d <= 1) {
# out(n);
# }
# n++;
# d = n;
# d--;
# } else {
# d--;
# }
# }
set x0 1
set x1 1
set x6 5
set x7 31
sft x7 x7 << x6
addi x7 31
sft x7 x7 << x6
addi x7 31
:loop
set x2 modulus
jal x2
set x3 main_mod_branch
set x4 0
j x3 x2 = x4
set x4 1
sub x1 x1 x4
set x3 loop
j x3
:main_mod_branch
set x2 1
set x3 main_next_num
j x3 x1 > x2
out x0 0
:main_next_num
addi x0 1
set x1 0
add x1 x1 x0
addi x1 -1
set x2 loop
j x2 x0 < x7
halt
:modulus
set x2 0
add x2 x2 x0
set x3 modulus_end
set x4 modulus_loop
:modulus_loop
j x3 x2 < x1
sub x2 x2 x1
j x4
:modulus_end
ret |
Asecave/BEPL-T3X16 | 12 | c_compiler/Programs/single.s | j x0 x1 = x1 |
Asecave/BEPL-T3X16 | 861 | c_compiler/Programs/compiler_input.s | # load and store
set x0 10
set x1 1
store x1 x0
load x2 x0
out x2 0
# add
set x1 2
set x2 1
add x2 x2 x1
set x1 7
add x2 x2 x1
out x2 1
set x1 -9
add x2 x2 x1
out x2 1
# jump and return
set x0 32
ssp x0
set x0 jump_test
set x1 -1
set x2 0
set x3 1
set x4 6
jal x0 x1 < x2
out x4 2
jal x0 x2 < x1
out x4 2
jal x0 x2 < x3
out x4 2
jal x0 x1 = x1
out x4 2
jal x0 x1 = x2
out x4 2
jal x0 x1 = x3
out x4 2
jal x0 x3 > x2
out x4 2
jal x0 x2 > x1
out x4 2
jal x0 x1 > x2
out x4 2
set x0 jump_end
j x0
:jump_test
add x4 x4 x1
ret
:jump_end
out x4 2
# add immediate
set x0 5
addi x0 5
out x0 3
addi x0 -9
out x0 3
# subtract
set x0 5
set x1 10
sub x0 x0 x1
out x0 4
set x1 -6
sub x0 x0 x1
out x0 4
# and
set x0 -1
set x1 1
and x0 x0 x1
out x0 5
# xor
set x0 85
xor x0 x0 x0
set x1 1
xor x0 x1 x0
out x0 6
# shift
set x0 8
set x1 3
sft x0 x0 >> x1
out x0 7
halt
|
AshishD5/bhainlink | 4,378 | library/compiler-builtins/compiler-builtins/src/hexagon/dfmul.s | .text
.global __hexagon_muldf3
.type __hexagon_muldf3,@function
.global __qdsp_muldf3 ; .set __qdsp_muldf3, __hexagon_muldf3
.global __hexagon_fast_muldf3 ; .set __hexagon_fast_muldf3, __hexagon_muldf3
.global __hexagon_fast2_muldf3 ; .set __hexagon_fast2_muldf3, __hexagon_muldf3
.p2align 5
__hexagon_muldf3:
{
p0 = dfclass(r1:0,#2)
p0 = dfclass(r3:2,#2)
r13:12 = combine(##0x40000000,#0)
}
{
r13:12 = insert(r1:0,#52,#11 -1)
r5:4 = asl(r3:2,#11 -1)
r28 = #-1024
r9:8 = #1
}
{
r7:6 = mpyu(r4,r13)
r5:4 = insert(r9:8,#2,#62)
}
{
r15:14 = mpyu(r12,r4)
r7:6 += mpyu(r12,r5)
}
{
r7:6 += lsr(r15:14,#32)
r11:10 = mpyu(r13,r5)
r5:4 = combine(##1024 +1024 -4,#0)
}
{
r11:10 += lsr(r7:6,#32)
if (!p0) jump .Lmul_abnormal
p1 = cmp.eq(r14,#0)
p1 = cmp.eq(r6,#0)
}
{
if (!p1) r10 = or(r10,r8)
r6 = extractu(r1,#11,#20)
r7 = extractu(r3,#11,#20)
}
{
r15:14 = neg(r11:10)
r6 += add(r28,r7)
r28 = xor(r1,r3)
}
{
if (!p2.new) r11:10 = r15:14
p2 = cmp.gt(r28,#-1)
p0 = !cmp.gt(r6,r5)
p0 = cmp.gt(r6,r4)
if (!p0.new) jump:nt .Lmul_ovf_unf
}
{
r1:0 = convert_d2df(r11:10)
r6 = add(r6,#-1024 -58)
}
{
r1 += asl(r6,#20)
jumpr r31
}
.falign
.Lpossible_unf1:
{
p0 = cmp.eq(r0,#0)
p0 = bitsclr(r1,r4)
if (!p0.new) jumpr:t r31
r5 = #0x7fff
}
{
p0 = bitsset(r13,r5)
r4 = USR
r5 = #0x030
}
{
if (p0) r4 = or(r4,r5)
}
{
USR = r4
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.falign
.Lmul_ovf_unf:
{
r1:0 = convert_d2df(r11:10)
r13:12 = abs(r11:10)
r7 = add(r6,#-1024 -58)
}
{
r1 += asl(r7,#20)
r7 = extractu(r1,#11,#20)
r4 = ##0x7FEFFFFF
}
{
r7 += add(r6,##-1024 -58)
r5 = #0
}
{
p0 = cmp.gt(r7,##1024 +1024 -2)
if (p0.new) jump:nt .Lmul_ovf
}
{
p0 = cmp.gt(r7,#0)
if (p0.new) jump:nt .Lpossible_unf1
r5 = sub(r6,r5)
r28 = #63
}
{
r4 = #0
r5 = sub(#5,r5)
}
{
p3 = cmp.gt(r11,#-1)
r5 = min(r5,r28)
r11:10 = r13:12
}
{
r28 = USR
r15:14 = extractu(r11:10,r5:4)
}
{
r11:10 = asr(r11:10,r5)
r4 = #0x0030
r1 = insert(r9,#11,#20)
}
{
p0 = cmp.gtu(r9:8,r15:14)
if (!p0.new) r10 = or(r10,r8)
r11 = setbit(r11,#20 +3)
}
{
r15:14 = neg(r11:10)
p1 = bitsclr(r10,#0x7)
if (!p1.new) r28 = or(r4,r28)
}
{
if (!p3) r11:10 = r15:14
USR = r28
}
{
r1:0 = convert_d2df(r11:10)
p0 = dfcmp.eq(r1:0,r1:0)
}
{
r1 = insert(r9,#11 -1,#20 +1)
jumpr r31
}
.falign
.Lmul_ovf:
{
r28 = USR
r13:12 = combine(##0x7fefffff,#-1)
r1:0 = r11:10
}
{
r14 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
r5:4 = combine(##0x7ff00000,#0)
}
{
USR = r28
r14 ^= lsr(r1,#31)
r28 = r14
}
{
p0 = !cmp.eq(r28,#1)
p0 = !cmp.eq(r14,#2)
if (p0.new) r13:12 = r5:4
p0 = dfcmp.eq(r1:0,r1:0)
}
{
r1:0 = insert(r13:12,#63,#0)
jumpr r31
}
.Lmul_abnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r5:4 = extractu(r3:2,#63,#0)
}
{
p3 = cmp.gtu(r13:12,r5:4)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Linvalid_nan
if (!p3) r13:12 = r5:4
if (!p3) r5:4 = r13:12
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x0e)
}
{
p0 = dfclass(r1:0,#0x08)
p0 = dfclass(r3:2,#0x01)
}
{
if (p1) jump .Ltrue_inf
p2 = dfclass(r3:2,#0x01)
}
{
if (p0) jump .Linvalid_zeroinf
if (p2) jump .Ltrue_zero
r28 = ##0x7c000000
}
{
p0 = bitsclr(r1,r28)
if (p0.new) jump:nt .Lmul_tiny
}
{
r28 = cl0(r5:4)
}
{
r28 = add(r28,#-11)
}
{
r5:4 = asl(r5:4,r28)
}
{
r3:2 = insert(r5:4,#63,#0)
r1 -= asl(r28,#20)
}
jump __hexagon_muldf3
.Lmul_tiny:
{
r28 = USR
r1:0 = xor(r1:0,r3:2)
}
{
r28 = or(r28,#0x30)
r1:0 = insert(r9:8,#63,#0)
r5 = extractu(r28,#2,#22)
}
{
USR = r28
p0 = cmp.gt(r5,#1)
if (!p0.new) r0 = #0
r5 ^= lsr(r1,#31)
}
{
p0 = cmp.eq(r5,#3)
if (!p0.new) r0 = #0
jumpr r31
}
.Linvalid_zeroinf:
{
r28 = USR
}
{
r1:0 = #-1
r28 = or(r28,#2)
}
{
USR = r28
}
{
p0 = dfcmp.uo(r1:0,r1:0)
jumpr r31
}
.Linvalid_nan:
{
p0 = dfclass(r3:2,#0x0f)
r28 = convert_df2sf(r1:0)
if (p0.new) r3:2 = r1:0
}
{
r2 = convert_df2sf(r3:2)
r1:0 = #-1
jumpr r31
}
.falign
.Ltrue_zero:
{
r1:0 = r3:2
r3:2 = r1:0
}
.Ltrue_inf:
{
r3 = extract(r3,#1,#31)
}
{
r1 ^= asl(r3,#31)
jumpr r31
}
.size __hexagon_muldf3,.-__hexagon_muldf3
|
AshishD5/bhainlink | 5,659 | library/compiler-builtins/compiler-builtins/src/hexagon/dfdiv.s | .text
.global __hexagon_divdf3
.type __hexagon_divdf3,@function
.global __qdsp_divdf3 ; .set __qdsp_divdf3, __hexagon_divdf3
.global __hexagon_fast_divdf3 ; .set __hexagon_fast_divdf3, __hexagon_divdf3
.global __hexagon_fast2_divdf3 ; .set __hexagon_fast2_divdf3, __hexagon_divdf3
.p2align 5
__hexagon_divdf3:
{
p2 = dfclass(r1:0,#0x02)
p2 = dfclass(r3:2,#0x02)
r13:12 = combine(r3,r1)
r28 = xor(r1,r3)
}
{
if (!p2) jump .Ldiv_abnormal
r7:6 = extractu(r3:2,#23,#52 -23)
r8 = ##0x3f800001
}
{
r9 = or(r8,r6)
r13 = extractu(r13,#11,#52 -32)
r12 = extractu(r12,#11,#52 -32)
p3 = cmp.gt(r28,#-1)
}
.Ldenorm_continue:
{
r11,p0 = sfrecipa(r8,r9)
r10 = and(r8,#-2)
r28 = #1
r12 = sub(r12,r13)
}
{
r10 -= sfmpy(r11,r9):lib
r1 = insert(r28,#11 +1,#52 -32)
r13 = ##0x00800000 << 3
}
{
r11 += sfmpy(r11,r10):lib
r3 = insert(r28,#11 +1,#52 -32)
r10 = and(r8,#-2)
}
{
r10 -= sfmpy(r11,r9):lib
r5 = #-0x3ff +1
r4 = #0x3ff -1
}
{
r11 += sfmpy(r11,r10):lib
p1 = cmp.gt(r12,r5)
p1 = !cmp.gt(r12,r4)
}
{
r13 = insert(r11,#23,#3)
r5:4 = #0
r12 = add(r12,#-61)
}
{
r13 = add(r13,#((-3) << 3))
}
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASL(r7:6, # ( 14 )); r1:0 -= asl(r15:14, # 32); }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 1 )); r1:0 -= asl(r15:14, # 32); }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 16 )); r1:0 -= asl(r15:14, # 32); }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 31 )); r1:0 -= asl(r15:14, # 32); r7:6=# ( 0 ); }
{
r15:14 = sub(r1:0,r3:2)
p0 = cmp.gtu(r3:2,r1:0)
if (!p0.new) r6 = #2
}
{
r5:4 = add(r5:4,r7:6)
if (!p0) r1:0 = r15:14
r15:14 = #0
}
{
p0 = cmp.eq(r1:0,r15:14)
if (!p0.new) r4 = or(r4,r28)
}
{
r7:6 = neg(r5:4)
}
{
if (!p3) r5:4 = r7:6
}
{
r1:0 = convert_d2df(r5:4)
if (!p1) jump .Ldiv_ovf_unf
}
{
r1 += asl(r12,#52 -32)
jumpr r31
}
.Ldiv_ovf_unf:
{
r1 += asl(r12,#52 -32)
r13 = extractu(r1,#11,#52 -32)
}
{
r7:6 = abs(r5:4)
r12 = add(r12,r13)
}
{
p0 = cmp.gt(r12,##0x3ff +0x3ff)
if (p0.new) jump:nt .Ldiv_ovf
}
{
p0 = cmp.gt(r12,#0)
if (p0.new) jump:nt .Lpossible_unf2
}
{
r13 = add(clb(r7:6),#-1)
r12 = sub(#7,r12)
r10 = USR
r11 = #63
}
{
r13 = min(r12,r11)
r11 = or(r10,#0x030)
r7:6 = asl(r7:6,r13)
r12 = #0
}
{
r15:14 = extractu(r7:6,r13:12)
r7:6 = lsr(r7:6,r13)
r3:2 = #1
}
{
p0 = cmp.gtu(r3:2,r15:14)
if (!p0.new) r6 = or(r2,r6)
r7 = setbit(r7,#52 -32+4)
}
{
r5:4 = neg(r7:6)
p0 = bitsclr(r6,#(1<<4)-1)
if (!p0.new) r10 = r11
}
{
USR = r10
if (p3) r5:4 = r7:6
r10 = #-0x3ff -(52 +4)
}
{
r1:0 = convert_d2df(r5:4)
}
{
r1 += asl(r10,#52 -32)
jumpr r31
}
.Lpossible_unf2:
{
r3:2 = extractu(r1:0,#63,#0)
r15:14 = combine(##0x00100000,#0)
r10 = #0x7FFF
}
{
p0 = dfcmp.eq(r15:14,r3:2)
p0 = bitsset(r7,r10)
}
{
if (!p0) jumpr r31
r10 = USR
}
{
r10 = or(r10,#0x30)
}
{
USR = r10
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.Ldiv_ovf:
{
r10 = USR
r3:2 = combine(##0x7fefffff,#-1)
r1 = mux(p3,#0,#-1)
}
{
r7:6 = combine(##0x7ff00000,#0)
r5 = extractu(r10,#2,#22)
r10 = or(r10,#0x28)
}
{
USR = r10
r5 ^= lsr(r1,#31)
r4 = r5
}
{
p0 = !cmp.eq(r4,#1)
p0 = !cmp.eq(r5,#2)
if (p0.new) r3:2 = r7:6
p0 = dfcmp.eq(r3:2,r3:2)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_abnormal:
{
p0 = dfclass(r1:0,#0x0F)
p0 = dfclass(r3:2,#0x0F)
p3 = cmp.gt(r28,#-1)
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x08)
}
{
p2 = dfclass(r1:0,#0x01)
p2 = dfclass(r3:2,#0x01)
}
{
if (!p0) jump .Ldiv_nan
if (p1) jump .Ldiv_invalid
}
{
if (p2) jump .Ldiv_invalid
}
{
p2 = dfclass(r1:0,#(0x0F ^ 0x01))
p2 = dfclass(r3:2,#(0x0F ^ 0x08))
}
{
p1 = dfclass(r1:0,#(0x0F ^ 0x08))
p1 = dfclass(r3:2,#(0x0F ^ 0x01))
}
{
if (!p2) jump .Ldiv_zero_result
if (!p1) jump .Ldiv_inf_result
}
{
p0 = dfclass(r1:0,#0x02)
p1 = dfclass(r3:2,#0x02)
r10 = ##0x00100000
}
{
r13:12 = combine(r3,r1)
r1 = insert(r10,#11 +1,#52 -32)
r3 = insert(r10,#11 +1,#52 -32)
}
{
if (p0) r1 = or(r1,r10)
if (p1) r3 = or(r3,r10)
}
{
r5 = add(clb(r1:0),#-11)
r4 = add(clb(r3:2),#-11)
r10 = #1
}
{
r12 = extractu(r12,#11,#52 -32)
r13 = extractu(r13,#11,#52 -32)
}
{
r1:0 = asl(r1:0,r5)
r3:2 = asl(r3:2,r4)
if (!p0) r12 = sub(r10,r5)
if (!p1) r13 = sub(r10,r4)
}
{
r7:6 = extractu(r3:2,#23,#52 -23)
}
{
r9 = or(r8,r6)
jump .Ldenorm_continue
}
.Ldiv_zero_result:
{
r1 = xor(r1,r3)
r3:2 = #0
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_inf_result:
{
p2 = dfclass(r3:2,#0x01)
p2 = dfclass(r1:0,#(0x0F ^ 0x08))
}
{
r10 = USR
if (!p2) jump 1f
r1 = xor(r1,r3)
}
{
r10 = or(r10,#0x04)
}
{
USR = r10
}
1:
{
r3:2 = combine(##0x7ff00000,#0)
p0 = dfcmp.uo(r3:2,r3:2)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_nan:
{
p0 = dfclass(r1:0,#0x10)
p1 = dfclass(r3:2,#0x10)
if (!p0.new) r1:0 = r3:2
if (!p1.new) r3:2 = r1:0
}
{
r5 = convert_df2sf(r1:0)
r4 = convert_df2sf(r3:2)
}
{
r1:0 = #-1
jumpr r31
}
.Ldiv_invalid:
{
r10 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r10)
jumpr r31
}
.size __hexagon_divdf3,.-__hexagon_divdf3
|
AshishD5/bhainlink | 481 | library/compiler-builtins/compiler-builtins/src/hexagon/umodsi3.s |
FUNCTION_BEGIN __hexagon_umodsi3
{
r2 = cl0(r0)
r3 = cl0(r1)
p0 = cmp.gtu(r1,r0)
}
{
r2 = sub(r3,r2)
if (p0) jumpr r31
}
{
loop0(1f,r2)
p1 = cmp.eq(r2,#0)
r2 = lsl(r1,r2)
}
.falign
1:
{
p0 = cmp.gtu(r2,r0)
if (!p0.new) r0 = sub(r0,r2)
r2 = lsr(r2,#1)
if (p1) r1 = #0
}:endloop0
{
p0 = cmp.gtu(r2,r0)
if (!p0.new) r0 = sub(r0,r1)
jumpr r31
}
FUNCTION_END __hexagon_umodsi3
.globl __qdsp_umodsi3
.set __qdsp_umodsi3, __hexagon_umodsi3
|
AshishD5/bhainlink | 677 | library/compiler-builtins/compiler-builtins/src/hexagon/udivmoddi4.s |
FUNCTION_BEGIN __hexagon_udivmoddi4
{
r6 = cl0(r1:0)
r7 = cl0(r3:2)
r5:4 = r3:2
r3:2 = r1:0
}
{
r10 = sub(r7,r6)
r1:0 = #0
r15:14 = #1
}
{
r11 = add(r10,#1)
r13:12 = lsl(r5:4,r10)
r15:14 = lsl(r15:14,r10)
}
{
p0 = cmp.gtu(r5:4,r3:2)
loop0(1f,r11)
}
{
if (p0) jumpr r31
}
.falign
1:
{
p0 = cmp.gtu(r13:12,r3:2)
}
{
r7:6 = sub(r3:2, r13:12)
r9:8 = add(r1:0, r15:14)
}
{
r1:0 = vmux(p0, r1:0, r9:8)
r3:2 = vmux(p0, r3:2, r7:6)
}
{
r15:14 = lsr(r15:14, #1)
r13:12 = lsr(r13:12, #1)
}:endloop0
{
jumpr r31
}
FUNCTION_END __hexagon_udivmoddi4
.globl __qdsp_udivmoddi4
.set __qdsp_udivmoddi4, __hexagon_udivmoddi4
|
AshishD5/bhainlink | 158 | library/compiler-builtins/compiler-builtins/src/hexagon/func_macro.s | .macro FUNCTION_BEGIN name
.text
.p2align 5
.globl \name
.type \name, @function
\name:
.endm
.macro FUNCTION_END name
.size \name, . - \name
.endm
|
AshishD5/bhainlink | 662 | library/compiler-builtins/compiler-builtins/src/hexagon/udivdi3.s |
FUNCTION_BEGIN __hexagon_udivdi3
{
r6 = cl0(r1:0)
r7 = cl0(r3:2)
r5:4 = r3:2
r3:2 = r1:0
}
{
r10 = sub(r7,r6)
r1:0 = #0
r15:14 = #1
}
{
r11 = add(r10,#1)
r13:12 = lsl(r5:4,r10)
r15:14 = lsl(r15:14,r10)
}
{
p0 = cmp.gtu(r5:4,r3:2)
loop0(1f,r11)
}
{
if (p0) jumpr r31
}
.falign
1:
{
p0 = cmp.gtu(r13:12,r3:2)
}
{
r7:6 = sub(r3:2, r13:12)
r9:8 = add(r1:0, r15:14)
}
{
r1:0 = vmux(p0, r1:0, r9:8)
r3:2 = vmux(p0, r3:2, r7:6)
}
{
r15:14 = lsr(r15:14, #1)
r13:12 = lsr(r13:12, #1)
}:endloop0
{
jumpr r31
}
FUNCTION_END __hexagon_udivdi3
.globl __qdsp_udivdi3
.set __qdsp_udivdi3, __hexagon_udivdi3
|
AshishD5/bhainlink | 4,801 | library/compiler-builtins/compiler-builtins/src/hexagon/dfaddsub.s | .text
.global __hexagon_adddf3
.global __hexagon_subdf3
.type __hexagon_adddf3, @function
.type __hexagon_subdf3, @function
.global __qdsp_adddf3 ; .set __qdsp_adddf3, __hexagon_adddf3
.global __hexagon_fast_adddf3 ; .set __hexagon_fast_adddf3, __hexagon_adddf3
.global __hexagon_fast2_adddf3 ; .set __hexagon_fast2_adddf3, __hexagon_adddf3
.global __qdsp_subdf3 ; .set __qdsp_subdf3, __hexagon_subdf3
.global __hexagon_fast_subdf3 ; .set __hexagon_fast_subdf3, __hexagon_subdf3
.global __hexagon_fast2_subdf3 ; .set __hexagon_fast2_subdf3, __hexagon_subdf3
.p2align 5
__hexagon_adddf3:
{
r4 = extractu(r1,#11,#20)
r5 = extractu(r3,#11,#20)
r13:12 = combine(##0x20000000,#0)
}
{
p3 = dfclass(r1:0,#2)
p3 = dfclass(r3:2,#2)
r9:8 = r13:12
p2 = cmp.gtu(r5,r4)
}
{
if (!p3) jump .Ladd_abnormal
if (p2) r1:0 = r3:2
if (p2) r3:2 = r1:0
if (p2) r5:4 = combine(r4,r5)
}
{
r13:12 = insert(r1:0,#52,#11 -2)
r9:8 = insert(r3:2,#52,#11 -2)
r15 = sub(r4,r5)
r7:6 = combine(#62,#1)
}
.Ladd_continue:
{
r15 = min(r15,r7)
r11:10 = neg(r13:12)
p2 = cmp.gt(r1,#-1)
r14 = #0
}
{
if (!p2) r13:12 = r11:10
r11:10 = extractu(r9:8,r15:14)
r9:8 = ASR(r9:8,r15)
r15:14 = #0
}
{
p1 = cmp.eq(r11:10,r15:14)
if (!p1.new) r8 = or(r8,r6)
r5 = add(r4,#-1024 -60)
p3 = cmp.gt(r3,#-1)
}
{
r13:12 = add(r13:12,r9:8)
r11:10 = sub(r13:12,r9:8)
r7:6 = combine(#54,##2045)
}
{
p0 = cmp.gtu(r4,r7)
p0 = !cmp.gtu(r4,r6)
if (!p0.new) jump:nt .Ladd_ovf_unf
if (!p3) r13:12 = r11:10
}
{
r1:0 = convert_d2df(r13:12)
p0 = cmp.eq(r13,#0)
p0 = cmp.eq(r12,#0)
if (p0.new) jump:nt .Ladd_zero
}
{
r1 += asl(r5,#20)
jumpr r31
}
.falign
__hexagon_subdf3:
{
r3 = togglebit(r3,#31)
jump __qdsp_adddf3
}
.falign
.Ladd_zero:
{
r28 = USR
r1:0 = #0
r3 = #1
}
{
r28 = extractu(r28,#2,#22)
r3 = asl(r3,#31)
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = xor(r1,r3)
jumpr r31
}
.falign
.Ladd_ovf_unf:
{
r1:0 = convert_d2df(r13:12)
p0 = cmp.eq(r13,#0)
p0 = cmp.eq(r12,#0)
if (p0.new) jump:nt .Ladd_zero
}
{
r28 = extractu(r1,#11,#20)
r1 += asl(r5,#20)
}
{
r5 = add(r5,r28)
r3:2 = combine(##0x00100000,#0)
}
{
p0 = cmp.gt(r5,##1024 +1024 -2)
if (p0.new) jump:nt .Ladd_ovf
}
{
p0 = cmp.gt(r5,#0)
if (p0.new) jumpr:t r31
r28 = sub(#1,r5)
}
{
r3:2 = insert(r1:0,#52,#0)
r1:0 = r13:12
}
{
r3:2 = lsr(r3:2,r28)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.falign
.Ladd_ovf:
{
r1:0 = r13:12
r28 = USR
r13:12 = combine(##0x7fefffff,#-1)
}
{
r5 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
r9:8 = combine(##0x7ff00000,#0)
}
{
USR = r28
r5 ^= lsr(r1,#31)
r28 = r5
}
{
p0 = !cmp.eq(r28,#1)
p0 = !cmp.eq(r5,#2)
if (p0.new) r13:12 = r9:8
}
{
r1:0 = insert(r13:12,#63,#0)
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.Ladd_abnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r9:8 = extractu(r3:2,#63,#0)
}
{
p3 = cmp.gtu(r13:12,r9:8)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Linvalid_nan_add
if (!p3) r13:12 = r9:8
if (!p3) r9:8 = r13:12
}
{
p1 = dfclass(r1:0,#0x08)
if (p1.new) jump:nt .Linf_add
}
{
p2 = dfclass(r3:2,#0x01)
if (p2.new) jump:nt .LB_zero
r13:12 = #0
}
{
p0 = dfclass(r1:0,#4)
if (p0.new) jump:nt .Ladd_two_subnormal
r13:12 = combine(##0x20000000,#0)
}
{
r4 = extractu(r1,#11,#20)
r5 = #1
r9:8 = asl(r9:8,#11 -2)
}
{
r13:12 = insert(r1:0,#52,#11 -2)
r15 = sub(r4,r5)
r7:6 = combine(#62,#1)
jump .Ladd_continue
}
.Ladd_two_subnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r9:8 = extractu(r3:2,#63,#0)
}
{
r13:12 = neg(r13:12)
r9:8 = neg(r9:8)
p0 = cmp.gt(r1,#-1)
p1 = cmp.gt(r3,#-1)
}
{
if (p0) r13:12 = r1:0
if (p1) r9:8 = r3:2
}
{
r13:12 = add(r13:12,r9:8)
}
{
r9:8 = neg(r13:12)
p0 = cmp.gt(r13,#-1)
r3:2 = #0
}
{
if (!p0) r1:0 = r9:8
if (p0) r1:0 = r13:12
r3 = ##0x80000000
}
{
if (!p0) r1 = or(r1,r3)
p0 = dfcmp.eq(r1:0,r3:2)
if (p0.new) jump:nt .Lzero_plus_zero
}
{
jumpr r31
}
.Linvalid_nan_add:
{
r28 = convert_df2sf(r1:0)
p0 = dfclass(r3:2,#0x0f)
if (p0.new) r3:2 = r1:0
}
{
r2 = convert_df2sf(r3:2)
r1:0 = #-1
jumpr r31
}
.falign
.LB_zero:
{
p0 = dfcmp.eq(r13:12,r1:0)
if (!p0.new) jumpr:t r31
}
.Lzero_plus_zero:
{
p0 = cmp.eq(r1:0,r3:2)
if (p0.new) jumpr:t r31
}
{
r28 = USR
}
{
r28 = extractu(r28,#2,#22)
r1:0 = #0
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
jumpr r31
}
.Linf_add:
{
p0 = !cmp.eq(r1,r3)
p0 = dfclass(r3:2,#8)
if (!p0.new) jumpr:t r31
}
{
r2 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r2)
jumpr r31
}
.size __hexagon_adddf3,.-__hexagon_adddf3
|
AshishD5/bhainlink | 785 | library/compiler-builtins/compiler-builtins/src/hexagon/sfdiv_opt.s |
FUNCTION_BEGIN __hexagon_divsf3
{
r2,p0 = sfrecipa(r0,r1)
r4 = sffixupd(r0,r1)
r3 = ##0x3f800000
}
{
r5 = sffixupn(r0,r1)
r3 -= sfmpy(r4,r2):lib
r6 = ##0x80000000
r7 = r3
}
{
r2 += sfmpy(r3,r2):lib
r3 = r7
r6 = r5
r0 = and(r6,r5)
}
{
r3 -= sfmpy(r4,r2):lib
r0 += sfmpy(r5,r2):lib
}
{
r2 += sfmpy(r3,r2):lib
r6 -= sfmpy(r0,r4):lib
}
{
r0 += sfmpy(r6,r2):lib
}
{
r5 -= sfmpy(r0,r4):lib
}
{
r0 += sfmpy(r5,r2,p0):scale
jumpr r31
}
FUNCTION_END __hexagon_divsf3
.global __qdsp_divsf3 ; .set __qdsp_divsf3, __hexagon_divsf3
.global __hexagon_fast_divsf3 ; .set __hexagon_fast_divsf3, __hexagon_divsf3
.global __hexagon_fast2_divsf3 ; .set __hexagon_fast2_divsf3, __hexagon_divsf3
|
AshishD5/bhainlink | 584 | library/compiler-builtins/compiler-builtins/src/hexagon/modsi3.s |
FUNCTION_BEGIN __hexagon_modsi3
{
p2 = cmp.ge(r0,#0)
r2 = abs(r0)
r1 = abs(r1)
}
{
r3 = cl0(r2)
r4 = cl0(r1)
p0 = cmp.gtu(r1,r2)
}
{
r3 = sub(r4,r3)
if (p0) jumpr r31
}
{
p1 = cmp.eq(r3,#0)
loop0(1f,r3)
r0 = r2
r2 = lsl(r1,r3)
}
.falign
1:
{
p0 = cmp.gtu(r2,r0)
if (!p0.new) r0 = sub(r0,r2)
r2 = lsr(r2,#1)
if (p1) r1 = #0
}:endloop0
{
p0 = cmp.gtu(r2,r0)
if (!p0.new) r0 = sub(r0,r1)
if (p2) jumpr r31
}
{
r0 = neg(r0)
jumpr r31
}
FUNCTION_END __hexagon_modsi3
.globl __qdsp_modsi3
.set __qdsp_modsi3, __hexagon_modsi3
|
AshishD5/bhainlink | 7,236 | library/compiler-builtins/compiler-builtins/src/hexagon/dffma.s | .text
.global __hexagon_fmadf4
.type __hexagon_fmadf4,@function
.global __hexagon_fmadf5
.type __hexagon_fmadf5,@function
.global __qdsp_fmadf5 ; .set __qdsp_fmadf5, __hexagon_fmadf5
.p2align 5
__hexagon_fmadf4:
__hexagon_fmadf5:
fma:
{
p0 = dfclass(r1:0,#2)
p0 = dfclass(r3:2,#2)
r13:12 = #0
r15:14 = #0
}
{
r13:12 = insert(r1:0,#52,#11 -3)
r15:14 = insert(r3:2,#52,#11 -3)
r7 = ##0x10000000
allocframe(#32)
}
{
r9:8 = mpyu(r12,r14)
if (!p0) jump .Lfma_abnormal_ab
r13 = or(r13,r7)
r15 = or(r15,r7)
}
{
p0 = dfclass(r5:4,#2)
if (!p0.new) jump:nt .Lfma_abnormal_c
r11:10 = combine(r7,#0)
r7:6 = combine(#0,r9)
}
.Lfma_abnormal_c_restart:
{
r7:6 += mpyu(r14,r13)
r11:10 = insert(r5:4,#52,#11 -3)
memd(r29+#0) = r17:16
memd(r29+#8) = r19:18
}
{
r7:6 += mpyu(r12,r15)
r19:18 = neg(r11:10)
p0 = cmp.gt(r5,#-1)
r28 = xor(r1,r3)
}
{
r18 = extractu(r1,#11,#20)
r19 = extractu(r3,#11,#20)
r17:16 = combine(#0,r7)
if (!p0) r11:10 = r19:18
}
{
r17:16 += mpyu(r13,r15)
r9:8 = combine(r6,r8)
r18 = add(r18,r19)
r19 = extractu(r5,#11,#20)
}
{
r18 = add(r18,#-1023 +(4))
p3 = !cmp.gt(r28,#-1)
r7:6 = #0
r15:14 = #0
}
{
r7:6 = sub(r7:6,r9:8,p3):carry
p0 = !cmp.gt(r28,#-1)
p1 = cmp.gt(r19,r18)
if (p1.new) r19:18 = combine(r18,r19)
}
{
r15:14 = sub(r15:14,r17:16,p3):carry
if (p0) r9:8 = r7:6
r7:6 = #0
r19 = sub(r18,r19)
}
{
if (p0) r17:16 = r15:14
p0 = cmp.gt(r19,#63)
if (p1) r9:8 = r7:6
if (p1) r7:6 = r9:8
}
{
if (p1) r17:16 = r11:10
if (p1) r11:10 = r17:16
if (p0) r19 = add(r19,#-64)
r28 = #63
}
{
if (p0) r7:6 = r11:10
r28 = asr(r11,#31)
r13 = min(r19,r28)
r12 = #0
}
{
if (p0) r11:10 = combine(r28,r28)
r5:4 = extract(r7:6,r13:12)
r7:6 = lsr(r7:6,r13)
r12 = sub(#64,r13)
}
{
r15:14 = #0
r28 = #-2
r7:6 |= lsl(r11:10,r12)
r11:10 = asr(r11:10,r13)
}
{
p3 = cmp.gtu(r5:4,r15:14)
if (p3.new) r6 = and(r6,r28)
r15:14 = #1
r5:4 = #0
}
{
r9:8 = add(r7:6,r9:8,p3):carry
}
{
r17:16 = add(r11:10,r17:16,p3):carry
r28 = #62
}
{
r12 = add(clb(r17:16),#-2)
if (!cmp.eq(r12.new,r28)) jump:t 1f
}
{
r11:10 = extractu(r9:8,#62,#2)
r9:8 = asl(r9:8,#62)
r18 = add(r18,#-62)
}
{
r17:16 = insert(r11:10,#62,#0)
}
{
r12 = add(clb(r17:16),#-2)
}
.falign
1:
{
r11:10 = asl(r17:16,r12)
r5:4 |= asl(r9:8,r12)
r13 = sub(#64,r12)
r18 = sub(r18,r12)
}
{
r11:10 |= lsr(r9:8,r13)
p2 = cmp.gtu(r15:14,r5:4)
r28 = #1023 +1023 -2
}
{
if (!p2) r10 = or(r10,r14)
p0 = !cmp.gt(r18,r28)
p0 = cmp.gt(r18,#1)
if (!p0.new) jump:nt .Lfma_ovf_unf
}
{
p0 = cmp.gtu(r15:14,r11:10)
r1:0 = convert_d2df(r11:10)
r18 = add(r18,#-1023 -60)
r17:16 = memd(r29+#0)
}
{
r1 += asl(r18,#20)
r19:18 = memd(r29+#8)
if (!p0) dealloc_return
}
.Ladd_yields_zero:
{
r28 = USR
r1:0 = #0
}
{
r28 = extractu(r28,#2,#22)
r17:16 = memd(r29+#0)
r19:18 = memd(r29+#8)
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
dealloc_return
}
.Lfma_ovf_unf:
{
p0 = cmp.gtu(r15:14,r11:10)
if (p0.new) jump:nt .Ladd_yields_zero
}
{
r1:0 = convert_d2df(r11:10)
r18 = add(r18,#-1023 -60)
r28 = r18
}
{
r1 += asl(r18,#20)
r7 = extractu(r1,#11,#20)
}
{
r6 = add(r18,r7)
r17:16 = memd(r29+#0)
r19:18 = memd(r29+#8)
r9:8 = abs(r11:10)
}
{
p0 = cmp.gt(r6,##1023 +1023)
if (p0.new) jump:nt .Lfma_ovf
}
{
p0 = cmp.gt(r6,#0)
if (p0.new) jump:nt .Lpossible_unf0
}
{
r7 = add(clb(r9:8),#-2)
r6 = sub(#1+5,r28)
p3 = cmp.gt(r11,#-1)
}
{
r6 = add(r6,r7)
r9:8 = asl(r9:8,r7)
r1 = USR
r28 = #63
}
{
r7 = min(r6,r28)
r6 = #0
r0 = #0x0030
}
{
r3:2 = extractu(r9:8,r7:6)
r9:8 = asr(r9:8,r7)
}
{
p0 = cmp.gtu(r15:14,r3:2)
if (!p0.new) r8 = or(r8,r14)
r9 = setbit(r9,#20 +3)
}
{
r11:10 = neg(r9:8)
p1 = bitsclr(r8,#(1<<3)-1)
if (!p1.new) r1 = or(r1,r0)
r3:2 = #0
}
{
if (p3) r11:10 = r9:8
USR = r1
r28 = #-1023 -(52 +3)
}
{
r1:0 = convert_d2df(r11:10)
}
{
r1 += asl(r28,#20)
dealloc_return
}
.Lpossible_unf0:
{
r28 = ##0x7fefffff
r9:8 = abs(r11:10)
}
{
p0 = cmp.eq(r0,#0)
p0 = bitsclr(r1,r28)
if (!p0.new) dealloc_return:t
r28 = #0x7fff
}
{
p0 = bitsset(r9,r28)
r3 = USR
r2 = #0x0030
}
{
if (p0) r3 = or(r3,r2)
}
{
USR = r3
}
{
p0 = dfcmp.eq(r1:0,r1:0)
dealloc_return
}
.Lfma_ovf:
{
r28 = USR
r11:10 = combine(##0x7fefffff,#-1)
r1:0 = r11:10
}
{
r9:8 = combine(##0x7ff00000,#0)
r3 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
}
{
USR = r28
r3 ^= lsr(r1,#31)
r2 = r3
}
{
p0 = !cmp.eq(r2,#1)
p0 = !cmp.eq(r3,#2)
}
{
p0 = dfcmp.eq(r9:8,r9:8)
if (p0.new) r11:10 = r9:8
}
{
r1:0 = insert(r11:10,#63,#0)
dealloc_return
}
.Lfma_abnormal_ab:
{
r9:8 = extractu(r1:0,#63,#0)
r11:10 = extractu(r3:2,#63,#0)
deallocframe
}
{
p3 = cmp.gtu(r9:8,r11:10)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Lnan
if (!p3) r9:8 = r11:10
if (!p3) r11:10 = r9:8
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x0e)
}
{
p0 = dfclass(r1:0,#0x08)
p0 = dfclass(r3:2,#0x01)
}
{
if (p1) jump .Lab_inf
p2 = dfclass(r3:2,#0x01)
}
{
if (p0) jump .Linvalid
if (p2) jump .Lab_true_zero
r28 = ##0x7c000000
}
{
p0 = bitsclr(r1,r28)
if (p0.new) jump:nt .Lfma_ab_tiny
}
{
r28 = add(clb(r11:10),#-11)
}
{
r11:10 = asl(r11:10,r28)
}
{
r3:2 = insert(r11:10,#63,#0)
r1 -= asl(r28,#20)
}
jump fma
.Lfma_ab_tiny:
r9:8 = combine(##0x00100000,#0)
{
r1:0 = insert(r9:8,#63,#0)
r3:2 = insert(r9:8,#63,#0)
}
jump fma
.Lab_inf:
{
r3:2 = lsr(r3:2,#63)
p0 = dfclass(r5:4,#0x10)
}
{
r1:0 ^= asl(r3:2,#63)
if (p0) jump .Lnan
}
{
p1 = dfclass(r5:4,#0x08)
if (p1.new) jump:nt .Lfma_inf_plus_inf
}
{
jumpr r31
}
.falign
.Lfma_inf_plus_inf:
{
p0 = dfcmp.eq(r1:0,r5:4)
if (!p0.new) jump:nt .Linvalid
}
{
jumpr r31
}
.Lnan:
{
p0 = dfclass(r3:2,#0x10)
p1 = dfclass(r5:4,#0x10)
if (!p0.new) r3:2 = r1:0
if (!p1.new) r5:4 = r1:0
}
{
r3 = convert_df2sf(r3:2)
r2 = convert_df2sf(r5:4)
}
{
r3 = convert_df2sf(r1:0)
r1:0 = #-1
jumpr r31
}
.Linvalid:
{
r28 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r28)
jumpr r31
}
.Lab_true_zero:
{
p0 = dfclass(r5:4,#0x10)
if (p0.new) jump:nt .Lnan
if (p0.new) r1:0 = r5:4
}
{
p0 = dfcmp.eq(r3:2,r5:4)
r1 = lsr(r1,#31)
}
{
r3 ^= asl(r1,#31)
if (!p0) r1:0 = r5:4
if (!p0) jumpr r31
}
{
p0 = cmp.eq(r3:2,r5:4)
if (p0.new) jumpr:t r31
r1:0 = r3:2
}
{
r28 = USR
}
{
r28 = extractu(r28,#2,#22)
r1:0 = #0
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
jumpr r31
}
.falign
.Lfma_abnormal_c:
{
p0 = dfclass(r5:4,#0x10)
if (p0.new) jump:nt .Lnan
if (p0.new) r1:0 = r5:4
deallocframe
}
{
p0 = dfclass(r5:4,#0x08)
if (p0.new) r1:0 = r5:4
if (p0.new) jumpr:nt r31
}
{
p0 = dfclass(r5:4,#0x01)
if (p0.new) jump:nt __hexagon_muldf3
r28 = #1
}
{
allocframe(#32)
r11:10 = #0
r5 = insert(r28,#11,#20)
jump .Lfma_abnormal_c_restart
}
.size fma,.-fma
|
AshishD5/bhainlink | 4,337 | library/compiler-builtins/compiler-builtins/src/hexagon/dfsqrt.s | .text
// __hexagon_sqrtdf2 / __hexagon_sqrt: IEEE double-precision square root.
//   In:       r1:0 = x
//   Out:      r1:0 = sqrt(x)
//   Clobbers: r2-r15, r28, p0-p3 (and USR via the sf* float ops)
// Normal path: seed 1/sqrt with the hardware sfinvsqrta estimate, refine
// it with single-precision sfmpy:lib steps, then finish with a 64-bit
// integer square root plus two conditional correction rounds before
// packing the result via convert_ud2df and an exponent add.
.global __hexagon_sqrtdf2
.type __hexagon_sqrtdf2,@function
.global __hexagon_sqrt
.type __hexagon_sqrt,@function
// The __qdsp_* and __hexagon_fast*_ names are plain aliases of this code.
.global __qdsp_sqrtdf2 ; .set __qdsp_sqrtdf2, __hexagon_sqrtdf2; .type __qdsp_sqrtdf2,@function
.global __qdsp_sqrt ; .set __qdsp_sqrt, __hexagon_sqrt; .type __qdsp_sqrt,@function
.global __hexagon_fast_sqrtdf2 ; .set __hexagon_fast_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast_sqrtdf2,@function
.global __hexagon_fast_sqrt ; .set __hexagon_fast_sqrt, __hexagon_sqrt; .type __hexagon_fast_sqrt,@function
.global __hexagon_fast2_sqrtdf2 ; .set __hexagon_fast2_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast2_sqrtdf2,@function
.global __hexagon_fast2_sqrt ; .set __hexagon_fast2_sqrt, __hexagon_sqrt; .type __hexagon_fast2_sqrt,@function
.type sqrt,@function
.p2align 5
__hexagon_sqrtdf2:
__hexagon_sqrt:
// Unpack: r15:14 = top 24 mantissa bits, r28 = biased exponent; r9 (below)
// is a single-precision encoding built as 0x3f000004 | mantissa bits, used
// to seed sfinvsqrta.
{
r15:14 = extractu(r1:0,#23 +1,#52 -23)
r28 = extractu(r1,#11,#52 -32)
r5:4 = combine(##0x3f000004,#1)
}
// Fast path only for positive, finite, normal inputs; everything else
// (zero, denormal, negative, inf, NaN) goes to .Lsqrt_abnormal.
{
p2 = dfclass(r1:0,#0x02)
p2 = cmp.gt(r1,#-1)
if (!p2.new) jump:nt .Lsqrt_abnormal
r9 = or(r5,r14)
}
.Ldenormal_restart:
// Re-entry point after a denormal input has been normalized.
{
r11:10 = r1:0
r7,p0 = sfinvsqrta(r9)
r5 = and(r5,#-16)
r3:2 = #0
}
// Refine the reciprocal-sqrt estimate r7 with single-precision multiply
// steps (r9 = low exponent bit is also captured here for odd/even scaling).
{
r3 += sfmpy(r7,r9):lib
r2 += sfmpy(r7,r5):lib
r6 = r5
r9 = and(r28,#1)
}
{
r6 -= sfmpy(r3,r2):lib
r11 = insert(r4,#11 +1,#52 -32)
p1 = cmp.gtu(r9,#0)
}
{
r3 += sfmpy(r3,r6):lib
r2 += sfmpy(r2,r6):lib
r6 = r5
r9 = mux(p1,#8,#9)
}
// p1 (odd exponent) selects the mantissa pre-shifts so the integer sqrt
// below sees the radicand at a fixed scale.
{
r6 -= sfmpy(r3,r2):lib
r11:10 = asl(r11:10,r9)
r9 = mux(p1,#3,#2)
}
{
r2 += sfmpy(r2,r6):lib
r15:14 = asl(r11:10,r9)
}
{
r2 = and(r2,##0x007fffff)
}
{
r2 = add(r2,##0x00800000 - 3)
r9 = mux(p1,#7,#8)
}
{
r8 = asl(r2,r9)
r9 = mux(p1,#15-(1+1),#15-(1+0))
}
// Integer phase: r8 = scaled reciprocal-sqrt; build the 64-bit root in
// r13:12, roughly 16 bits per refinement pass (three passes below).
{
r13:12 = mpyu(r8,r15)
}
{
r1:0 = asl(r11:10,#15)
r15:14 = mpyu(r13,r13)
p1 = cmp.eq(r0,r0)
}
{
r1:0 -= asl(r15:14,#15)
r15:14 = mpyu(r13,r12)
p2 = cmp.eq(r0,r0)
}
{
r1:0 -= lsr(r15:14,#16)
p3 = cmp.eq(r0,r0)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
r9 = add(r9,#16)
r1:0 = asl(r11:10,#31)
}
{
r15:14 = mpyu(r13,r13)
r1:0 -= mpyu(r13,r12)
}
{
r1:0 -= asl(r15:14,#31)
r15:14 = mpyu(r12,r12)
}
{
r1:0 -= lsr(r15:14,#33)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
r9 = add(r9,#16)
r1:0 = asl(r11:10,#47)
}
{
r15:14 = mpyu(r13,r13)
}
{
r1:0 -= asl(r15:14,#47)
r15:14 = mpyu(r13,r12)
}
{
r1:0 -= asl(r15:14,#16)
r15:14 = mpyu(r12,r12)
}
{
r1:0 -= lsr(r15:14,#17)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
}
// Compute the exact remainder radicand - root^2 (with carry chains) and
// run two conditional "root+1" correction rounds; any nonzero leftover
// remainder later sets the sticky bit.
{
r3:2 = mpyu(r13,r12)
r5:4 = mpyu(r12,r12)
r15:14 = #0
r1:0 = #0
}
{
r3:2 += lsr(r5:4,#33)
r5:4 += asl(r3:2,#33)
p1 = cmp.eq(r0,r0)
}
{
r7:6 = mpyu(r13,r13)
r1:0 = sub(r1:0,r5:4,p1):carry
r9:8 = #1
}
{
r7:6 += lsr(r3:2,#31)
r9:8 += asl(r13:12,#1)
}
{
r15:14 = sub(r11:10,r7:6,p1):carry
r5:4 = sub(r1:0,r9:8,p2):carry
r7:6 = #1
r11:10 = #0
}
{
r3:2 = sub(r15:14,r11:10,p2):carry
r7:6 = add(r13:12,r7:6)
r28 = add(r28,#-0x3ff)
}
{
if (p2) r13:12 = r7:6
if (p2) r1:0 = r5:4
if (p2) r15:14 = r3:2
}
{
r5:4 = sub(r1:0,r9:8,p3):carry
r7:6 = #1
r28 = asr(r28,#1)
}
{
r3:2 = sub(r15:14,r11:10,p3):carry
r7:6 = add(r13:12,r7:6)
}
{
if (p3) r13:12 = r7:6
if (p3) r1:0 = r5:4
r2 = #1
}
// Nonzero remainder -> set the sticky bit in the low word of the root.
{
p0 = cmp.eq(r1:0,r11:10)
if (!p0.new) r12 = or(r12,r2)
r3 = cl0(r13:12)
r28 = add(r28,#-63)
}
// Pack: convert the integer root to double (this performs the rounding),
// then splice in the adjusted exponent with a single add to the high word.
{
r1:0 = convert_ud2df(r13:12)
r28 = add(r28,r3)
}
{
r1 += asl(r28,#52 -32)
jumpr r31
}
.Lsqrt_abnormal:
// Zero (dfclass #0x01): sqrt(+/-0) = +/-0, return the input unchanged.
{
p0 = dfclass(r1:0,#0x01)
if (p0.new) jumpr:t r31
}
// NaN (dfclass #0x10): handled at .Lsqrt_nan.
{
p0 = dfclass(r1:0,#0x10)
if (p0.new) jump:nt .Lsqrt_nan
}
// Negative non-zero: invalid operation (r28 = SNaN single pattern).
{
p0 = cmp.gt(r1,#-1)
if (!p0.new) jump:nt .Lsqrt_invalid_neg
if (!p0.new) r28 = ##0x7F800001
}
// +Inf (dfclass #0x08): sqrt(+inf) = +inf.
{
p0 = dfclass(r1:0,#0x08)
if (p0.new) jumpr:nt r31
}
// Denormal: normalize the mantissa, rebuild the exponent field, and
// restart the main path with the adjusted seed values.
{
r1:0 = extractu(r1:0,#52,#0)
}
{
r28 = add(clb(r1:0),#-11)
}
{
r1:0 = asl(r1:0,r28)
r28 = sub(#1,r28)
}
{
r1 = insert(r28,#1,#52 -32)
}
{
r3:2 = extractu(r1:0,#23 +1,#52 -23)
r5 = ##0x3f000004
}
{
r9 = or(r5,r2)
r5 = and(r5,#-16)
jump .Ldenormal_restart
}
.Lsqrt_nan:
// The convert_df2sf of the NaN raises the appropriate exception flags;
// the returned value is the all-ones NaN pattern.
{
r28 = convert_df2sf(r1:0)
r1:0 = #-1
jumpr r31
}
.Lsqrt_invalid_neg:
// Converting the single SNaN 0x7F800001 raises invalid and yields the
// default NaN result.
{
r1:0 = convert_sf2df(r28)
jumpr r31
}
.size __hexagon_sqrt,.-__hexagon_sqrt
.size __hexagon_sqrtdf2,.-__hexagon_sqrtdf2
|
AshishD5/bhainlink | 825 | library/compiler-builtins/compiler-builtins/src/hexagon/moddi3.s |
// __hexagon_moddi3: signed 64-bit remainder (i64 % i64).
//   In:       r1:0 = dividend, r3:2 = divisor
//   Out:      r1:0 = remainder; its sign follows the dividend (p3)
//   Clobbers: r2-r15, p0-p3
// Shift-and-subtract restoring division on the magnitudes, one loop
// iteration per candidate quotient bit (cl0 difference sizes the loop).
FUNCTION_BEGIN __hexagon_moddi3
{
p3 = tstbit(r1,#31)
}
{
r1:0 = abs(r1:0)
r3:2 = abs(r3:2)
}
// From here on: r3:2 = |dividend| (working remainder), r5:4 = |divisor|.
{
r6 = cl0(r1:0)
r7 = cl0(r3:2)
r5:4 = r3:2
r3:2 = r1:0
}
{
r10 = sub(r7,r6)
r1:0 = #0
r15:14 = #1
}
// r13:12 = divisor aligned under the dividend's MSB; r15:14 = matching
// quotient bit (the quotient itself is discarded for modulo).
{
r11 = add(r10,#1)
r13:12 = lsl(r5:4,r10)
r15:14 = lsl(r15:14,r10)
}
{
p0 = cmp.gtu(r5:4,r3:2)
loop0(1f,r11)
}
// Divisor already larger than dividend: the remainder is the dividend.
{
if (p0) jump .hexagon_moddi3_return
}
.falign
1:
{
p0 = cmp.gtu(r13:12,r3:2)
}
{
r7:6 = sub(r3:2, r13:12)
r9:8 = add(r1:0, r15:14)
}
// Keep the trial subtraction only when the shifted divisor fits.
{
r1:0 = vmux(p0, r1:0, r9:8)
r3:2 = vmux(p0, r3:2, r7:6)
}
{
r15:14 = lsr(r15:14, #1)
r13:12 = lsr(r13:12, #1)
}:endloop0
.hexagon_moddi3_return:
// Apply the dividend's sign to the magnitude remainder in r3:2.
{
r1:0 = neg(r3:2)
}
{
r1:0 = vmux(p3,r1:0,r3:2)
jumpr r31
}
FUNCTION_END __hexagon_moddi3
.globl __qdsp_moddi3
.set __qdsp_moddi3, __hexagon_moddi3
|
AshishD5/bhainlink | 864 | library/compiler-builtins/compiler-builtins/src/hexagon/divdi3.s |
// __hexagon_divdi3: signed 64-bit division (i64 / i64), truncating.
//   In:       r1:0 = dividend, r3:2 = divisor
//   Out:      r1:0 = quotient; sign = sign(dividend) XOR sign(divisor)
//   Clobbers: r2-r15, p0-p3
// Shift-and-subtract restoring division on the magnitudes.
FUNCTION_BEGIN __hexagon_divdi3
{
p2 = tstbit(r1,#31)
p3 = tstbit(r3,#31)
}
{
r1:0 = abs(r1:0)
r3:2 = abs(r3:2)
}
// From here on: r3:2 = |dividend| (working remainder), r5:4 = |divisor|.
{
r6 = cl0(r1:0)
r7 = cl0(r3:2)
r5:4 = r3:2
r3:2 = r1:0
}
// p3 = final sign of the quotient; r1:0 accumulates the quotient bits.
{
p3 = xor(p2,p3)
r10 = sub(r7,r6)
r1:0 = #0
r15:14 = #1
}
{
r11 = add(r10,#1)
r13:12 = lsl(r5:4,r10)
r15:14 = lsl(r15:14,r10)
}
{
p0 = cmp.gtu(r5:4,r3:2)
loop0(1f,r11)
}
// Divisor larger than dividend: quotient is 0 (sign applied below).
{
if (p0) jump .hexagon_divdi3_return
}
.falign
1:
{
p0 = cmp.gtu(r13:12,r3:2)
}
{
r7:6 = sub(r3:2, r13:12)
r9:8 = add(r1:0, r15:14)
}
// Accept the trial subtraction and set the quotient bit only when the
// shifted divisor still fits under the remainder.
{
r1:0 = vmux(p0, r1:0, r9:8)
r3:2 = vmux(p0, r3:2, r7:6)
}
{
r15:14 = lsr(r15:14, #1)
r13:12 = lsr(r13:12, #1)
}:endloop0
.hexagon_divdi3_return:
// Negate the magnitude quotient when the operand signs differ.
{
r3:2 = neg(r1:0)
}
{
r1:0 = vmux(p3,r3:2,r1:0)
jumpr r31
}
FUNCTION_END __hexagon_divdi3
.globl __qdsp_divdi3
.set __qdsp_divdi3, __hexagon_divdi3
|
AshishD5/bhainlink | 543 | library/compiler-builtins/compiler-builtins/src/hexagon/udivsi3.s |
// __hexagon_udivsi3: unsigned 32-bit division (u32 / u32).
//   In:       r0 = dividend, r1 = divisor
//   Out:      r0 = quotient
//   Clobbers: r1-r6, p0
FUNCTION_BEGIN __hexagon_udivsi3
{
r2 = cl0(r0)
r3 = cl0(r1)
r5:4 = combine(#1,#0)
p0 = cmp.gtu(r1,r0)
}
// Packet reads use pre-packet values, so after this packet:
// r1 = dividend, r4 = divisor, r0 = 0 (quotient accumulator, from the
// r4=#0 set above).  If divisor > dividend, quotient 0 is returned.
{
r6 = sub(r3,r2)
r4 = r1
r1:0 = combine(r0,r4)
if (p0) jumpr r31
}
// r3:2 = (quotient bit, divisor) both left-aligned under the dividend.
{
r3:2 = vlslw(r5:4,r6)
loop0(1f,r6)
}
.falign
1:
// One quotient bit per iteration: subtract the shifted divisor when it
// fits and accumulate the matching quotient bit from r3.
{
p0 = cmp.gtu(r2,r1)
if (!p0.new) r1 = sub(r1,r2)
if (!p0.new) r0 = add(r0,r3)
r3:2 = vlsrw(r3:2,#1)
}:endloop0
// Final trial at shift 0.
{
p0 = cmp.gtu(r2,r1)
if (!p0.new) r0 = add(r0,r3)
jumpr r31
}
FUNCTION_END __hexagon_udivsi3
.globl __qdsp_udivsi3
.set __qdsp_udivsi3, __hexagon_udivsi3
|
AshishD5/bhainlink | 764 | library/compiler-builtins/compiler-builtins/src/hexagon/memcpy_likely_aligned.s |
// __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes(r0=dst, r1=src, r2=len)
// Fast path for copies where dst and src are both 8-byte aligned
// (checked with bitsclr); anything else tail-calls the real memcpy.
// The name documents the intended precondition: len >= 32 and a
// multiple of 8.  Returns dst in r0 (the pointer is rewound at the end).
FUNCTION_BEGIN __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes
{
p0 = bitsclr(r1,#7)
p0 = bitsclr(r0,#7)
if (p0.new) r5:4 = memd(r1)
r3 = #-3
}
// Loop count r3 = len/8 - 3: three doublewords are moved by the
// software-pipelined prologue/epilogue packets.
{
if (!p0) jump .Lmemcpy_call
if (p0) memd(r0++#8) = r5:4
if (p0) r5:4 = memd(r1+#8)
r3 += lsr(r2,#3)
}
{
memd(r0++#8) = r5:4
r5:4 = memd(r1+#16)
r1 = add(r1,#24)
loop0(1f,r3)
}
.falign
1:
{
memd(r0++#8) = r5:4
r5:4 = memd(r1++#8)
}:endloop0
// Store the final doubleword and rewind r0 by len-8 back to dst.
{
memd(r0) = r5:4
r0 -= add(r2,#-8)
jumpr r31
}
FUNCTION_END __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes
.Lmemcpy_call:
jump memcpy@PLT
.globl __qdsp_memcpy_likely_aligned_min32bytes_mult8bytes
.set __qdsp_memcpy_likely_aligned_min32bytes_mult8bytes, __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes
|
AshishD5/bhainlink | 5,120 | library/compiler-builtins/compiler-builtins/src/hexagon/fastmath2_dlib_asm.s | .text
// __hexagon_fast2_dadd_asm: addition in this library's "fast2" double
// format: a 64-bit pair R1:0 whose low halfword (R0.L) is a signed
// 16-bit exponent and whose remaining bits are the signed mantissa.
// NOTE(review): format inferred from the SXTH/insert usage here --
// confirm against the C callers.
//   In:  R1:0 = a, R3:2 = b.  Out: R1:0 = a + b, renormalized via clb.
// Exponent-overflow/underflow path (.Ldenorma) returns mantissa 0 with
// the sentinel exponent 0x8001.
.global __hexagon_fast2_dadd_asm
.type __hexagon_fast2_dadd_asm, @function
__hexagon_fast2_dadd_asm:
.falign
// R4/R5 = exponents; R6 = |exponent difference| (VABSDIFFH, clamped to 62).
{
R7:6 = VABSDIFFH(R1:0, R3:2)
R9 = #62
R4 = SXTH(R0)
R5 = SXTH(R2)
} {
R6 = SXTH(R6)
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
if ( P0) R4 = #1
if (!P0) R5 = #1
R0.L = #0
R6 = MIN(R6, R9)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
R2.L = #0
R11:10 = #0
} {
// Align both mantissas (smaller one shifted by the exponent gap + 1
// headroom bit) and add.
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = add(R1:0, R3:2)
R10.L = #0x8001
} {
R4 = clb(R1:0)
R9 = #58
} {
R4 = add(R4, #-1)
p0 = cmp.gt(R4, R9)
} {
// Renormalize; >58 redundant sign bits means the sum vanished.
R1:0 = ASL(R1:0, R4)
R8 = SUB(R8, R4)
if(p0) jump .Ldenorma
} {
R0 = insert(R8, #16, #0)
jumpr r31
}
.Ldenorma:
// Zero result: mantissa 0 with sentinel exponent 0x8001 (in R10.L).
{
R1:0 = R11:10
jumpr r31
}
.text
// __hexagon_fast2_dsub_asm: subtraction in the "fast2" double format
// (see __hexagon_fast2_dadd_asm above for the register layout: exponent
// in R0.L, signed mantissa in the remaining bits).
//   In:  R1:0 = a, R3:2 = b.  Out: R1:0 = a - b, renormalized.
// Identical structure to dadd except for the sub in the middle packet.
.global __hexagon_fast2_dsub_asm
.type __hexagon_fast2_dsub_asm, @function
__hexagon_fast2_dsub_asm:
.falign
{
R7:6 = VABSDIFFH(R1:0, R3:2)
R9 = #62
R4 = SXTH(R0)
R5 = SXTH(R2)
} {
R6 = SXTH(R6)
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
if ( P0) R4 = #1
if (!P0) R5 = #1
R0.L = #0
R6 = MIN(R6, R9)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
R2.L = #0
R11:10 = #0
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = sub(R1:0, R3:2)
R10.L = #0x8001
} {
R4 = clb(R1:0)
R9 = #58
} {
R4 = add(R4, #-1)
p0 = cmp.gt(R4, R9)
} {
// Renormalize; >58 redundant sign bits means the difference vanished.
R1:0 = ASL(R1:0, R4)
R8 = SUB(R8, R4)
if(p0) jump .Ldenorm
} {
R0 = insert(R8, #16, #0)
jumpr r31
}
.Ldenorm:
// Zero result: mantissa 0 with sentinel exponent 0x8001 (in R10.L).
{
R1:0 = R11:10
jumpr r31
}
.text
// __hexagon_fast2_dmpy_asm: multiplication in the "fast2" double format
// (exponent in the low halfword, signed mantissa in the upper bits).
//   In:  R1:0 = a, R3:2 = b.  Out: R1:0 = a * b, renormalized.
// The 64-bit mantissa product is rebuilt from the high x high product
// plus the two high x (low>>16) cross products; the result exponent is
// the sum of the operand exponents (+1 after normalization).
.global __hexagon_fast2_dmpy_asm
.type __hexagon_fast2_dmpy_asm, @function
__hexagon_fast2_dmpy_asm:
.falign
{
R13= lsr(R2, #16)
R5 = sxth(R2)
R4 = sxth(R0)
R12= lsr(R0, #16)
}
{
R11:10 = mpy(R1, R3)
R7:6 = mpy(R1, R13)
R0.L = #0x0
R15:14 = #0
}
{
R11:10 = add(R11:10, R11:10)
R7:6 += mpy(R3, R12)
R2.L = #0x0
R15.H = #0x8000
}
{
R7:6 = asr(R7:6, #15)
R12.L = #0x8001
p1 = cmp.eq(R1:0, R3:2)
}
{
R7:6 = add(R7:6, R11:10)
R8 = add(R4, R5)
p2 = cmp.eq(R1:0, R15:14)
}
{
R9 = clb(R7:6)
R3:2 = abs(R7:6)
R11 = #58
}
// p1 = both operands equal the most-negative mantissa 0x8000...0, whose
// square cannot be represented: saturate at .Lsat.
{
p1 = and(p1, p2)
R8 = sub(R8, R9)
R9 = add(R9, #-1)
p0 = cmp.gt(R9, R11)
}
{
R8 = add(R8, #1)
R1:0 = asl(R7:6, R9)
if(p1) jump .Lsat
}
{
R0 = insert(R8,#16, #0)
if(!p0) jumpr r31
}
// Product vanished (>58 redundant sign bits): sentinel exponent 0x8001.
{
R0 = insert(R12,#16, #0)
jumpr r31
}
.Lsat:
// Saturate: mantissa = 0x7fffffffffffffff with the computed exponent.
{
R1:0 = #-1
}
{
R1:0 = lsr(R1:0, #1)
}
{
R0 = insert(R8,#16, #0)
jumpr r31
}
.text
// __hexagon_fast2_qd2f_asm: convert a "fast2" format double (R1:0,
// exponent in R0.L, signed mantissa above) to an IEEE-754 single in R0,
// rounding via the +R5 (=0x40) increment and clamping out-of-range
// exponents.
.global __hexagon_fast2_qd2f_asm
.type __hexagon_fast2_qd2f_asm, @function
__hexagon_fast2_qd2f_asm:
.falign
{
R3 = abs(R1):sat
R4 = sxth(R0)
R5 = #0x40
R6.L = #0xffc0
}
// p2 = exponent too large for single (.Lmax); !p3 = too small (.Lmin).
{
R0 = extractu(R3, #8, #0)
p2 = cmp.gt(R4, #126)
p3 = cmp.ge(R4, #-126)
R6.H = #0x7fff
}
{
p1 = cmp.eq(R0,#0x40)
if(p1.new) R5 = #0
R4 = add(R4, #126)
if(!p3) jump .Lmin
}
{
p0 = bitsset(R3, R6)
R0.L = #0x0000
R2 = add(R3, R5)
R7 = lsr(R6, #8)
}
{
if(p0) R4 = add(R4, #1)
if(p0) R3 = #0
R2 = lsr(R2, #7)
R0.H = #0x8000
}
// Assemble sign (taken from R1) | biased exponent | rounded mantissa.
{
R0 = and(R0, R1)
R6 &= asl(R4, #23)
if(!p0) R3 = and(R2, R7)
if(p2) jump .Lmax
}
{
R0 += add(R6, R3)
jumpr r31
}
.Lmax:
// Overflow: clamp to the largest finite single, 0x7f7fffff.
{
R0.L = #0xffff;
}
{
R0.H = #0x7f7f;
jumpr r31
}
.Lmin:
// Underflow: flush to +0.0f.
{
R0 = #0x0
jumpr r31
}
.text
// __hexagon_fast2_f2qd_asm: convert an IEEE single (R0) to the "fast2"
// double format in R1:0: mantissa (with the hidden bit made explicit
// via setbit #30) in R1, unbiased exponent in R0; negative inputs
// negate the mantissa.  +/-0 maps to the zero encoding
// { mantissa 0, exponent 0x8001 } at .Lminqd.
.global __hexagon_fast2_f2qd_asm
.type __hexagon_fast2_f2qd_asm, @function
__hexagon_fast2_f2qd_asm:
.falign
// p0 = sign bit; p1 (below) = input is +/-0 (R3 = input with sign
// doubled away compares equal to zero).
{
R1 = asl(R0, #7)
p0 = tstbit(R0, #31)
R5:4 = #0
R3 = add(R0,R0)
}
{
R1 = setbit(R1, #30)
R0= extractu(R0,#8,#23)
R4.L = #0x8001
p1 = cmp.eq(R3, #0)
}
{
R1= extractu(R1, #31, #0)
R0= add(R0, #-126)
R2 = #0
if(p1) jump .Lminqd
}
{
R0 = zxth(R0)
if(p0) R1= sub(R2, R1)
jumpr r31
}
.Lminqd:
// Zero encoding: mantissa 0, sentinel exponent 0x8001 (from R4.L).
{
R1:0 = R5:4
jumpr r31
}
|
AshishD5/bhainlink | 736 | library/compiler-builtins/compiler-builtins/src/hexagon/divsi3.s |
// __hexagon_divsi3: signed 32-bit division (i32 / i32), truncating.
//   In:       r0 = dividend, r1 = divisor
//   Out:      r0 = quotient; sign = sign(dividend) XOR sign(divisor)
//   Clobbers: r1-r5, p0-p2
FUNCTION_BEGIN __hexagon_divsi3
// Packet-parallel reads: r1 = |old r0| (dividend), r2 = |old r1| (divisor).
{
p0 = cmp.ge(r0,#0)
p1 = cmp.ge(r1,#0)
r1 = abs(r0)
r2 = abs(r1)
}
{
r3 = cl0(r1)
r4 = cl0(r2)
r5 = sub(r1,r2)
p2 = cmp.gtu(r2,r1)
}
// p1 = result is negative.  Early outs: quotient 0 when divisor >
// dividend (p2); quotient +/-1 when the divisor fits exactly once (p0).
{
r0 = #0
p1 = xor(p0,p1)
p0 = cmp.gtu(r2,r5)
if (p2) jumpr r31
}
{
r0 = mux(p1,#-1,#1)
if (p0) jumpr r31
r4 = sub(r4,r3)
r3 = #1
}
// r3:2 = (quotient bit, divisor) left-aligned under the dividend.
{
r0 = #0
r3:2 = vlslw(r3:2,r4)
loop0(1f,r4)
}
.falign
1:
// One quotient bit per iteration: subtract the shifted divisor when it
// fits and accumulate the matching quotient bit.
{
p0 = cmp.gtu(r2,r1)
if (!p0.new) r1 = sub(r1,r2)
if (!p0.new) r0 = add(r0,r3)
r3:2 = vlsrw(r3:2,#1)
}:endloop0
// Final trial at shift 0, then apply the sign.
{
p0 = cmp.gtu(r2,r1)
if (!p0.new) r0 = add(r0,r3)
if (!p1) jumpr r31
}
{
r0 = neg(r0)
jumpr r31
}
FUNCTION_END __hexagon_divsi3
.globl __qdsp_divsi3
.set __qdsp_divsi3, __hexagon_divsi3
|
AshishD5/bhainlink | 1,295 | library/compiler-builtins/compiler-builtins/src/hexagon/memcpy_forward_vp4cp4n2.s | .text
// hexagon_memcpy_forward_vp4cp4n2(r0 = dst, r1 = src, r2 = length)
// Forward copy with L2 prefetch (l2fetch) issued ahead of each phase:
// a word-granular prolog up to the next 4 KB boundary of src, a main
// loop copying whole 4 KB pages (512 doublewords per outer iteration),
// and a word-granular epilog.
// NOTE(review): the page-loop arithmetic (r2 >> 10 outer iterations x
// 512 memd copies) only balances if r2 is the length in 4-byte words,
// and the word loops assume 4-byte-aligned src/dst -- confirm with the
// callers.  The ##2105344 / ##2105472 constants are l2fetch(Rs,Rt)
// descriptors (stride/width/height packed into Rt).
.globl hexagon_memcpy_forward_vp4cp4n2
.balign 32
.type hexagon_memcpy_forward_vp4cp4n2,@function
hexagon_memcpy_forward_vp4cp4n2:
// Prolog sizing: r3 = words remaining to src's next 4 KB boundary.
{
r3 = sub(##4096, r1)
r5 = lsr(r2, #3)
}
{
r3 = extractu(r3, #10, #2)
r4 = extractu(r3, #7, #5)
}
{
r3 = minu(r2, r3)
r4 = minu(r5, r4)
}
{
r4 = or(r4, ##2105344)
p0 = cmp.eq(r3, #0)
if (p0.new) jump:nt .Lskipprolog
}
l2fetch(r1, r4)
{
loop0(.Lprolog, r3)
r2 = sub(r2, r3)
}
.falign
.Lprolog:
// Word-at-a-time copy using the .new store of the just-loaded value.
{
r4 = memw(r1++#4)
memw(r0++#4) = r4.new
} :endloop0
.Lskipprolog:
// Main loop: r3 = number of full pages; r2 keeps the remainder.
{
r3 = lsr(r2, #10)
if (cmp.eq(r3.new, #0)) jump:nt .Lskipmain
}
{
loop1(.Lout, r3)
r2 = extractu(r2, #10, #0)
r3 = ##2105472
}
.falign
.Lout:
l2fetch(r1, r3)
loop0(.Lpage, #512)
.falign
.Lpage:
r5:4 = memd(r1++#8)
{
memw(r0++#8) = r4
memw(r0+#4) = r5
} :endloop0:endloop1
.Lskipmain:
// Epilog: copy the remaining words (early-out when nothing is left).
{
r3 = ##2105344
r4 = lsr(r2, #3)
p0 = cmp.eq(r2, #0)
if (p0.new) jumpr:nt r31
}
{
r3 = or(r3, r4)
loop0(.Lepilog, r2)
}
l2fetch(r1, r3)
.falign
.Lepilog:
{
r4 = memw(r1++#4)
memw(r0++#4) = r4.new
} :endloop0
jumpr r31
.size hexagon_memcpy_forward_vp4cp4n2, . - hexagon_memcpy_forward_vp4cp4n2
|
AshishD5/bhainlink | 721 | library/compiler-builtins/compiler-builtins/src/hexagon/umoddi3.s |
// __hexagon_umoddi3: unsigned 64-bit remainder (u64 % u64).
//   In:       r1:0 = dividend, r3:2 = divisor
//   Out:      r1:0 = remainder
//   Clobbers: r2-r15, p0
// Same shift-and-subtract structure as __hexagon_moddi3, minus the
// sign handling.
FUNCTION_BEGIN __hexagon_umoddi3
// From here on: r3:2 = dividend (working remainder), r5:4 = divisor.
{
r6 = cl0(r1:0)
r7 = cl0(r3:2)
r5:4 = r3:2
r3:2 = r1:0
}
{
r10 = sub(r7,r6)
r1:0 = #0
r15:14 = #1
}
// r13:12 = divisor aligned under the dividend's MSB; r15:14 = matching
// quotient bit (the quotient itself is discarded for modulo).
{
r11 = add(r10,#1)
r13:12 = lsl(r5:4,r10)
r15:14 = lsl(r15:14,r10)
}
{
p0 = cmp.gtu(r5:4,r3:2)
loop0(1f,r11)
}
// Divisor already larger than dividend: the remainder is the dividend.
{
if (p0) jump .hexagon_umoddi3_return
}
.falign
1:
{
p0 = cmp.gtu(r13:12,r3:2)
}
{
r7:6 = sub(r3:2, r13:12)
r9:8 = add(r1:0, r15:14)
}
// Keep the trial subtraction only when the shifted divisor fits.
{
r1:0 = vmux(p0, r1:0, r9:8)
r3:2 = vmux(p0, r3:2, r7:6)
}
{
r15:14 = lsr(r15:14, #1)
r13:12 = lsr(r13:12, #1)
}:endloop0
.hexagon_umoddi3_return:
{
r1:0 = r3:2
jumpr r31
}
FUNCTION_END __hexagon_umoddi3
.globl __qdsp_umoddi3
.set __qdsp_umoddi3, __hexagon_umoddi3
|
AshishD5/bhainlink | 632 | library/compiler-builtins/compiler-builtins/src/hexagon/udivmodsi4.s |
// __hexagon_udivmodsi4: unsigned 32-bit divide returning both results.
//   In:       r0 = dividend, r1 = divisor
//   Out:      r0 = quotient, r1 = remainder
//   Clobbers: r2-r6, p0
FUNCTION_BEGIN __hexagon_udivmodsi4
{
r2 = cl0(r0)
r3 = cl0(r1)
r5:4 = combine(#1,#0)
p0 = cmp.gtu(r1,r0)
}
// Packet reads use pre-packet values, so after this packet:
// r1 = dividend (remainder-to-be), r4 = divisor, r0 = 0 (quotient).
// divisor > dividend: return quotient 0, remainder = dividend.
{
r6 = sub(r3,r2)
r4 = r1
r1:0 = combine(r0,r4)
if (p0) jumpr r31
}
// r3:2 = (quotient bit, divisor) left-aligned; r4 zeroed when no loop
// iterations run so the final fix-up below is a no-op in that case.
{
r3:2 = vlslw(r5:4,r6)
loop0(1f,r6)
p0 = cmp.eq(r6,#0)
if (p0.new) r4 = #0
}
.falign
1:
// One quotient bit per iteration: subtract the shifted divisor when it
// fits and accumulate the matching quotient bit.
{
p0 = cmp.gtu(r2,r1)
if (!p0.new) r1 = sub(r1,r2)
if (!p0.new) r0 = add(r0,r3)
r3:2 = vlsrw(r3:2,#1)
}:endloop0
// Final trial at shift 0, updating both quotient and remainder.
{
p0 = cmp.gtu(r2,r1)
if (!p0.new) r1 = sub(r1,r4)
if (!p0.new) r0 = add(r0,r3)
jumpr r31
}
FUNCTION_END __hexagon_udivmodsi4
.globl __qdsp_udivmodsi4
.set __qdsp_udivmodsi4, __hexagon_udivmodsi4
|
AshishD5/bhainlink | 833 | library/compiler-builtins/compiler-builtins/src/hexagon/dfminmax.s | .text
.global __hexagon_mindf3
.global __hexagon_maxdf3
.type __hexagon_mindf3,@function
.type __hexagon_maxdf3,@function
.global __qdsp_mindf3 ; .set __qdsp_mindf3, __hexagon_mindf3
.global __qdsp_maxdf3 ; .set __qdsp_maxdf3, __hexagon_maxdf3
.p2align 5
// __hexagon_mindf3: double-precision minimum.
//   In:  r1:0 = x, r3:2 = y.   Out: r1:0 = min(x, y).
// If x is NaN (dfclass #0x10) the result is y; when the operands
// compare equal, the bit patterns are OR-ed, so min(+0.0, -0.0)
// yields -0.0.
__hexagon_mindf3:
{
p0 = dfclass(r1:0,#0x10)
p1 = dfcmp.gt(r1:0,r3:2)
r5:4 = r1:0
}
{
if (p0) r1:0 = r3:2
if (p1) r1:0 = r3:2
p2 = dfcmp.eq(r1:0,r3:2)
if (!p2.new) jumpr:t r31
}
{
r1:0 = or(r5:4,r3:2)
jumpr r31
}
.size __hexagon_mindf3,.-__hexagon_mindf3
.falign
// __hexagon_maxdf3: double-precision maximum.
//   In:  r1:0 = x, r3:2 = y.   Out: r1:0 = max(x, y).
// If x is NaN (dfclass #0x10) the result is y; when the operands
// compare equal, the bit patterns are AND-ed, so max(+0.0, -0.0)
// yields +0.0.
__hexagon_maxdf3:
{
p0 = dfclass(r1:0,#0x10)
p1 = dfcmp.gt(r3:2,r1:0)
r5:4 = r1:0
}
{
if (p0) r1:0 = r3:2
if (p1) r1:0 = r3:2
p2 = dfcmp.eq(r1:0,r3:2)
if (!p2.new) jumpr:t r31
}
{
r1:0 = and(r5:4,r3:2)
jumpr r31
}
.size __hexagon_maxdf3,.-__hexagon_maxdf3
|
AshishD5/bhainlink | 3,885 | library/compiler-builtins/compiler-builtins/src/hexagon/fastmath2_ldlib_asm.s | .text
// __hexagon_fast2ldadd_asm: addition in the "fast2" long-double format.
// Operands are passed on the stack: a = { mant64 @ r29+#0, exp @ r29+#8 },
// b = { mant64 @ r29+#16, exp @ r29+#24 }; r0 = pointer to the result
// struct (mant64 stored at +0, exp at +8).
// Align-add-renormalize, as in __hexagon_fast2_dadd_asm; a vanished sum
// (>58 redundant sign bits) stores mantissa 0 with sentinel exponent
// 0x80000001 at .Ldenorma1.
.global __hexagon_fast2ldadd_asm
.type __hexagon_fast2ldadd_asm, @function
__hexagon_fast2ldadd_asm:
.falign
{
R4 = memw(r29+#8)
R5 = memw(r29+#24)
r7 = r0
}
// R6 = |exponent difference| (saturating, clamped to 62); R8 = result
// exponent candidate = max(exp)+1.
{
R6 = sub(R4, R5):sat
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
R6 = abs(R6):sat
if ( P0) R4 = #1
if (!P0) R5 = #1
R9 = #62
} {
R6 = MIN(R6, R9)
R1:0 = memd(r29+#0)
R3:2 = memd(r29+#16)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = add(R1:0, R3:2)
R3:2 = #0
} {
R4 = clb(R1:0)
R9.L =#0x0001
} {
R8 -= add(R4, #-1)
R4 = add(R4, #-1)
p0 = cmp.gt(R4, #58)
R9.H =#0x8000
} {
if(!p0)memw(r7+#8) = R8
R1:0 = ASL(R1:0, R4)
if(p0) jump .Ldenorma1
} {
memd(r7+#0) = R1:0
jumpr r31
}
.Ldenorma1:
// Zero result: mantissa 0, sentinel exponent 0x80000001 (R9).
memd(r7+#0) = R3:2
{
memw(r7+#8) = R9
jumpr r31
}
.text
// __hexagon_fast2ldsub_asm: subtraction in the "fast2" long-double
// format; same stack layout and structure as __hexagon_fast2ldadd_asm
// (a at r29+#0/#8, b at r29+#16/#24, r0 = result pointer), with sub
// instead of add in the combining packet.
.global __hexagon_fast2ldsub_asm
.type __hexagon_fast2ldsub_asm, @function
__hexagon_fast2ldsub_asm:
.falign
{
R4 = memw(r29+#8)
R5 = memw(r29+#24)
r7 = r0
}
{
R6 = sub(R4, R5):sat
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
R6 = abs(R6):sat
if ( P0) R4 = #1
if (!P0) R5 = #1
R9 = #62
} {
R6 = min(R6, R9)
R1:0 = memd(r29+#0)
R3:2 = memd(r29+#16)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = sub(R1:0, R3:2)
R3:2 = #0
} {
R4 = clb(R1:0)
R9.L =#0x0001
} {
R8 -= add(R4, #-1)
R4 = add(R4, #-1)
p0 = cmp.gt(R4, #58)
R9.H =#0x8000
} {
if(!p0)memw(r7+#8) = R8
R1:0 = asl(R1:0, R4)
if(p0) jump .Ldenorma_s
} {
memd(r7+#0) = R1:0
jumpr r31
}
.Ldenorma_s:
// Zero result: mantissa 0, sentinel exponent 0x80000001 (R9).
memd(r7+#0) = R3:2
{
memw(r7+#8) = R9
jumpr r31
}
.text
// __hexagon_fast2ldmpy_asm: multiplication in the "fast2" long-double
// format.  Stack operands as in fast2ldadd (mantissas at r29+#0/#16,
// exponents at +8/+24); R0 = result pointer (mant at +0, exp at +8).
// The mantissa product is rebuilt from high x high plus the two
// high x (low>>1) cross products; result exponent = sum of exponents.
.global __hexagon_fast2ldmpy_asm
.type __hexagon_fast2ldmpy_asm, @function
__hexagon_fast2ldmpy_asm:
.falign
{
R15:14 = memd(r29+#0)
R3:2 = memd(r29+#16)
R13:12 = #0
}
{
R8= extractu(R2, #31, #1)
R9= extractu(R14, #31, #1)
R13.H = #0x8000
}
{
R11:10 = mpy(R15, R3)
R7:6 = mpy(R15, R8)
R4 = memw(r29+#8)
R5 = memw(r29+#24)
}
{
R11:10 = add(R11:10, R11:10)
R7:6 += mpy(R3, R9)
}
{
R7:6 = asr(R7:6, #30)
R8.L = #0x0001
p1 = cmp.eq(R15:14, R3:2)
}
{
R7:6 = add(R7:6, R11:10)
R4= add(R4, R5)
p2 = cmp.eq(R3:2, R13:12)
}
// p1 = both operands equal the most-negative mantissa 0x8000...0 whose
// square saturates (.Lsat1).
{
R9 = clb(R7:6)
R8.H = #0x8000
p1 = and(p1, p2)
}
{
R4-= add(R9, #-1)
R9 = add(R9, #-1)
if(p1) jump .Lsat1
}
// Renormalize and store; >58 redundant sign bits means a vanished
// product (.Ldenorm1).
{
R7:6 = asl(R7:6, R9)
memw(R0+#8) = R4
p0 = cmp.gt(R9, #58)
if(p0.new) jump:NT .Ldenorm1
}
{
memd(R0+#0) = R7:6
jumpr r31
}
.Lsat1:
// Saturated square of the most-negative mantissa: 0x4000...0 with the
// exponent bumped accordingly.
{
R13:12 = #0
R4+= add(R9, #1)
}
{
R13.H = #0x4000
memw(R0+#8) = R4
}
{
memd(R0+#0) = R13:12
jumpr r31
}
.Ldenorm1:
// Zero result: mantissa 0, sentinel exponent 0x80000001 (R8).
{
memw(R0+#8) = R8
R15:14 = #0
}
{
memd(R0+#0) = R15:14
jumpr r31
}
|
AshishD5/bhainlink | 872 | library/compiler-builtins/compiler-builtins/src/hexagon/sfsqrt_opt.s | FUNCTION_BEGIN __hexagon_sqrtf
// __hexagon_sqrtf (body; the FUNCTION_BEGIN header precedes this block):
// single-precision square root.
//   In:  r0 = x.   Out: r0 = sqrtf(x).   Clobbers: r1-r5, p0-p1.
// sfinvsqrta yields a 1/sqrt(x) seed (r3) plus the predicate/scale
// output p0; three sfmpy:lib refinement rounds sharpen the root (r0)
// and half-reciprocal (r1) against r4 = 0.5f (##0x3f000000), and the
// final sfmpy(...,p0):scale applies the remainder correction.
// p1 = sfclass(r5,#1) flags zero inputs so the input bits are OR-ed
// back into the result (sqrt(+/-0) = +/-0) -- NOTE(review): confirm the
// sfclass mask semantics against the ISA manual.
{
r3,p0 = sfinvsqrta(r0)
r5 = sffixupr(r0)
r4 = ##0x3f000000
r1:0 = combine(#0,#0)
}
{
r0 += sfmpy(r3,r5):lib
r1 += sfmpy(r3,r4):lib
r2 = r4
r3 = r5
}
{
r2 -= sfmpy(r0,r1):lib
p1 = sfclass(r5,#1)
}
{
r0 += sfmpy(r0,r2):lib
r1 += sfmpy(r1,r2):lib
r2 = r4
r3 = r5
}
{
r2 -= sfmpy(r0,r1):lib
r3 -= sfmpy(r0,r0):lib
}
{
r0 += sfmpy(r1,r3):lib
r1 += sfmpy(r1,r2):lib
r2 = r4
r3 = r5
}
{
r3 -= sfmpy(r0,r0):lib
if (p1) r0 = or(r0,r5)
}
{
r0 += sfmpy(r1,r3,p0):scale
jumpr r31
}
FUNCTION_END __hexagon_sqrtf
.global __qdsp_sqrtf ; .set __qdsp_sqrtf, __hexagon_sqrtf
.global __hexagon_fast_sqrtf ; .set __hexagon_fast_sqrtf, __hexagon_sqrtf
.global __hexagon_fast2_sqrtf ; .set __hexagon_fast2_sqrtf, __hexagon_sqrtf
|
AshishD5/bhainlink | 11,809 | library/std/src/sys/pal/sgx/abi/entry.S | /* This symbol is used at runtime to figure out the virtual address that the */
/* enclave is loaded at. */
.section absolute
.global IMAGE_BASE
IMAGE_BASE:
.section ".note.x86_64-fortanix-unknown-sgx", "", @note
.align 4
.long 1f - 0f /* name length (not including padding) */
.long 3f - 2f /* desc length (not including padding) */
.long 1 /* type = NT_VERSION */
0: .asciz "toolchain-version" /* name */
1: .align 4
2: .long 1 /* desc - toolchain version number, 32-bit LE */
3: .align 4
.section .rodata
/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
/* parts need to have a defined value. In particular: */
/* */
/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
/* RFBM[2] is set, regardless of the value of XSTATE_BV */
/* * XSAVE header */
.align 64
.Lxsave_clear:
.org .+24
.Lxsave_mxcsr:
.short 0x1fbf
/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */
/* The following symbols point at read-only data that will be filled in by the */
/* post-linker. */
/* When using this macro, don't forget to adjust the linker version script! */
/* globvar: define a read-only global of \size bytes whose value is */
/* patched in by the post-linker; aligned to its own size and marked */
/* protected so it resolves locally. */
.macro globvar name:req size:req
.global \name
.protected \name
.align \size
.size \name , \size
\name :
.org .+\size
.endm
/* The base address (relative to enclave start) of the heap area */
globvar HEAP_BASE 8
/* The heap size in bytes */
globvar HEAP_SIZE 8
/* Value of the RELA entry in the dynamic table */
globvar RELA 8
/* Value of the RELACOUNT entry in the dynamic table */
globvar RELACOUNT 8
/* The enclave size in bytes */
globvar ENCLAVE_SIZE 8
/* The base address (relative to enclave start) of the enclave configuration area */
globvar CFGDATA_BASE 8
/* Non-zero if debugging is enabled, zero otherwise */
globvar DEBUG 1
/* The base address (relative to enclave start) of the enclave text section */
globvar TEXT_BASE 8
/* The size in bytes of enclave text section */
globvar TEXT_SIZE 8
/* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_OFFSET 8
/* The size in bytes of enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_LEN 8
/* The base address (relative to enclave start) of the enclave .eh_frame section */
globvar EH_FRM_OFFSET 8
/* The size in bytes of enclave .eh_frame section */
globvar EH_FRM_LEN 8
.org .Lxsave_clear+512
.Lxsave_header:
.int 0, 0 /* XSTATE_BV */
.int 0, 0 /* XCOMP_BV */
.org .+48 /* reserved bits */
.data
.Laborted:
.byte 0
/* TCS local storage section */
.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */
.equ tcsls_flags, 0x08 /* initialized by loader */
.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */
/* 14 unused bits */
.equ tcsls_user_fcw, 0x0a
.equ tcsls_user_mxcsr, 0x0c
.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */
.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */
.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
.equ tcsls_user_rsp, 0x28
.equ tcsls_user_retip, 0x30
.equ tcsls_user_rbp, 0x38
.equ tcsls_user_r12, 0x40
.equ tcsls_user_r13, 0x48
.equ tcsls_user_r14, 0x50
.equ tcsls_user_r15, 0x58
.equ tcsls_tls_ptr, 0x60
.equ tcsls_tcs_addr, 0x68
/* load_tcsls_flag_secondary_bool: load the tcsls_flag_secondary bit of */
/* %gs:tcsls_flags into %\reg as a 0/1 bool (the 32-bit mov zeroes the  */
/* upper half first).  Clobbers \reg and rflags.                        */
.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
.ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
.abort
.endif
mov $(1<<tcsls_flag_secondary),%e\reg
and %gs:tcsls_flags,%\reg
.endm
/* We place the ELF entry point in a separate section so it can be removed by
elf2sgxs */
.section .text_no_sgx, "ax"
.Lelf_entry_error_msg:
.ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
.Lelf_entry_error_msg_end:
.global elf_entry
.type elf_entry,function
/* elf_entry: ELF entry point, reached only if the enclave binary is */
/* executed directly as a normal program: write an explanatory message */
/* to stderr (looping to handle partial writes) and exit(1). */
elf_entry:
/* print error message */
movq $2,%rdi /* write to stderr (fd 2) */
lea .Lelf_entry_error_msg(%rip),%rsi
movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx
.Lelf_entry_call:
movq $1,%rax /* write() syscall */
syscall
test %rax,%rax
jle .Lelf_exit /* exit on error */
add %rax,%rsi
sub %rax,%rdx /* all chars written? */
jnz .Lelf_entry_call
.Lelf_exit:
movq $60,%rax /* exit() syscall */
movq $1,%rdi /* exit code 1 */
syscall
ud2 /* should not be reached */
/* end elf_entry */
/* This code needs to be called *after* the enclave stack has been setup. */
/* There are 3 places where this needs to happen, so this is put in a macro. */
/* Clobbers rflags and uses 8 bytes of stack scratch; branches to */
/* .Lreentry_panic if the enclave has been marked aborted. */
.macro entry_sanitize_final
/* Sanitize rflags received from user */
/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
pushfq
andq $~0x40400, (%rsp)
popfq
/* check for abort */
bt $0,.Laborted(%rip)
jc .Lreentry_panic
.endm
.text
/* sgx_entry: enclave TCS entry point (reached via ENCLU[EENTER]). */
/* State on entry: rbx = TCS address, rcx = userspace return RIP (saved */
/* as tcsls_user_retip), rdi/rsi/rdx/r8/r9 = entry() arguments, r10 = */
/* debug panic buffer pointer when DEBUG is set.  Saves the user's */
/* callee state into TCS-local storage (%gs), scrubs extended CPU */
/* state, then either resumes a pending usercall (tcsls_last_rsp != 0) */
/* or sets up the enclave stack and calls entry(); leaves via EEXIT */
/* with the return value in rsi (and rdx). */
.global sgx_entry
.type sgx_entry,function
sgx_entry:
/* save user registers */
mov %rcx,%gs:tcsls_user_retip
mov %rsp,%gs:tcsls_user_rsp
mov %rbp,%gs:tcsls_user_rbp
mov %r12,%gs:tcsls_user_r12
mov %r13,%gs:tcsls_user_r13
mov %r14,%gs:tcsls_user_r14
mov %r15,%gs:tcsls_user_r15
mov %rbx,%gs:tcsls_tcs_addr
stmxcsr %gs:tcsls_user_mxcsr
fnstcw %gs:tcsls_user_fcw
/* check for debug buffer pointer */
testb $0xff,DEBUG(%rip)
jz .Lskip_debug_init
mov %r10,%gs:tcsls_debug_panic_buf_ptr
.Lskip_debug_init:
/* reset cpu state */
/* rdx is live (an entry() argument): park it in r10 across the xrstor, */
/* whose RFBM mask rdx:rax = ~0 restores all components from .Lxsave_clear. */
mov %rdx, %r10
mov $-1, %rax
mov $-1, %rdx
xrstor .Lxsave_clear(%rip)
lfence
mov %r10, %rdx
/* check if returning from usercall */
mov %gs:tcsls_last_rsp,%r11
test %r11,%r11
jnz .Lusercall_ret
/* setup stack */
mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
/* here. This is fixed below under "adjust stack". */
/* check for thread init */
bts $tcsls_flag_init_once,%gs:tcsls_flags
jc .Lskip_init
/* adjust stack */
/* first entry on this TCS: tcsls_tos holds an image-relative offset; */
/* rebase it to a virtual address and run the one-time tcs_init. */
lea IMAGE_BASE(%rip),%rax
add %rax,%rsp
mov %rsp,%gs:tcsls_tos
entry_sanitize_final
/* call tcs_init */
/* store caller-saved registers in callee-saved registers */
mov %rdi,%rbx
mov %rsi,%r12
mov %rdx,%r13
mov %r8,%r14
mov %r9,%r15
load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
call tcs_init
/* reload caller-saved registers */
mov %rbx,%rdi
mov %r12,%rsi
mov %r13,%rdx
mov %r14,%r8
mov %r15,%r9
jmp .Lafter_init
.Lskip_init:
entry_sanitize_final
.Lafter_init:
/* call into main entry point */
load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
mov %rax,%rsi /* RSI = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
xor %rdi,%rdi /* RDI = normal exit */
.Lexit:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set later */
/* RCX overwritten by ENCLU */
/* RDX contains return value */
/* RSP set later */
/* RBP set later */
/* RDI contains exit mode */
/* RSI contains return value */
xor %r8,%r8
xor %r9,%r9
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
.Lsgx_exit:
/* clear extended register state */
mov %rdx, %rcx /* save RDX */
mov $-1, %rax
mov %rax, %rdx
xrstor .Lxsave_clear(%rip)
mov %rcx, %rdx /* restore RDX */
/* clear flags */
pushq $0
popfq
/* restore user registers */
mov %gs:tcsls_user_r12,%r12
mov %gs:tcsls_user_r13,%r13
mov %gs:tcsls_user_r14,%r14
mov %gs:tcsls_user_r15,%r15
mov %gs:tcsls_user_retip,%rbx
mov %gs:tcsls_user_rsp,%rsp
mov %gs:tcsls_user_rbp,%rbp
fldcw %gs:tcsls_user_fcw
ldmxcsr %gs:tcsls_user_mxcsr
/* exit enclave */
mov $0x4,%eax /* EEXIT */
enclu
/* end sgx_entry */
.Lreentry_panic:
/* NOTE(review): orq $8 forces rsp to 8 mod 16, i.e. the conventional */
/* post-call alignment expected at function entry -- confirm intent. */
orq $8,%rsp
jmp abort_reentry
/* This *MUST* be called with 6 parameters, otherwise register information */
/* might leak! */
/* usercall: leave the enclave to service a usercall in userspace. */
/*   rdi/rsi/rdx/r8/r9 = usercall parameters (passed through to user); */
/*   rcx != 0 => abort: mark the enclave aborted; such a call never */
/*   returns (state is saved anyway in DEBUG builds for the debugger). */
/* The non-abort path saves callee-saved regs plus FCW/MXCSR on the */
/* enclave stack and records rsp in tcsls_last_rsp; on the next EENTER, */
/* sgx_entry resumes at .Lusercall_ret, which returns rsi (in rax) and */
/* rdx to the in-enclave caller. */
.global usercall
usercall:
test %rcx,%rcx /* check `abort` function argument */
jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */
jmp .Lusercall_save_state /* non-aborting usercall */
.Lusercall_abort:
/* set aborted bit */
movb $1,.Laborted(%rip)
/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
testb $0xff,DEBUG(%rip)
jz .Lusercall_noreturn
.Lusercall_save_state:
/* save callee-saved state */
push %r15
push %r14
push %r13
push %r12
push %rbp
push %rbx
sub $8, %rsp
fstcw 4(%rsp)
stmxcsr (%rsp)
movq %rsp,%gs:tcsls_last_rsp
.Lusercall_noreturn:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set by sgx_exit */
/* RCX overwritten by ENCLU */
/* RDX contains parameter */
/* RSP set by sgx_exit */
/* RBP set by sgx_exit */
/* RDI contains parameter */
/* RSI contains parameter */
/* R8 contains parameter */
/* R9 contains parameter */
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
/* extended registers/flags cleared by sgx_exit */
/* exit */
jmp .Lsgx_exit
.Lusercall_ret:
movq $0,%gs:tcsls_last_rsp
/* restore callee-saved state, cf. "save" above */
mov %r11,%rsp
/* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */
/* vector instructions is used. We omit the lfence here as one is required before */
/* the jmp instruction anyway. */
ldmxcsr (%rsp)
fldcw 4(%rsp)
add $8, %rsp
entry_sanitize_final
pop %rbx
pop %rbp
pop %r12
pop %r13
pop %r14
pop %r15
/* return */
mov %rsi,%rax /* RAX = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
pop %r11
lfence
jmp *%r11
/*
The following functions need to be defined externally:
```
// Called by entry code on re-entry after exit
extern "C" fn abort_reentry() -> !;
// Called once when a TCS is first entered
extern "C" fn tcs_init(secondary: bool);
// Standard TCS entrypoint
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
```
*/
/* get_tcs_addr() -> rax = this thread's TCS address (saved at sgx_entry). */
/* Returns via pop+lfence+jmp rather than ret (speculative-execution */
/* hardening used throughout this file). */
.global get_tcs_addr
get_tcs_addr:
mov %gs:tcsls_tcs_addr,%rax
pop %r11
lfence
jmp *%r11
/* get_tls_ptr() -> rax = the TLS pointer stored in TCS-local storage. */
/* Returns via pop+lfence+jmp rather than ret (speculation hardening). */
.global get_tls_ptr
get_tls_ptr:
mov %gs:tcsls_tls_ptr,%rax
pop %r11
lfence
jmp *%r11
/* set_tls_ptr(rdi = new TLS pointer): store it in TCS-local storage. */
/* Returns via pop+lfence+jmp rather than ret (speculation hardening). */
.global set_tls_ptr
set_tls_ptr:
mov %rdi,%gs:tcsls_tls_ptr
pop %r11
lfence
jmp *%r11
/* take_debug_panic_buf_ptr() -> rax = the debug panic buffer pointer, */
/* atomically swapped with 0 (xchg) so the buffer is taken exactly once. */
/* Returns via pop+lfence+jmp rather than ret (speculation hardening). */
.global take_debug_panic_buf_ptr
take_debug_panic_buf_ptr:
xor %rax,%rax
xchg %gs:tcsls_debug_panic_buf_ptr,%rax
pop %r11
lfence
jmp *%r11
|
AshishD5/bhainlink | 79 | tests/ui/asm/named-asm-labels.s | lab1: nop
// do more things
lab2: nop // does bar
// a: b
lab3: nop; lab4: nop
|
AshishD5/bhainlink | 29 | tests/codegen/asm/foo.s | .global foo
foo:
jmp baz
|
AshishD5/bhainlink | 136 | tests/run-make/x86_64-fortanix-unknown-sgx-lvi/enclave/foo_asm.s | .text
.global cc_plus_one_asm
.type cc_plus_one_asm, @function
cc_plus_one_asm:
movl (%rdi), %eax
inc %eax
retq
|
AshishD5/bhainlink | 145 | tests/run-make/x86_64-fortanix-unknown-sgx-lvi/enclave/libcmake_foo/src/foo_asm.s | .text
.global cmake_plus_one_asm
.type cmake_plus_one_asm, @function
cmake_plus_one_asm:
movl (%rdi), %eax
inc %eax
retq
|
ashutosh-py/Rust | 11,809 | library/std/src/sys/pal/sgx/abi/entry.S | /* This symbol is used at runtime to figure out the virtual address that the */
/* enclave is loaded at. */
.section absolute
.global IMAGE_BASE
IMAGE_BASE:
.section ".note.x86_64-fortanix-unknown-sgx", "", @note
.align 4
.long 1f - 0f /* name length (not including padding) */
.long 3f - 2f /* desc length (not including padding) */
.long 1 /* type = NT_VERSION */
0: .asciz "toolchain-version" /* name */
1: .align 4
2: .long 1 /* desc - toolchain version number, 32-bit LE */
3: .align 4
.section .rodata
/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
/* parts need to have a defined value. In particular: */
/* */
/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
/* RFBM[2] is set, regardless of the value of XSTATE_BV */
/* * XSAVE header */
.align 64
.Lxsave_clear:
.org .+24
.Lxsave_mxcsr:
.short 0x1fbf
/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */
/* The following symbols point at read-only data that will be filled in by the */
/* post-linker. */
/* When using this macro, don't forget to adjust the linker version script! */
.macro globvar name:req size:req
.global \name
.protected \name
.align \size
.size \name , \size
\name :
.org .+\size
.endm
/* The base address (relative to enclave start) of the heap area */
globvar HEAP_BASE 8
/* The heap size in bytes */
globvar HEAP_SIZE 8
/* Value of the RELA entry in the dynamic table */
globvar RELA 8
/* Value of the RELACOUNT entry in the dynamic table */
globvar RELACOUNT 8
/* The enclave size in bytes */
globvar ENCLAVE_SIZE 8
/* The base address (relative to enclave start) of the enclave configuration area */
globvar CFGDATA_BASE 8
/* Non-zero if debugging is enabled, zero otherwise */
globvar DEBUG 1
/* The base address (relative to enclave start) of the enclave text section */
globvar TEXT_BASE 8
/* The size in bytes of enclave text section */
globvar TEXT_SIZE 8
/* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_OFFSET 8
/* The size in bytes of enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_LEN 8
/* The base address (relative to enclave start) of the enclave .eh_frame section */
globvar EH_FRM_OFFSET 8
/* The size in bytes of enclave .eh_frame section */
globvar EH_FRM_LEN 8
.org .Lxsave_clear+512
.Lxsave_header:
.int 0, 0 /* XSTATE_BV */
.int 0, 0 /* XCOMP_BV */
.org .+48 /* reserved bits */
.data
.Laborted:
.byte 0
/* TCS local storage section */
.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */
.equ tcsls_flags, 0x08 /* initialized by loader */
.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */
/* 14 unused bits */
.equ tcsls_user_fcw, 0x0a
.equ tcsls_user_mxcsr, 0x0c
.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */
.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */
.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
.equ tcsls_user_rsp, 0x28
.equ tcsls_user_retip, 0x30
.equ tcsls_user_rbp, 0x38
.equ tcsls_user_r12, 0x40
.equ tcsls_user_r13, 0x48
.equ tcsls_user_r14, 0x50
.equ tcsls_user_r15, 0x58
.equ tcsls_tls_ptr, 0x60
.equ tcsls_tcs_addr, 0x68
.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
.ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
.abort
.endif
mov $(1<<tcsls_flag_secondary),%e\reg
and %gs:tcsls_flags,%\reg
.endm
/* We place the ELF entry point in a separate section so it can be removed by
elf2sgxs */
.section .text_no_sgx, "ax"
.Lelf_entry_error_msg:
.ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
.Lelf_entry_error_msg_end:
.global elf_entry
.type elf_entry,function
elf_entry:
/* print error message */
movq $2,%rdi /* write to stderr (fd 2) */
lea .Lelf_entry_error_msg(%rip),%rsi
movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx
.Lelf_entry_call:
movq $1,%rax /* write() syscall */
syscall
test %rax,%rax
jle .Lelf_exit /* exit on error */
add %rax,%rsi
sub %rax,%rdx /* all chars written? */
jnz .Lelf_entry_call
.Lelf_exit:
movq $60,%rax /* exit() syscall */
movq $1,%rdi /* exit code 1 */
syscall
ud2 /* should not be reached */
/* end elf_entry */
/* This code needs to be called *after* the enclave stack has been setup. */
/* There are 3 places where this needs to happen, so this is put in a macro. */
.macro entry_sanitize_final
/* Sanitize rflags received from user */
/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
pushfq
andq $~0x40400, (%rsp)
popfq
/* check for abort */
bt $0,.Laborted(%rip)
jc .Lreentry_panic
.endm
.text
.global sgx_entry
.type sgx_entry,function
sgx_entry:
/* save user registers */
mov %rcx,%gs:tcsls_user_retip
mov %rsp,%gs:tcsls_user_rsp
mov %rbp,%gs:tcsls_user_rbp
mov %r12,%gs:tcsls_user_r12
mov %r13,%gs:tcsls_user_r13
mov %r14,%gs:tcsls_user_r14
mov %r15,%gs:tcsls_user_r15
mov %rbx,%gs:tcsls_tcs_addr
stmxcsr %gs:tcsls_user_mxcsr
fnstcw %gs:tcsls_user_fcw
/* check for debug buffer pointer */
testb $0xff,DEBUG(%rip)
jz .Lskip_debug_init
mov %r10,%gs:tcsls_debug_panic_buf_ptr
.Lskip_debug_init:
/* reset cpu state */
mov %rdx, %r10
mov $-1, %rax
mov $-1, %rdx
xrstor .Lxsave_clear(%rip)
lfence
mov %r10, %rdx
/* check if returning from usercall */
mov %gs:tcsls_last_rsp,%r11
test %r11,%r11
jnz .Lusercall_ret
/* setup stack */
mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
/* here. This is fixed below under "adjust stack". */
/* check for thread init */
bts $tcsls_flag_init_once,%gs:tcsls_flags
jc .Lskip_init
/* adjust stack */
lea IMAGE_BASE(%rip),%rax
add %rax,%rsp
mov %rsp,%gs:tcsls_tos
entry_sanitize_final
/* call tcs_init */
/* store caller-saved registers in callee-saved registers */
mov %rdi,%rbx
mov %rsi,%r12
mov %rdx,%r13
mov %r8,%r14
mov %r9,%r15
load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
call tcs_init
/* reload caller-saved registers */
mov %rbx,%rdi
mov %r12,%rsi
mov %r13,%rdx
mov %r14,%r8
mov %r15,%r9
jmp .Lafter_init
.Lskip_init:
entry_sanitize_final
.Lafter_init:
/* call into main entry point */
load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
mov %rax,%rsi /* RSI = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
xor %rdi,%rdi /* RDI = normal exit */
.Lexit:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set later */
/* RCX overwritten by ENCLU */
/* RDX contains return value */
/* RSP set later */
/* RBP set later */
/* RDI contains exit mode */
/* RSI contains return value */
xor %r8,%r8
xor %r9,%r9
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
.Lsgx_exit:
/* clear extended register state */
mov %rdx, %rcx /* save RDX */
mov $-1, %rax
mov %rax, %rdx
xrstor .Lxsave_clear(%rip)
mov %rcx, %rdx /* restore RDX */
/* clear flags */
pushq $0
popfq
/* restore user registers */
mov %gs:tcsls_user_r12,%r12
mov %gs:tcsls_user_r13,%r13
mov %gs:tcsls_user_r14,%r14
mov %gs:tcsls_user_r15,%r15
mov %gs:tcsls_user_retip,%rbx
mov %gs:tcsls_user_rsp,%rsp
mov %gs:tcsls_user_rbp,%rbp
fldcw %gs:tcsls_user_fcw
ldmxcsr %gs:tcsls_user_mxcsr
/* exit enclave */
mov $0x4,%eax /* EEXIT */
enclu
/* end sgx_entry */
.Lreentry_panic:
orq $8,%rsp
jmp abort_reentry
/* This *MUST* be called with 6 parameters, otherwise register information */
/* might leak! */
.global usercall
usercall:
test %rcx,%rcx /* check `abort` function argument */
jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */
jmp .Lusercall_save_state /* non-aborting usercall */
.Lusercall_abort:
/* set aborted bit */
movb $1,.Laborted(%rip)
/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
testb $0xff,DEBUG(%rip)
jz .Lusercall_noreturn
.Lusercall_save_state:
/* save callee-saved state */
push %r15
push %r14
push %r13
push %r12
push %rbp
push %rbx
sub $8, %rsp
fstcw 4(%rsp)
stmxcsr (%rsp)
movq %rsp,%gs:tcsls_last_rsp
.Lusercall_noreturn:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set by sgx_exit */
/* RCX overwritten by ENCLU */
/* RDX contains parameter */
/* RSP set by sgx_exit */
/* RBP set by sgx_exit */
/* RDI contains parameter */
/* RSI contains parameter */
/* R8 contains parameter */
/* R9 contains parameter */
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
/* extended registers/flags cleared by sgx_exit */
/* exit */
jmp .Lsgx_exit
.Lusercall_ret:
movq $0,%gs:tcsls_last_rsp
/* restore callee-saved state, cf. "save" above */
mov %r11,%rsp
/* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */
/* vector instructions is used. We omit the lfence here as one is required before */
/* the jmp instruction anyway. */
ldmxcsr (%rsp)
fldcw 4(%rsp)
add $8, %rsp
entry_sanitize_final
pop %rbx
pop %rbp
pop %r12
pop %r13
pop %r14
pop %r15
/* return */
mov %rsi,%rax /* RAX = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
pop %r11
lfence
jmp *%r11
/*
The following functions need to be defined externally:
```
// Called by entry code on re-entry after exit
extern "C" fn abort_reentry() -> !;
// Called once when a TCS is first entered
extern "C" fn tcs_init(secondary: bool);
// Standard TCS entrypoint
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
```
*/
.global get_tcs_addr
get_tcs_addr:
mov %gs:tcsls_tcs_addr,%rax
pop %r11
lfence
jmp *%r11
.global get_tls_ptr
get_tls_ptr:
mov %gs:tcsls_tls_ptr,%rax
pop %r11
lfence
jmp *%r11
.global set_tls_ptr
set_tls_ptr:
mov %rdi,%gs:tcsls_tls_ptr
pop %r11
lfence
jmp *%r11
.global take_debug_panic_buf_ptr
take_debug_panic_buf_ptr:
xor %rax,%rax
xchg %gs:tcsls_debug_panic_buf_ptr,%rax
pop %r11
lfence
jmp *%r11
|
ashutosh-py/Rust | 29 | tests/codegen/foo.s | .global foo
foo:
jmp baz
|
ashutosh-py/Rust | 79 | tests/ui/asm/named-asm-labels.s | lab1: nop
// do more things
lab2: nop // does bar
// a: b
lab3: nop; lab4: nop
|
ashutosh-py/Rust | 136 | tests/run-make/x86_64-fortanix-unknown-sgx-lvi/enclave/foo_asm.s | .text
.global cc_plus_one_asm
.type cc_plus_one_asm, @function
cc_plus_one_asm:
movl (%rdi), %eax
inc %eax
retq
|
ashutosh-py/Rust | 145 | tests/run-make/x86_64-fortanix-unknown-sgx-lvi/enclave/libcmake_foo/src/foo_asm.s | .text
.global cmake_plus_one_asm
.type cmake_plus_one_asm, @function
cmake_plus_one_asm:
movl (%rdi), %eax
inc %eax
retq
|
AspeedTech-BMC/caliptra-sw | 8,866 | rom/dev/src/start.S | /*++
Licensed under the Apache-2.0 license.
File Name:
start.S
Abstract:
File contains startup code for Caliptra.
Environment:
ROM
--*/
.section .init, "ax"
.global _start
_start:
.cfi_startproc
.cfi_undefined ra
// Clear minstret
csrw minstret, zero
csrw minstreth, zero
// Disable interrupts and clear pending interrupts
csrw mstatus, 0
csrw mie, 0
csrw mip, 0
// Clear all registers
li x1, 0; li x2, 0; li x3, 0; li x4, 0;
li x5, 0; li x6, 0; li x7, 0; li x8, 0;
li x9, 0; li x10, 0; li x11, 0; li x12, 0;
li x13, 0; li x14, 0; li x15, 0; li x16, 0;
li x17, 0; li x18, 0; li x19, 0; li x20, 0;
li x21, 0; li x22, 0; li x23, 0; li x24, 0;
li x25, 0; li x26, 0; li x27, 0; li x28, 0;
li x29, 0; li x30, 0; li x31, 0;
// Setup the global pointer to enable linker relaxation.
// Linker relaxation enables generation of relative jump
// instruction on function calls and jumps. The relative
// jumps have a tighter encoding than absolute jumps hence
// reducing code memory usage.
.option push
.option norelax
la gp, __global_pointer$
.option pop
// Setup Memory Region Attributes
//
// Veer Core Memory is divided in to 16 regions of 256 MB
// each. Each region has 2 possible attributes:
// 1. Cacheability
// 2. Side Effects
//
// Caliptra does not have any caches hence the Cacheability
// attribute is zero for all regions. Side-effect as not
// observable in memory regions containing ROM, ICCM & DCCM.
// However they are enabled for regions containing peripheral
// registers.
//
// ---------------------------------------------
// | Region | Side Effect | Cacheable |
// ---------------------------------------------
// | 0x0000_0000 | 0 | 1 |
// | 0x4000_0000 | 0 | 0 |
// | 0x5000_0000 | 0 | 0 |
// | Others | 1 | 0 |
// ---------------------------------------------
//
// CSR MRAC = 0x7C0
li x1, 0xAAAAA0A9
csrw 0x7C0, x1
// Setup stack pointer
la sp, _sstack
// Interrupts are disabled and will remain disabled in ROM so we only
// need to worry about exceptions, so no reason to do vectored.
// Setup Direct Exception Vector
la t0, _exception_handler
csrw mtvec, t0
// Setup NMI Vector
// Load address of NMI handler
la t0, _nmi_handler
// Load address of MMIO NMI vector register
// CLP_SOC_IFC_REG_INTERNAL_NMI_VECTOR = 0x3003062c
li t1, 0x3003062c
// Store address of NMI handler in MMIO NMI vector register
sw t0, 0x0(t1)
// Initialize ICCM & DCCM on cold boot to
// all zeros. This is needed to intialize the ECC
// in ICCM & DCCM.
// CLP_SOC_IFC_REG_CPTRA_RESET_REASON = 0x30030040
li t0, 0x30030040
lw t1, 0x0(t0)
andi t1, t1, 0x3
bne t1, x0, post_ecc_init
//
// Cold Boot
//
// Zero ICCM
la a0, ICCM_ORG // dest
la a1, ICCM_SIZE // len
call _zero_mem256
// Zero DCCM
la a0, DCCM_ORG // dest
la a1, DCCM_SIZE // len
call _zero_mem256
post_ecc_init:
// Copy Data Section
// la a0, _sdata // dest
// la a1, _sidata // src
// la a2, _data_len // len
// call _copy_mem32
// Zero BSS Section
// la a0, _sbss // dest
// la a1, _bss_len // len
// call _zero_mem32
tail rom_entry
.cfi_endproc
.section .init.text, "ax"
.align 2
_zero_mem256:
.cfi_startproc
// Can't use "sw x0" below if we want c.sw
li a2, 0
add a1, a1, a0
1:
sw a2, 0(a0)
sw a2, 4(a0)
sw a2, 8(a0)
sw a2, 12(a0)
sw a2, 16(a0)
sw a2, 20(a0)
sw a2, 24(a0)
sw a2, 28(a0)
addi a0, a0, 32
bltu a0, a1, 1b
ret
.cfi_endproc
.section .init.text, "ax"
.align 2
_copy_mem32:
.cfi_startproc
li t0, 4
1:
beqz a2, 1f
lw t1, 0(a1)
sw t1, 0(a0)
addi a0, a0, 4
addi a1, a1, 4
sub a2, a2, t0
j 1b
1:
ret
.cfi_endproc
.section .init.text, "ax"
.align 2
_exception_handler:
// Save sp to mscratch
csrw mscratch, sp
// Switch to exception stack
la sp, _sestack
// Allocate space for all relevant registers
// (ra, sp, a0-7, t0-6, mepc, mcause, mscause, mstatus, mtval)
addi sp, sp, -88
// Save relevant registers to stack except x2(sp) since that is in mscratch
sw ra, 0x0(sp)
// Skipping 0x4(sp) for now to store sp later
sw a0, 0x8(sp)
sw a1, 0xC(sp)
sw a2, 0x10(sp)
sw a3, 0x14(sp)
sw a4, 0x18(sp)
sw a5, 0x1C(sp)
sw a6, 0x20(sp)
sw a7, 0x24(sp)
sw t0, 0x28(sp)
sw t1, 0x2C(sp)
sw t2, 0x30(sp)
sw t3, 0x34(sp)
sw t4, 0x38(sp)
sw t5, 0x3C(sp)
sw t6, 0x40(sp)
// Save original sp to 0x4(sp)
csrr t0, mscratch // Load mscratch (original sp) to t0
sw t0, 0x4(sp)
// Save mepc to 0x44(sp)
csrr t0, mepc // Load mepc to t0
sw t0, 0x44(sp)
// Save mcause to 0x48(sp)
csrr t0, mcause // Load mcause to t0
sw t0, 0x48(sp)
# // Save mscause to 0x4C(sp)
// MSCAUSE = 0x7FF
csrr t0, 0x7FF // Load mscause to t0
sw t0, 0x4C(sp)
// Save mstatus to 0x50(sp)
csrr t0, mstatus // Load mstatus to t0
sw t0, 0x50(sp)
// Save mtval to 0x54(sp)
csrr t0, mtval // Load mtval to t0
sw t0, 0x54(sp)
// Call the rust trap handler with the stack pointer as the parameter
addi a0, sp, 0
jal exception_handler
// Restore relevant registers except x2(sp)
lw ra, 0x0(sp)
// Skipping 0x4(sp) for now to store sp later
lw a0, 0x8(sp)
lw a1, 0xC(sp)
lw a2, 0x10(sp)
lw a3, 0x14(sp)
lw a4, 0x18(sp)
lw a5, 0x1C(sp)
lw a6, 0x20(sp)
lw a7, 0x24(sp)
lw t0, 0x28(sp)
lw t1, 0x2C(sp)
lw t2, 0x30(sp)
lw t3, 0x34(sp)
lw t4, 0x38(sp)
lw t5, 0x3C(sp)
lw t6, 0x40(sp)
// Restore original sp from 0x4(sp)
lw sp, 0x4(sp)
mret
.section .init.text, "ax"
.align 2
_nmi_handler:
// Save sp to mscratch
csrw mscratch, sp
// Switch to exception stack
la sp, _snstack
// Allocate space for all relevant registers (ra, sp, a0-7, t0-6, mepc, mcause, mscause, mstatus, mtval)
addi sp, sp, -88
// Save relevant registers to stack except x2(sp) since that is in mscratch
sw ra, 0x0(sp)
// Skipping 0x4(sp) for now to store sp later
sw a0, 0x8(sp)
sw a1, 0xC(sp)
sw a2, 0x10(sp)
sw a3, 0x14(sp)
sw a4, 0x18(sp)
sw a5, 0x1C(sp)
sw a6, 0x20(sp)
sw a7, 0x24(sp)
sw t0, 0x28(sp)
sw t1, 0x2C(sp)
sw t2, 0x30(sp)
sw t3, 0x34(sp)
sw t4, 0x38(sp)
sw t5, 0x3C(sp)
sw t6, 0x40(sp)
// Save original sp to 0x4(sp)
csrr t0, mscratch // Load mscratch (original sp) to t0
sw t0, 0x4(sp)
// Save mepc to 0x44(sp)
csrr t0, mepc // Load mepc to t0
sw t0, 0x44(sp)
// Save mcause to 0x48(sp)
csrr t0, mcause // Load mcause to t0
sw t0, 0x48(sp)
# // Save mscause to 0x4C(sp)
// MSCAUSE = 0x7FF
csrr t0, 0x7FF // Load mscause to t0
sw t0, 0x4C(sp)
// Save mstatus to 0x50(sp)
csrr t0, mstatus // Load mstatus to t0
sw t0, 0x50(sp)
// Save mtval to 0x54(sp)
csrr t0, mtval // Load mtval to t0
sw t0, 0x54(sp)
// Call the rust nmi handler with the stack pointer as the parameter
addi a0, sp, 0
jal nmi_handler
// Restore relevant registers except x2(sp)
lw ra, 0x0(sp)
// Skipping 0x4(sp) for now to store sp later
lw a0, 0x8(sp)
lw a1, 0xC(sp)
lw a2, 0x10(sp)
lw a3, 0x14(sp)
lw a4, 0x18(sp)
lw a5, 0x1C(sp)
lw a6, 0x20(sp)
lw a7, 0x24(sp)
lw t0, 0x28(sp)
lw t1, 0x2C(sp)
lw t2, 0x30(sp)
lw t3, 0x34(sp)
lw t4, 0x38(sp)
lw t5, 0x3C(sp)
lw t6, 0x40(sp)
// Restore original sp from 0x4(sp)
lw sp, 0x4(sp)
mret
.section .init.text, "ax"
.align 2
.global exit_rom
exit_rom:
.cfi_startproc
//
// Clear the stack
//
// Save the FMC address
addi a3, a0, 0
la a0, STACK_ORG // dest
la a1, STACK_SIZE // len
call _zero_mem256
// Clear all registers
li x1, 0; li x2, 0; li x3, 0; li x4, 0;
li x5, 0; li x6, 0; li x7, 0; li x8, 0;
li x9, 0; li x10, 0; li x11, 0; li x12, 0;
// Don't clear x13 as it contains the FMC address.
li x14, 0; li x15, 0; li x16, 0;
li x17, 0; li x18, 0; li x19, 0; li x20, 0;
li x21, 0; li x22, 0; li x23, 0; li x24, 0;
li x25, 0; li x26, 0; li x27, 0; li x28, 0;
li x29, 0; li x30, 0; li x31, 0;
// Jump to FMC
jr a3
1:
j 1b
.cfi_endproc
|
AspeedTech-BMC/caliptra-sw | 6,393 | rom/dev/tools/test-fmc/src/start.S | /*++
Licensed under the Apache-2.0 license.
File Name:
start.S
Abstract:
File contains startup code for Caliptra.
Environment:
FMC
--*/
.section .init, "ax"
.global _start
_start:
.cfi_startproc
.cfi_undefined ra
// Clear minstret
csrw minstret, zero
csrw minstreth, zero
// Disable interrupts and clear pending interrupts
csrw mstatus, 0
csrw mie, 0
csrw mip, 0
// Clear all registers
li x1, 0; li x2, 0; li x3, 0; li x4, 0;
li x5, 0; li x6, 0; li x7, 0; li x8, 0;
li x9, 0; li x10, 0; li x11, 0; li x12, 0;
li x13, 0; li x14, 0; li x15, 0; li x16, 0;
li x17, 0; li x18, 0; li x19, 0; li x20, 0;
li x21, 0; li x22, 0; li x23, 0; li x24, 0;
li x25, 0; li x26, 0; li x27, 0; li x28, 0;
li x29, 0; li x30, 0; li x31, 0;
// Setup the global pointer to enable linker relaxation.
// Linker relaxation enables generation of relative jump
// instruction on function calls and jumps. The relative
// jumps have a tigher encoding than absolute jumps hence
// reducing code memory usage.
.option push
.option norelax
la gp, __global_pointer$
.option pop
// Setup stack pointer
la sp, _sstack
// Interrupts are disabled and will remain disabled in ROM so we only
// need to worry about exceptions, so no reason to do vectored.
// Setup Direct Exception Vector
la t0, _exception_handler
csrw mtvec, t0
// Setup NMI Vector
// Load address of NMI handler
// la t0, _nmi_handler
// Load address of MMIO NMI vector register
// CLP_SOC_IFC_REG_INTERNAL_NMI_VECTOR = 0x3003062c
// li t1, 0x3003062c
// Store address of NMI handler in MMIO NMI vector register
// sw t0, 0x0(t1)
// Copy Data Section
// la a0, _sdata // dest
// la a1, _sidata // src
// la a2, _data_len // len
// call _copy_mem32
// Zero BSS Section
// la a0, _sbss // dest
// la a1, _bss_len // len
// call _zero_mem32
tail fmc_entry
.cfi_endproc
.section .init.text, "ax"
.align 2
_zero_mem32:
.cfi_startproc
li t0, 4
1:
beqz a1, 1f
sw x0, 0(a0)
addi a0, a0, 4
sub a1, a1, t0
j 1b
1:
ret
.cfi_endproc
.section .init.text, "ax"
.align 2
_copy_mem32:
.cfi_startproc
li t0, 4
1:
beqz a2, 1f
lw t1, 0(a1)
sw t1, 0(a0)
addi a0, a0, 4
addi a1, a1, 4
sub a2, a2, t0
j 1b
1:
ret
.cfi_endproc
.section .init.text, "ax"
.align 2
_exception_handler:
// Save sp to mscratch
csrw mscratch, sp
// Switch to exception stack
la sp, _sestack
// Allocate space for all relevant registers
// (ra, sp, a0-7, t0-6, mepc, mcause, mscause, mstatus, mtval)
addi sp, sp, -88
// Save relevant registers to stack except x2(sp) since that is in mscratch
sw ra, 0x0(sp)
// Skipping 0x4(sp) for now to store sp later
sw a0, 0x8(sp)
sw a1, 0xC(sp)
sw a2, 0x10(sp)
sw a3, 0x14(sp)
sw a4, 0x18(sp)
sw a5, 0x1C(sp)
sw a6, 0x20(sp)
sw a7, 0x24(sp)
sw t0, 0x28(sp)
sw t1, 0x2C(sp)
sw t2, 0x30(sp)
sw t3, 0x34(sp)
sw t4, 0x38(sp)
sw t5, 0x3C(sp)
sw t6, 0x40(sp)
// Save original sp to 0x4(sp)
csrr t0, mscratch // Load mscratch (original sp) to t0
sw t0, 0x4(sp)
// Save mepc to 0x7C(sp)
csrr t0, mepc // Load mepc to t0
sw t0, 0x44(sp)
// Save mcause to 0x80(sp)
csrr t0, mcause // Load mcause to t0
sw t0, 0x48(sp)
# // Save mscause to 0x84(sp)
// MSCAUSE = 0x7FF
csrr t0, 0x7FF // Load mscause to t0
sw t0, 0x4C(sp)
// Save mstatus to 0x88(sp)
csrr t0, mstatus // Load mstatus to t0
sw t0, 0x50(sp)
// Save mtval to 0x8C(sp)
csrr t0, mtval // Load mtval to t0
sw t0, 0x54(sp)
// Call the rust trap handler with the stack pointer as the parameter
addi a0, sp, 0
jal exception_handler
// Restore relevant registers except x2(sp)
lw ra, 0x0(sp)
// Skipping 0x4(sp) for now to store sp later
lw a0, 0x8(sp)
lw a1, 0xC(sp)
lw a2, 0x10(sp)
lw a3, 0x14(sp)
lw a4, 0x18(sp)
lw a5, 0x1C(sp)
lw a6, 0x20(sp)
lw a7, 0x24(sp)
lw t0, 0x28(sp)
lw t1, 0x2C(sp)
lw t2, 0x30(sp)
lw t3, 0x34(sp)
lw t4, 0x38(sp)
lw t5, 0x3C(sp)
lw t6, 0x40(sp)
// Restore original sp from 0x4(sp)
lw sp, 0x4(sp)
mret
.section .init.text, "ax"
.align 2
_nmi_handler:
// Save sp to mscratch
csrw mscratch, sp
// Switch to exception stack
la sp, _snstack
// Allocate space for all relevant registers (ra, sp, a0-7, t0-6, mepc, mcause, mscause, mstatus, mtval)
addi sp, sp, -88
// Save relevant registers to stack except x2(sp) since that is in mscratch
sw ra, 0x0(sp)
// Skipping 0x4(sp) for now to store sp later
sw a0, 0x8(sp)
sw a1, 0xC(sp)
sw a2, 0x10(sp)
sw a3, 0x14(sp)
sw a4, 0x18(sp)
sw a5, 0x1C(sp)
sw a6, 0x20(sp)
sw a7, 0x24(sp)
sw t0, 0x28(sp)
sw t1, 0x2C(sp)
sw t2, 0x30(sp)
sw t3, 0x34(sp)
sw t4, 0x38(sp)
sw t5, 0x3C(sp)
sw t6, 0x40(sp)
// Save original sp to 0x4(sp)
csrr t0, mscratch // Load mscratch (original sp) to t0
sw t0, 0x4(sp)
// Save mepc to 0x7C(sp)
csrr t0, mepc // Load mepc to t0
sw t0, 0x44(sp)
// Save mcause to 0x80(sp)
csrr t0, mcause // Load mcause to t0
sw t0, 0x48(sp)
# // Save mscause to 0x84(sp)
// MSCAUSE = 0x7FF
csrr t0, 0x7FF // Load mscause to t0
sw t0, 0x4C(sp)
// Save mstatus to 0x88(sp)
csrr t0, mstatus // Load mstatus to t0
sw t0, 0x50(sp)
// Save mtval to 0x8C(sp)
csrr t0, mtval // Load mtval to t0
sw t0, 0x54(sp)
// Call the rust nmi handler with the stack pointer as the parameter
addi a0, sp, 0
jal nmi_handler
// Restore relevant registers except x2(sp)
lw ra, 0x0(sp)
// Skipping 0x4(sp) for now to store sp later
lw a0, 0x8(sp)
lw a1, 0xC(sp)
lw a2, 0x10(sp)
lw a3, 0x14(sp)
lw a4, 0x18(sp)
lw a5, 0x1C(sp)
lw a6, 0x20(sp)
lw a7, 0x24(sp)
lw t0, 0x28(sp)
lw t1, 0x2C(sp)
lw t2, 0x30(sp)
lw t3, 0x34(sp)
lw t4, 0x38(sp)
lw t5, 0x3C(sp)
lw t6, 0x40(sp)
// Restore original sp from 0x4(sp)
lw sp, 0x4(sp)
mret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.