repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
I-asked/api-daemon | 3,979 | third-party/rustix-0.37.27/src/backend/linux_raw/arch/outline/arm.s | // Assembly code for making arm syscalls.
//
// arm syscall argument register ordering is similar to the arm
// userspace argument register ordering except that the syscall number
// (nr) is passed in r7.
//
// nr_last.rs takes care of reordering the nr argument to the end for us,
// so we only need to move nr into r7 and take care of r4 and r5 if needed.
.file "arm.s"
.arch armv5t
.section .text.rustix_syscall0_nr_last,"ax",%progbits
.p2align 4
.weak rustix_syscall0_nr_last
.hidden rustix_syscall0_nr_last
.type rustix_syscall0_nr_last, %function
rustix_syscall0_nr_last:
.fnstart
.cantunwind
push {r7, lr}
mov r7, r0
svc #0
pop {r7, pc}
.fnend
.size rustix_syscall0_nr_last, .-rustix_syscall0_nr_last
.section .text.rustix_syscall1_nr_last,"ax",%progbits
.p2align 4
.weak rustix_syscall1_nr_last
.hidden rustix_syscall1_nr_last
.type rustix_syscall1_nr_last, %function
rustix_syscall1_nr_last:
.fnstart
.cantunwind
push {r7, lr}
mov r7, r1
svc #0
pop {r7, pc}
.fnend
.size rustix_syscall1_nr_last, .-rustix_syscall1_nr_last
.section .text.rustix_syscall1_noreturn_nr_last,"ax",%progbits
.p2align 4
.weak rustix_syscall1_noreturn_nr_last
.hidden rustix_syscall1_noreturn_nr_last
.type rustix_syscall1_noreturn_nr_last, %function
rustix_syscall1_noreturn_nr_last:
.fnstart
.cantunwind
// Don't save r7 and lr; this is noreturn, so we'll never restore them.
mov r7, r1
svc #0
udf #16 // Trap instruction
.fnend
.size rustix_syscall1_noreturn_nr_last, .-rustix_syscall1_noreturn_nr_last
.section .text.rustix_syscall2_nr_last,"ax",%progbits
.p2align 4
.weak rustix_syscall2_nr_last
.hidden rustix_syscall2_nr_last
.type rustix_syscall2_nr_last, %function
rustix_syscall2_nr_last:
.fnstart
.cantunwind
push {r7, lr}
mov r7, r2
svc #0
pop {r7, pc}
.fnend
.size rustix_syscall2_nr_last, .-rustix_syscall2_nr_last
.section .text.rustix_syscall3_nr_last,"ax",%progbits
.p2align 4
.weak rustix_syscall3_nr_last
.hidden rustix_syscall3_nr_last
.type rustix_syscall3_nr_last, %function
rustix_syscall3_nr_last:
.fnstart
.cantunwind
push {r7, lr}
mov r7, r3
svc #0
pop {r7, pc}
.fnend
.size rustix_syscall3_nr_last, .-rustix_syscall3_nr_last
.section .text.rustix_syscall4_nr_last,"ax",%progbits
.p2align 4
.weak rustix_syscall4_nr_last
.hidden rustix_syscall4_nr_last
.type rustix_syscall4_nr_last, %function
rustix_syscall4_nr_last:
.fnstart
.cantunwind
push {r7, lr}
ldr r7, [sp, #8]
svc #0
pop {r7, pc}
.fnend
.size rustix_syscall4_nr_last, .-rustix_syscall4_nr_last
.section .text.rustix_syscall5_nr_last,"ax",%progbits
.p2align 4
.weak rustix_syscall5_nr_last
.hidden rustix_syscall5_nr_last
.type rustix_syscall5_nr_last, %function
rustix_syscall5_nr_last:
.fnstart
.cantunwind
push {r4, r7, r11, lr}
ldr r7, [sp, #20]
ldr r4, [sp, #16]
svc #0
pop {r4, r7, r11, pc}
.fnend
.size rustix_syscall5_nr_last, .-rustix_syscall5_nr_last
.section .text.rustix_syscall6_nr_last,"ax",%progbits
.p2align 4
.weak rustix_syscall6_nr_last
.hidden rustix_syscall6_nr_last
.type rustix_syscall6_nr_last, %function
rustix_syscall6_nr_last:
.fnstart
.cantunwind
push {r4, r5, r7, lr}
add r7, sp, #16
ldm r7, {r4, r5, r7}
svc #0
pop {r4, r5, r7, pc}
.fnend
.size rustix_syscall6_nr_last, .-rustix_syscall6_nr_last
.section .note.GNU-stack,"",%progbits
|
I-asked/api-daemon | 12,301 | third-party/rustix-0.37.27/src/backend/linux_raw/arch/outline/x86.s | // Assembly code for making x86 syscalls.
//
// On x86 we use the "fastcall" convention which passes the first two
// arguments in ecx and edx. Outline.rs reorders the arguments to put
// a1 and a2 in those registers so that we don't have to move them to
// set up the kernel convention.
//
// "fastcall" expects callee to pop argument stack space, so we use
// `ret imm` instructions to clean up the stack. We don't need callee
// cleanup per se, it just comes along with using "fastcall".
.file "x86.s"
.intel_syntax noprefix
.section .text.rustix_indirect_syscall0_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_indirect_syscall0_nr_last_fastcall
.hidden rustix_indirect_syscall0_nr_last_fastcall
.type rustix_indirect_syscall0_nr_last_fastcall, @function
rustix_indirect_syscall0_nr_last_fastcall:
.cfi_startproc
mov eax,ecx
call edx
ret
.cfi_endproc
.size rustix_indirect_syscall0_nr_last_fastcall, .-rustix_indirect_syscall0_nr_last_fastcall
.section .text.rustix_indirect_syscall1_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_indirect_syscall1_nr_last_fastcall
.hidden rustix_indirect_syscall1_nr_last_fastcall
.type rustix_indirect_syscall1_nr_last_fastcall, @function
rustix_indirect_syscall1_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
.cfi_offset ebx, -8
mov ebx,ecx
mov eax,edx
call DWORD PTR [esp+0x8]
pop ebx
.cfi_def_cfa_offset 4
ret 0x4
.cfi_endproc
.size rustix_indirect_syscall1_nr_last_fastcall, .-rustix_indirect_syscall1_nr_last_fastcall
.section .text.rustix_indirect_syscall1_noreturn_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_indirect_syscall1_noreturn_nr_last_fastcall
.hidden rustix_indirect_syscall1_noreturn_nr_last_fastcall
.type rustix_indirect_syscall1_noreturn_nr_last_fastcall, @function
rustix_indirect_syscall1_noreturn_nr_last_fastcall:
.cfi_startproc
mov ebx,ecx
mov eax,edx
call DWORD PTR [esp+0x4]
ud2
.cfi_endproc
.size rustix_indirect_syscall1_noreturn_nr_last_fastcall, .-rustix_indirect_syscall1_noreturn_nr_last_fastcall
.section .text.rustix_indirect_syscall2_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_indirect_syscall2_nr_last_fastcall
.hidden rustix_indirect_syscall2_nr_last_fastcall
.type rustix_indirect_syscall2_nr_last_fastcall, @function
rustix_indirect_syscall2_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
.cfi_offset ebx, -8
mov ebx,edx
mov eax,DWORD PTR [esp+0x8]
call DWORD PTR [esp+0xc]
pop ebx
.cfi_def_cfa_offset 4
ret 0x8
.cfi_endproc
.size rustix_indirect_syscall2_nr_last_fastcall, .-rustix_indirect_syscall2_nr_last_fastcall
.section .text.rustix_indirect_syscall3_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_indirect_syscall3_nr_last_fastcall
.hidden rustix_indirect_syscall3_nr_last_fastcall
.type rustix_indirect_syscall3_nr_last_fastcall, @function
rustix_indirect_syscall3_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
.cfi_offset ebx, -8
mov ebx,DWORD PTR [esp+0x8]
mov eax,DWORD PTR [esp+0xc]
call DWORD PTR [esp+0x10]
pop ebx
.cfi_def_cfa_offset 4
ret 0xc
.cfi_endproc
.size rustix_indirect_syscall3_nr_last_fastcall, .-rustix_indirect_syscall3_nr_last_fastcall
.section .text.rustix_indirect_syscall4_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_indirect_syscall4_nr_last_fastcall
.hidden rustix_indirect_syscall4_nr_last_fastcall
.type rustix_indirect_syscall4_nr_last_fastcall, @function
rustix_indirect_syscall4_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
push esi
.cfi_def_cfa_offset 12
.cfi_offset esi, -12
.cfi_offset ebx, -8
mov ebx,DWORD PTR [esp+0xc]
mov esi,DWORD PTR [esp+0x10]
mov eax,DWORD PTR [esp+0x14]
call DWORD PTR [esp+0x18]
pop esi
.cfi_def_cfa_offset 8
pop ebx
.cfi_def_cfa_offset 4
ret 0x10
.cfi_endproc
.size rustix_indirect_syscall4_nr_last_fastcall, .-rustix_indirect_syscall4_nr_last_fastcall
.section .text.rustix_indirect_syscall5_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_indirect_syscall5_nr_last_fastcall
.hidden rustix_indirect_syscall5_nr_last_fastcall
.type rustix_indirect_syscall5_nr_last_fastcall, @function
rustix_indirect_syscall5_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
push esi
.cfi_def_cfa_offset 12
push edi
.cfi_def_cfa_offset 16
.cfi_offset edi, -16
.cfi_offset esi, -12
.cfi_offset ebx, -8
mov ebx,DWORD PTR [esp+0x10]
mov esi,DWORD PTR [esp+0x14]
mov edi,DWORD PTR [esp+0x18]
mov eax,DWORD PTR [esp+0x1c]
call DWORD PTR [esp+0x20]
pop edi
.cfi_def_cfa_offset 12
pop esi
.cfi_def_cfa_offset 8
pop ebx
.cfi_def_cfa_offset 4
ret 0x14
.cfi_endproc
.size rustix_indirect_syscall5_nr_last_fastcall, .-rustix_indirect_syscall5_nr_last_fastcall
.section .text.rustix_indirect_syscall6_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_indirect_syscall6_nr_last_fastcall
.hidden rustix_indirect_syscall6_nr_last_fastcall
.type rustix_indirect_syscall6_nr_last_fastcall, @function
rustix_indirect_syscall6_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
push esi
.cfi_def_cfa_offset 12
push edi
.cfi_def_cfa_offset 16
push ebp
.cfi_def_cfa_offset 20
.cfi_offset ebp, -20
.cfi_offset edi, -16
.cfi_offset esi, -12
.cfi_offset ebx, -8
mov ebx,DWORD PTR [esp+0x14]
mov esi,DWORD PTR [esp+0x18]
mov edi,DWORD PTR [esp+0x1c]
mov ebp,DWORD PTR [esp+0x20]
mov eax,DWORD PTR [esp+0x24]
call DWORD PTR [esp+0x28]
pop ebp
.cfi_def_cfa_offset 16
pop edi
.cfi_def_cfa_offset 12
pop esi
.cfi_def_cfa_offset 8
pop ebx
.cfi_def_cfa_offset 4
ret 0x18
.cfi_endproc
.size rustix_indirect_syscall6_nr_last_fastcall, .-rustix_indirect_syscall6_nr_last_fastcall
.section .text.rustix_syscall0_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_syscall0_nr_last_fastcall
.hidden rustix_syscall0_nr_last_fastcall
.type rustix_syscall0_nr_last_fastcall, @function
rustix_syscall0_nr_last_fastcall:
.cfi_startproc
mov eax,ecx
int 0x80
ret
.cfi_endproc
.size rustix_syscall0_nr_last_fastcall, .-rustix_syscall0_nr_last_fastcall
.section .text.rustix_syscall1_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_syscall1_nr_last_fastcall
.hidden rustix_syscall1_nr_last_fastcall
.type rustix_syscall1_nr_last_fastcall, @function
rustix_syscall1_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
.cfi_offset ebx, -8
mov eax,edx
mov ebx,ecx
int 0x80
pop ebx
.cfi_def_cfa_offset 4
ret
.cfi_endproc
.size rustix_syscall1_nr_last_fastcall, .-rustix_syscall1_nr_last_fastcall
.section .text.rustix_syscall1_noreturn_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_syscall1_noreturn_nr_last_fastcall
.hidden rustix_syscall1_noreturn_nr_last_fastcall
.type rustix_syscall1_noreturn_nr_last_fastcall, @function
rustix_syscall1_noreturn_nr_last_fastcall:
.cfi_startproc
mov eax,edx
mov ebx,ecx
int 0x80
ud2
.cfi_endproc
.size rustix_syscall1_noreturn_nr_last_fastcall, .-rustix_syscall1_noreturn_nr_last_fastcall
.section .text.rustix_syscall2_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_syscall2_nr_last_fastcall
.hidden rustix_syscall2_nr_last_fastcall
.type rustix_syscall2_nr_last_fastcall, @function
rustix_syscall2_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
.cfi_offset ebx, -8
mov ebx,edx
mov eax,DWORD PTR [esp+0x8]
int 0x80
pop ebx
.cfi_def_cfa_offset 4
ret 0x4
.cfi_endproc
.size rustix_syscall2_nr_last_fastcall, .-rustix_syscall2_nr_last_fastcall
.section .text.rustix_syscall3_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_syscall3_nr_last_fastcall
.hidden rustix_syscall3_nr_last_fastcall
.type rustix_syscall3_nr_last_fastcall, @function
rustix_syscall3_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
.cfi_offset ebx, -8
mov ebx,DWORD PTR [esp+0x8]
mov eax,DWORD PTR [esp+0xc]
int 0x80
pop ebx
.cfi_def_cfa_offset 4
ret 0x8
.cfi_endproc
.size rustix_syscall3_nr_last_fastcall, .-rustix_syscall3_nr_last_fastcall
.section .text.rustix_syscall4_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_syscall4_nr_last_fastcall
.hidden rustix_syscall4_nr_last_fastcall
.type rustix_syscall4_nr_last_fastcall, @function
rustix_syscall4_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
push esi
.cfi_def_cfa_offset 12
.cfi_offset esi, -12
.cfi_offset ebx, -8
mov ebx,DWORD PTR [esp+0xc]
mov esi,DWORD PTR [esp+0x10]
mov eax,DWORD PTR [esp+0x14]
int 0x80
pop esi
.cfi_def_cfa_offset 8
pop ebx
.cfi_def_cfa_offset 4
ret 0xc
.cfi_endproc
.size rustix_syscall4_nr_last_fastcall, .-rustix_syscall4_nr_last_fastcall
.section .text.rustix_syscall5_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_syscall5_nr_last_fastcall
.hidden rustix_syscall5_nr_last_fastcall
.type rustix_syscall5_nr_last_fastcall, @function
rustix_syscall5_nr_last_fastcall:
.cfi_startproc
push ebx
.cfi_def_cfa_offset 8
push edi
.cfi_def_cfa_offset 12
push esi
.cfi_def_cfa_offset 16
.cfi_offset esi, -16
.cfi_offset edi, -12
.cfi_offset ebx, -8
mov ebx,DWORD PTR [esp+0x10]
mov esi,DWORD PTR [esp+0x14]
mov edi,DWORD PTR [esp+0x18]
mov eax,DWORD PTR [esp+0x1c]
int 0x80
pop esi
.cfi_def_cfa_offset 12
pop edi
.cfi_def_cfa_offset 8
pop ebx
.cfi_def_cfa_offset 4
ret 0x10
.cfi_endproc
.size rustix_syscall5_nr_last_fastcall, .-rustix_syscall5_nr_last_fastcall
.section .text.rustix_syscall6_nr_last_fastcall,"ax",@progbits
.p2align 4
.weak rustix_syscall6_nr_last_fastcall
.hidden rustix_syscall6_nr_last_fastcall
.type rustix_syscall6_nr_last_fastcall, @function
rustix_syscall6_nr_last_fastcall:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
push ebx
.cfi_def_cfa_offset 12
push edi
.cfi_def_cfa_offset 16
push esi
.cfi_def_cfa_offset 20
.cfi_offset esi, -20
.cfi_offset edi, -16
.cfi_offset ebx, -12
.cfi_offset ebp, -8
mov ebx,DWORD PTR [esp+0x14]
mov esi,DWORD PTR [esp+0x18]
mov edi,DWORD PTR [esp+0x1c]
mov ebp,DWORD PTR [esp+0x20]
mov eax,DWORD PTR [esp+0x24]
int 0x80
pop esi
.cfi_def_cfa_offset 16
pop edi
.cfi_def_cfa_offset 12
pop ebx
.cfi_def_cfa_offset 8
pop ebp
.cfi_def_cfa_offset 4
ret 0x14
.cfi_endproc
.size rustix_syscall6_nr_last_fastcall, .-rustix_syscall6_nr_last_fastcall
.section .text.rustix_int_0x80,"ax",@progbits
.p2align 4
.weak rustix_int_0x80
.hidden rustix_int_0x80
.type rustix_int_0x80, @function
rustix_int_0x80:
.cfi_startproc
int 0x80
ret
.cfi_endproc
.size rustix_int_0x80, .-rustix_int_0x80
.section .note.GNU-stack,"",@progbits
|
I-asked/api-daemon | 5,751 | third-party/rustix-0.37.27/src/backend/linux_raw/arch/outline/mips64.s | # Assembly code for making mips64 syscalls.
#
# mips64 syscall argument register ordering is the same as the mips64
# userspace argument register ordering except that the syscall number
# (nr) is passed in v0.
#
# outline.rs takes care of reordering the nr argument to the end for us,
# so we only need to move nr into v0.
.file "mips64.s"
.section .mdebug.abi64
.previous
.abicalls
.section .text.rustix_syscall0_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall0_nr_last
.hidden rustix_syscall0_nr_last
.type rustix_syscall0_nr_last, @function
.set nomips16
.set nomicromips
.ent rustix_syscall0_nr_last
rustix_syscall0_nr_last:
.frame $sp,0,$31
.mask 0x00000000,0
.fmask 0x00000000,0
.set noreorder
.set nomacro
move $2, $4
syscall
dnegu $12, $2
jr $31
movn $2, $12, $7
.end rustix_syscall0_nr_last
.size rustix_syscall0_nr_last, .-rustix_syscall0_nr_last
.section .text.rustix_syscall1_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall1_nr_last
.hidden rustix_syscall1_nr_last
.type rustix_syscall1_nr_last, @function
.set nomips16
.set nomicromips
.ent rustix_syscall1_nr_last
rustix_syscall1_nr_last:
.frame $sp,0,$31
.mask 0x00000000,0
.fmask 0x00000000,0
.set noreorder
.set nomacro
move $2, $5
syscall
dnegu $12, $2
jr $31
movn $2, $12, $7
.end rustix_syscall1_nr_last
.size rustix_syscall1_nr_last, .-rustix_syscall1_nr_last
.section .text.rustix_syscall1_noreturn_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall1_noreturn_nr_last
.hidden rustix_syscall1_noreturn_nr_last
.type rustix_syscall1_noreturn_nr_last, @function
.set nomips16
.set nomicromips
.ent rustix_syscall1_noreturn_nr_last
rustix_syscall1_noreturn_nr_last:
.frame $sp,0,$31
.mask 0x00000000,0
.fmask 0x00000000,0
.set noreorder
.set nomacro
move $2, $5
syscall
teq $0, $0
.end rustix_syscall1_noreturn_nr_last
.size rustix_syscall1_noreturn_nr_last, .-rustix_syscall1_noreturn_nr_last
.section .text.rustix_syscall2_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall2_nr_last
.hidden rustix_syscall2_nr_last
.type rustix_syscall2_nr_last, @function
.set nomips16
.set nomicromips
.ent rustix_syscall2_nr_last
rustix_syscall2_nr_last:
.frame $sp,0,$31
.mask 0x00000000,0
.fmask 0x00000000,0
.set noreorder
.set nomacro
move $2, $6
syscall
dnegu $12, $2
jr $31
movn $2, $12, $7
.end rustix_syscall2_nr_last
.size rustix_syscall2_nr_last, .-rustix_syscall2_nr_last
.section .text.rustix_syscall3_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall3_nr_last
.hidden rustix_syscall3_nr_last
.type rustix_syscall3_nr_last, @function
.set nomips16
.set nomicromips
.ent rustix_syscall3_nr_last
rustix_syscall3_nr_last:
.frame $sp,0,$31
.mask 0x00000000,0
.fmask 0x00000000,0
.set noreorder
.set nomacro
move $2, $7
syscall
dnegu $12, $2
jr $31
movn $2, $12, $7
.end rustix_syscall3_nr_last
.size rustix_syscall3_nr_last, .-rustix_syscall3_nr_last
.section .text.rustix_syscall4_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall4_nr_last
.hidden rustix_syscall4_nr_last
.type rustix_syscall4_nr_last, @function
.set nomips16
.set nomicromips
.ent rustix_syscall4_nr_last
rustix_syscall4_nr_last:
.frame $sp,0,$31
.mask 0x00000000,0
.fmask 0x00000000,0
.set noreorder
.set nomacro
move $2, $8
syscall
dnegu $12, $2
jr $31
movn $2, $12, $7
.end rustix_syscall4_nr_last
.size rustix_syscall4_nr_last, .-rustix_syscall4_nr_last
.section .text.rustix_syscall5_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall5_nr_last
.hidden rustix_syscall5_nr_last
.type rustix_syscall5_nr_last, @function
.set nomips16
.set nomicromips
.ent rustix_syscall5_nr_last
rustix_syscall5_nr_last:
.frame $sp,0,$31
.mask 0x00000000,0
.fmask 0x00000000,0
.set noreorder
.set nomacro
move $2, $9
syscall
dnegu $12, $2
jr $31
movn $2, $12, $7
.end rustix_syscall5_nr_last
.size rustix_syscall5_nr_last, .-rustix_syscall5_nr_last
.section .text.rustix_syscall6_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall6_nr_last
.hidden rustix_syscall6_nr_last
.type rustix_syscall6_nr_last, @function
.set nomips16
.set nomicromips
.ent rustix_syscall6_nr_last
rustix_syscall6_nr_last:
.frame $sp,0,$31
.mask 0x00000000,0
.fmask 0x00000000,0
.set noreorder
.set nomacro
move $2, $10
syscall
dnegu $12, $2
jr $31
movn $2, $12, $7
.end rustix_syscall6_nr_last
.size rustix_syscall6_nr_last, .-rustix_syscall6_nr_last
.section .note.GNU-stack,"",@progbits
|
I-asked/api-daemon | 3,525 | third-party/rustix-0.37.27/src/backend/linux_raw/arch/outline/aarch64.s | // Assembly code for making aarch64 syscalls.
//
// aarch64 syscall argument register ordering is the same as the aarch64
// userspace argument register ordering except that the syscall number
// (nr) is passed in w8.
//
// outline.rs takes care of reordering the nr argument to the end for us,
// so we only need to move nr into w8.
//
// arm64-ilp32 is not yet supported.
.file "aarch64.s"
.arch armv8-a
.section .text.rustix_syscall0_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall0_nr_last
.hidden rustix_syscall0_nr_last
.type rustix_syscall0_nr_last, @function
rustix_syscall0_nr_last:
.cfi_startproc
mov w8, w0
svc #0
ret
.cfi_endproc
.size rustix_syscall0_nr_last, .-rustix_syscall0_nr_last
.section .text.rustix_syscall1_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall1_nr_last
.hidden rustix_syscall1_nr_last
.type rustix_syscall1_nr_last, @function
rustix_syscall1_nr_last:
.cfi_startproc
mov w8, w1
svc #0
ret
.cfi_endproc
.size rustix_syscall1_nr_last, .-rustix_syscall1_nr_last
.section .text.rustix_syscall1_noreturn_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall1_noreturn_nr_last
.hidden rustix_syscall1_noreturn_nr_last
.type rustix_syscall1_noreturn_nr_last, @function
rustix_syscall1_noreturn_nr_last:
.cfi_startproc
mov w8, w1
svc #0
brk #0x1
.cfi_endproc
.size rustix_syscall1_noreturn_nr_last, .-rustix_syscall1_noreturn_nr_last
.section .text.rustix_syscall2_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall2_nr_last
.hidden rustix_syscall2_nr_last
.type rustix_syscall2_nr_last, @function
rustix_syscall2_nr_last:
.cfi_startproc
mov w8, w2
svc #0
ret
.cfi_endproc
.size rustix_syscall2_nr_last, .-rustix_syscall2_nr_last
.section .text.rustix_syscall3_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall3_nr_last
.hidden rustix_syscall3_nr_last
.type rustix_syscall3_nr_last, @function
rustix_syscall3_nr_last:
.cfi_startproc
mov w8, w3
svc #0
ret
.cfi_endproc
.size rustix_syscall3_nr_last, .-rustix_syscall3_nr_last
.section .text.rustix_syscall4_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall4_nr_last
.hidden rustix_syscall4_nr_last
.type rustix_syscall4_nr_last, @function
rustix_syscall4_nr_last:
.cfi_startproc
mov w8, w4
svc #0
ret
.cfi_endproc
.size rustix_syscall4_nr_last, .-rustix_syscall4_nr_last
.section .text.rustix_syscall5_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall5_nr_last
.hidden rustix_syscall5_nr_last
.type rustix_syscall5_nr_last, @function
rustix_syscall5_nr_last:
.cfi_startproc
mov w8, w5
svc #0
ret
.cfi_endproc
.size rustix_syscall5_nr_last, .-rustix_syscall5_nr_last
.section .text.rustix_syscall6_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall6_nr_last
.hidden rustix_syscall6_nr_last
.type rustix_syscall6_nr_last, @function
rustix_syscall6_nr_last:
.cfi_startproc
mov w8, w6
svc #0
ret
.cfi_endproc
.size rustix_syscall6_nr_last, .-rustix_syscall6_nr_last
.section .note.GNU-stack,"",@progbits
|
I-asked/api-daemon | 3,592 | third-party/rustix-0.37.27/src/backend/linux_raw/arch/outline/powerpc64.s | # Assembly code for making powerpc64le syscalls.
#
# powerpc64le syscall argument register ordering is the same as the
# powerpc64le userspace argument register ordering except that the syscall
# number (nr) is passed in r0.
#
# outline.rs takes care of reordering the nr argument to the end for us,
# so we only need to move nr into r0.
.file "powerpc64le.s"
.machine power8
.abiversion 2
.section .text.rustix_syscall0_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall0_nr_last
.hidden rustix_syscall0_nr_last
.type rustix_syscall0_nr_last, @function
rustix_syscall0_nr_last:
.cfi_startproc
mr 0, 3
sc
bnslr
neg 3, 3
blr
.cfi_endproc
.size rustix_syscall0_nr_last, .-rustix_syscall0_nr_last
.section .text.rustix_syscall1_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall1_nr_last
.hidden rustix_syscall1_nr_last
.type rustix_syscall1_nr_last, @function
rustix_syscall1_nr_last:
.cfi_startproc
mr 0, 4
sc
bnslr
neg 3, 3
blr
.cfi_endproc
.size rustix_syscall1_nr_last, .-rustix_syscall1_nr_last
.section .text.rustix_syscall1_noreturn_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall1_noreturn_nr_last
.hidden rustix_syscall1_noreturn_nr_last
.type rustix_syscall1_noreturn_nr_last, @function
rustix_syscall1_noreturn_nr_last:
.cfi_startproc
mr 0, 4
sc
trap
.cfi_endproc
.size rustix_syscall1_noreturn_nr_last, .-rustix_syscall1_noreturn_nr_last
.section .text.rustix_syscall2_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall2_nr_last
.hidden rustix_syscall2_nr_last
.type rustix_syscall2_nr_last, @function
rustix_syscall2_nr_last:
.cfi_startproc
mr 0, 5
sc
bnslr
neg 3, 3
blr
.cfi_endproc
.size rustix_syscall2_nr_last, .-rustix_syscall2_nr_last
.section .text.rustix_syscall3_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall3_nr_last
.hidden rustix_syscall3_nr_last
.type rustix_syscall3_nr_last, @function
rustix_syscall3_nr_last:
.cfi_startproc
mr 0, 6
sc
bnslr
neg 3, 3
blr
.cfi_endproc
.size rustix_syscall3_nr_last, .-rustix_syscall3_nr_last
.section .text.rustix_syscall4_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall4_nr_last
.hidden rustix_syscall4_nr_last
.type rustix_syscall4_nr_last, @function
rustix_syscall4_nr_last:
.cfi_startproc
mr 0, 7
sc
bnslr
neg 3, 3
blr
.cfi_endproc
.size rustix_syscall4_nr_last, .-rustix_syscall4_nr_last
.section .text.rustix_syscall5_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall5_nr_last
.hidden rustix_syscall5_nr_last
.type rustix_syscall5_nr_last, @function
rustix_syscall5_nr_last:
.cfi_startproc
mr 0, 8
sc
bnslr
neg 3, 3
blr
.cfi_endproc
.size rustix_syscall5_nr_last, .-rustix_syscall5_nr_last
.section .text.rustix_syscall6_nr_last,"ax",@progbits
.p2align 2
.weak rustix_syscall6_nr_last
.hidden rustix_syscall6_nr_last
.type rustix_syscall6_nr_last, @function
rustix_syscall6_nr_last:
.cfi_startproc
mr 0, 9
sc
bnslr
neg 3, 3
blr
.cfi_endproc
.size rustix_syscall6_nr_last, .-rustix_syscall6_nr_last
.section .note.GNU-stack,"",@progbits
|
icaruss1/c_compiler | 444 | src/c_files/return_2.s | .file "return_2.c"
.text
.section .text.startup,"ax",@progbits
.p2align 4
.globl main
.type main, @function
main:
endbr64
movl $2, %eax
ret
.size main, .-main
.ident "GCC: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
iCodeIN/pathos-rs | 554 | src/asm/boot.s | .option norvc
.section .text.boot
.global _start
_start:
csrw satp, zero # Disable paging
la a0, _bss_start # Initialize BSS section to zero
la a1, _bss_end
bgeu a0, a1, 2f
1:
sd zero, (a0)
addi a0, a0, 8
bltu a0, a1, 1b
2:
la sp, _stack_end # Prepare to switch to Rust-based entry code
csrwi pmpcfg0, 0xf # Let S-mode access all physical memory
li t0, 0x3fffffffffffff
csrw pmpaddr0, t0
call kinit
|
iCodeIN/pathos-rs | 1,081 | src/asm/mem.s | .section .rodata
.global HEAP_START
HEAP_START:
.dword _heap_start
.global HEAP_SIZE
HEAP_SIZE:
.dword _heap_size
# .global HEAP_END
# HEAP_END:
# .dword _heap_end
.global ALLOC_START
ALLOC_START:
.dword _alloc_start
.global ALLOC_SIZE
ALLOC_SIZE:
.dword _alloc_size
.global TEXT_START
TEXT_START:
.dword _text_start
.global TEXT_END
TEXT_END:
.dword _text_end
.global DATA_START
DATA_START:
.dword _data_start
.global DATA_END
DATA_END:
.dword _data_end
.global RODATA_START
RODATA_START:
.dword _rodata_start
.global RODATA_END
RODATA_END:
.dword _rodata_end
.global BSS_START
BSS_START:
.dword _bss_start
.global BSS_END
BSS_END:
.dword _bss_end
.global KERNEL_STACK_START
KERNEL_STACK_START:
.dword _stack_start
.global KERNEL_STACK_END
KERNEL_STACK_END:
.dword _stack_end
.global MEMORY_START
MEMORY_START:
.dword _memory_start
.global MEMORY_END
MEMORY_END:
.dword _memory_end
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/sockets/udp/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/sockets/ip-name-lookup/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/sockets/instance-network/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/sockets/network/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/sockets/tcp-create-socket/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/sockets/tcp/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/sockets/udp-create-socket/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/io/error/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/io/streams/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/io/poll/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/http/types/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/http/outgoing-handler/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/stderr/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/terminal-stderr/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/terminal-output/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/terminal-input/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/stdout/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/terminal-stdout/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/exit/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/terminal-stdin/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/environment/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/cli/stdin/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/config/runtime/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/filesystem/types/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/filesystem/preopens/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/random/insecure/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/random/random/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/random/insecure-seed/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/clocks/wall-clock/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/clocks/monotonic-clock/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasi/logging/logging/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasmcloud/postgres/query/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasmcloud/secrets/reveal/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasmcloud/secrets/store/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasmcloud/bus/lattice/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/sqldb-postgres-query/gen/wasmcloud/examples/invoke/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/sockets/udp/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/sockets/ip-name-lookup/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/sockets/instance-network/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/sockets/network/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/sockets/tcp-create-socket/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/sockets/tcp/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/sockets/udp-create-socket/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/io/error/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/io/streams/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/io/poll/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/http/types/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/http/outgoing-handler/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/http/incoming-handler/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/stderr/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/terminal-stderr/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/terminal-output/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/terminal-input/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/stdout/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/terminal-stdout/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/exit/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/terminal-stdin/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/environment/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/cli/stdin/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/config/runtime/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/filesystem/types/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/filesystem/preopens/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/random/insecure/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/random/random/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/random/insecure-seed/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/clocks/wall-clock/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/clocks/monotonic-clock/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasi/logging/logging/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasmcloud/secrets/reveal/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasmcloud/secrets/store/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
idlab-discover/masters-lin-tian | 194 | examples/golang/components/http-client-tinygo/gen/wasmcloud/bus/lattice/empty.s | // This file exists for testing this package without WebAssembly,
// allowing empty function bodies with a //go:wasmimport directive.
// See https://pkg.go.dev/cmd/compile for more information.
|
IEEE-UCF/2025-AMDHardware-GPU | 573 | sw/shader_asm/peak_detector.s | // setup registers
addi r1, r0, 0x2000 // input buffer base
addi r2, r0, 0x3000 // output buffer base
addi r3, r0, 256 // input buffer size
addi r4, r0, 0 // loop counter i = 0
addi r5, r0, 128 // threshold value
loop:
// calculate address of in[i] and load it
add r11, r1, r4
load r6, r11, 0
// compare and branch if not a peak (if r6 < r5)
jumpl r6, r5, not_peak
// it's a peak, so store it and increment output pointer
store r6, r2, 0
addi r2, r2, 4
not_peak:
// increment counter and loop if not done (if r4 < r3)
addi r4, r4, 1
jumpl r4, r3, loop
// finished
halt |
IEEE-UCF/2025-AMDHardware-GPU | 445 | sw/shader_asm/triangle_shader.s | // load vertex coordinates
addi r1, r0, 100
addi r2, r0, 100
addi r3, r0, 200
addi r4, r0, 100
addi r5, r0, 150
addi r6, r0, 200
// load command buffer base address
addi r10, r0, 0x1000
// store vertex data into the command buffer
store r1, r10, 0
store r2, r10, 4
store r3, r10, 8
store r4, r10, 12
store r5, r10, 16
store r6, r10, 20
// issue the draw command by writing '1'
addi r1, r0, 1
store r1, r10, 24
// loop forever
loop:
jump loop |
ifd3f/astridos | 5,583 | kernel/arch/x86_64/start.S | /*
* Rust BareBones OS
* - By John Hodge (Mutabah/thePowersGang)
* - Modified Slightly by Astrid Yu
* (see https://github.com/thepowersgang/rust-barebones-kernel/blob/master/Kernel/arch/amd64/start.S)
*
* arch/amd64/start.S
* - AMD64 Entrypoint
*/
/* The kernel is linked to run at -2GB. This allows efficient addressing */
KERNEL_BASE = 0xFFFFFFFF80000000
/* === Multiboot Header === */
MULTIBOOT_PAGE_ALIGN = (1<<0)
MULTIBOOT_MEMORY_INFO = (1<<1)
MULTIBOOT_REQVIDMODE = (1<<2)
MULTIBOOT_HEADER_MAGIC = 0x1BADB002
MULTIBOOT_HEADER_FLAGS = (MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO | MULTIBOOT_REQVIDMODE)
MULTIBOOT_CHECKSUM = -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
.section .multiboot, "a"
.globl mboot
mboot:
.long MULTIBOOT_HEADER_MAGIC
.long MULTIBOOT_HEADER_FLAGS
.long MULTIBOOT_CHECKSUM
.long mboot
/* a.out kludge (not used, the kernel is elf) */
.long 0, 0, 0, 0 /* load_addr, load_end_addr, bss_end_addr, entry_addr */
/* Video mode */
.long 0 /* Mode type (0: LFB) */
.long 0 /* Width (no preference) */
.long 0 /* Height (no preference) */
.long 32 /* Depth (32-bit preferred) */
#define DEBUG(c) mov $0x3f8, %dx ; mov $c, %al ; outb %al, %dx
/* === Code === */
.section .inittext, "ax"
.globl start
.code32
start:
/* The kernel starts in protected mode (32-bit mode, we want to switch to long mode) */
/* 1. Save multiboot state */
mov %eax, mboot_sig - KERNEL_BASE
mov %ebx, mboot_ptr - KERNEL_BASE
/* 2. Ensure that the CPU support long mode.
CPUID instructions essentially use %eax as the argument for the specific
CPUID leaf to query. For more information, see https://en.wikipedia.org/wiki/CPUID#Calling_CPUID
*/
mov $0x80000000, %eax
cpuid
/* - Check if CPUID supports the field we want to query */
cmp $0x80000001, %eax
jbe not64bitCapable
/* - Test the IA-32e bit */
mov $0x80000001, %eax
cpuid
test $0x20000000, %edx /* bit 29 = LM (long mode supported) */
jz not64bitCapable
/* 3. Set up state for long mode */
/* Enable:
PGE (Page Global Enable)
+ PAE (Physical Address Extension)
+ PSE (Page Size Extensions)
*/
mov %cr4, %eax
or $(0x80|0x20|0x10), %eax
mov %eax, %cr4
/* Load PDP4 */
mov $(init_pml4 - KERNEL_BASE), %eax
mov %eax, %cr3
/* Enable IA-32e mode (Also enables SYSCALL and NX) */
mov $0xC0000080, %ecx
rdmsr
or $(1 << 11)|(1 << 8)|(1 << 0), %eax /* NXE, LME, SCE */
wrmsr
/* Enable paging and enter long mode */
mov %cr0, %eax
or $0x80010000, %eax /* PG & WP */
mov %eax, %cr0
lgdt GDTPtr_low - KERNEL_BASE
ljmp $0x08, $start64
not64bitCapable:
/* If the CPU isn't 64-bit capable, print a message to serial/b8000 then busy wait */
mov $0x3f8, %dx
mov $'N', %al ; outb %al, %dx
movw $0x100|'N', 0xb8000
mov $'o', %al ; outb %al, %dx
movw $0x100|'o', 0xb8002
mov $'t', %al ; outb %al, %dx
movw $0x100|'t', 0xb8004
mov $'6', %al ; outb %al, %dx
movw $0x100|'6', 0xb8006
mov $'4', %al ; outb %al, %dx
movw $0x100|'4', 0xb8008
not64bitCapable.loop:
hlt
jmp not64bitCapable.loop
.code64
.globl start64
start64:
/* Running in 64-bit mode, jump to high memory */
lgdt GDTPtr
mov $start64_high, %rax
jmp *%rax
.section .text
.extern kmain
.globl start64_high
start64_high:
/* and clear low-memory mapping */
mov $0, %rax
mov %rax, init_pml4 - KERNEL_BASE + 0
/* Set up segment registers */
mov $0x10, %ax
mov %ax, %ss
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
/* Set up stack pointer */
mov $init_stack, %rsp
/* call the rust code */
call kmain
/* and if that returns (it shouldn't) loop forever */
start64.loop:
hlt
jmp start64.loop
/*
RDI = Destination
RSI = Value
RDX = Count
*/
.section .text.memset
.globl memset
memset:
mov %rsi, %rax
mov %rdx, %rcx
rep stosb
ret
/*
RDI = Destination
RSI = Source
RDX = Count
*/
.section .text.memcpy
.globl memcpy
memcpy:
mov %rdx, %rcx
rep movsb
ret
/* === Page-aligned data === */
.section .padata
/* Initial paging structures, four levels */
/* The +3 for sub-pages indicates "present (1) + writable (2)" */
init_pml4:
.quad low_pdpt - KERNEL_BASE + 3 /* low map for startup, will be cleared before rust code runs */
.rept 512 - 3
.quad 0
.endr
.quad 0 /* If you so wish, this is a good place for the "Fractal" mapping */
.quad init_pdpt - KERNEL_BASE + 3 /* Final mapping */
low_pdpt:
.quad init_pd - KERNEL_BASE + 3 /* early init identity map */
.rept 512 - 1
.quad 0
.endr
init_pdpt: /* covers the top 512GB, 1GB each entry */
.rept 512 - 2
.quad 0
.endr
.quad init_pd - KERNEL_BASE + 3 /* at -2GB, identity map the kernel image */
.quad 0
init_pd:
/* 0x80 = Page size extension */
.quad 0x000000 + 0x80 + 3 /* Map 2MB, enough for a 1MB kernel */
.quad 0x200000 + 0x80 + 3 /* - give it another 2MB, just in case */
.rept 512 - 2
.quad 0
.endr
init_stack_base:
.rept 0x1000 * 2
.byte 0
.endr
init_stack:
/* === General Data === */
.section .data
.globl mboot_sig
.globl mboot_ptr
mboot_sig: .long 0
mboot_ptr: .long 0
/* Global Descriptor Table */
GDTPtr_low:
.word GDTEnd - GDT - 1
.long GDT - KERNEL_BASE
GDTPtr:
.word GDTEnd - GDT - 1
.quad GDT
.globl GDT
GDT:
.long 0, 0
.long 0x00000000, 0x00209A00 /* 0x08: 64-bit Code */
.long 0x00000000, 0x00009200 /* 0x10: 64-bit Data */
.long 0x00000000, 0x0040FA00 /* 0x18: 32-bit User Code */
.long 0x00000000, 0x0040F200 /* 0x20: User Data */
.long 0x00000000, 0x0020FA00 /* 0x28: 64-bit User Code */
.long 0x00000000, 0x0000F200 /* 0x30: User Data (64 version) */
GDTEnd: |
iMAGRAY/Shelldone | 184 | deps/cairo/pixman/arm-simd-test.S | .text
.arch armv6
.object_arch armv4
.arm
.altmacro
#ifndef __ARM_EABI__
#error EABI is required (to be sure that calling conventions are compatible)
#endif
pld [r0]
uqadd8 r0, r0, r0
|
iMAGRAY/Shelldone | 68 | deps/cairo/pixman/a64-neon-test.S | .text
.arch armv8-a
.altmacro
prfm pldl2strm, [x0]
xtn v0.8b, v0.8h
|
iMAGRAY/Shelldone | 216 | deps/cairo/pixman/neon-test.S | .text
.fpu neon
.arch armv7a
.object_arch armv4
.eabi_attribute 10, 0
.arm
.altmacro
#ifndef __ARM_EABI__
#error EABI is required (to be sure that calling conventions are compatible)
#endif
pld [r0]
vmovn.u16 d0, q0
|
iMAGRAY/Shelldone | 10,556 | deps/cairo/pixman/pixman/pixman-mips-memcpy-asm.S | /*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "pixman-mips-dspr2-asm.h"
/*
* This routine could be optimized for MIPS64. The current code only
* uses MIPS32 instructions.
*/
#ifdef EB
# define LWHI lwl /* high part is left in big-endian */
# define SWHI swl /* high part is left in big-endian */
# define LWLO lwr /* low part is right in big-endian */
# define SWLO swr /* low part is right in big-endian */
#else
# define LWHI lwr /* high part is right in little-endian */
# define SWHI swr /* high part is right in little-endian */
# define LWLO lwl /* low part is left in little-endian */
# define SWLO swl /* low part is left in little-endian */
#endif
LEAF_MIPS32R2(pixman_mips_fast_memcpy)
slti AT, a2, 8
bne AT, zero, $last8
move v0, a0 /* memcpy returns the dst pointer */
/* Test if the src and dst are word-aligned, or can be made word-aligned */
xor t8, a1, a0
andi t8, t8, 0x3 /* t8 is a0/a1 word-displacement */
bne t8, zero, $unaligned
negu a3, a0
andi a3, a3, 0x3 /* we need to copy a3 bytes to make a0/a1 aligned */
beq a3, zero, $chk16w /* when a3=0 then the dst (a0) is word-aligned */
subu a2, a2, a3 /* now a2 is the remaining bytes count */
LWHI t8, 0(a1)
addu a1, a1, a3
SWHI t8, 0(a0)
addu a0, a0, a3
/* Now the dst/src are mutually word-aligned with word-aligned addresses */
$chk16w: andi t8, a2, 0x3f /* any whole 64-byte chunks? */
/* t8 is the byte count after 64-byte chunks */
beq a2, t8, $chk8w /* if a2==t8, no 64-byte chunks */
/* There will be at most 1 32-byte chunk after it */
subu a3, a2, t8 /* subtract from a2 the remainder */
/* Here a3 counts bytes in 16w chunks */
addu a3, a0, a3 /* Now a3 is the final dst after 64-byte chunks */
addu t0, a0, a2 /* t0 is the "past the end" address */
/*
* When in the loop we exercise "pref 30, x(a0)", the a0+x should not be past
* the "t0-32" address
* This means: for x=128 the last "safe" a0 address is "t0-160"
* Alternatively, for x=64 the last "safe" a0 address is "t0-96"
* In the current version we use "pref 30, 128(a0)", so "t0-160" is the limit
*/
subu t9, t0, 160 /* t9 is the "last safe pref 30, 128(a0)" address */
pref 0, 0(a1) /* bring the first line of src, addr 0 */
pref 0, 32(a1) /* bring the second line of src, addr 32 */
pref 0, 64(a1) /* bring the third line of src, addr 64 */
pref 30, 32(a0) /* safe, as we have at least 64 bytes ahead */
/* In case the a0 > t9 don't use "pref 30" at all */
sgtu v1, a0, t9
bgtz v1, $loop16w /* skip "pref 30, 64(a0)" for too short arrays */
nop
/* otherwise, start with using pref30 */
pref 30, 64(a0)
$loop16w:
pref 0, 96(a1)
lw t0, 0(a1)
bgtz v1, $skip_pref30_96 /* skip "pref 30, 96(a0)" */
lw t1, 4(a1)
pref 30, 96(a0) /* continue setting up the dest, addr 96 */
$skip_pref30_96:
lw t2, 8(a1)
lw t3, 12(a1)
lw t4, 16(a1)
lw t5, 20(a1)
lw t6, 24(a1)
lw t7, 28(a1)
pref 0, 128(a1) /* bring the next lines of src, addr 128 */
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
lw t0, 32(a1)
bgtz v1, $skip_pref30_128 /* skip "pref 30, 128(a0)" */
lw t1, 36(a1)
pref 30, 128(a0) /* continue setting up the dest, addr 128 */
$skip_pref30_128:
lw t2, 40(a1)
lw t3, 44(a1)
lw t4, 48(a1)
lw t5, 52(a1)
lw t6, 56(a1)
lw t7, 60(a1)
pref 0, 160(a1) /* bring the next lines of src, addr 160 */
sw t0, 32(a0)
sw t1, 36(a0)
sw t2, 40(a0)
sw t3, 44(a0)
sw t4, 48(a0)
sw t5, 52(a0)
sw t6, 56(a0)
sw t7, 60(a0)
addiu a0, a0, 64 /* adding 64 to dest */
sgtu v1, a0, t9
bne a0, a3, $loop16w
addiu a1, a1, 64 /* adding 64 to src */
move a2, t8
/* Here we have src and dest word-aligned but less than 64-bytes to go */
$chk8w:
pref 0, 0x0(a1)
andi t8, a2, 0x1f /* is there a 32-byte chunk? */
/* the t8 is the remainder count past 32-bytes */
beq a2, t8, $chk1w /* when a2=t8, no 32-byte chunk */
nop
lw t0, 0(a1)
lw t1, 4(a1)
lw t2, 8(a1)
lw t3, 12(a1)
lw t4, 16(a1)
lw t5, 20(a1)
lw t6, 24(a1)
lw t7, 28(a1)
addiu a1, a1, 32
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
addiu a0, a0, 32
$chk1w:
andi a2, t8, 0x3 /* now a2 is the reminder past 1w chunks */
beq a2, t8, $last8
subu a3, t8, a2 /* a3 is count of bytes in 1w chunks */
addu a3, a0, a3 /* now a3 is the dst address past the 1w chunks */
/* copying in words (4-byte chunks) */
$wordCopy_loop:
lw t3, 0(a1) /* the first t3 may be equal t0 ... optimize? */
addiu a1, a1, 4
addiu a0, a0, 4
bne a0, a3, $wordCopy_loop
sw t3, -4(a0)
/* For the last (<8) bytes */
$last8:
blez a2, leave
addu a3, a0, a2 /* a3 is the last dst address */
$last8loop:
lb v1, 0(a1)
addiu a1, a1, 1
addiu a0, a0, 1
bne a0, a3, $last8loop
sb v1, -1(a0)
leave: j ra
nop
/*
* UNALIGNED case
*/
$unaligned:
/* got here with a3="negu a0" */
andi a3, a3, 0x3 /* test if the a0 is word aligned */
beqz a3, $ua_chk16w
subu a2, a2, a3 /* bytes left after initial a3 bytes */
LWHI v1, 0(a1)
LWLO v1, 3(a1)
addu a1, a1, a3 /* a3 may be here 1, 2 or 3 */
SWHI v1, 0(a0)
addu a0, a0, a3 /* below the dst will be word aligned (NOTE1) */
$ua_chk16w: andi t8, a2, 0x3f /* any whole 64-byte chunks? */
/* t8 is the byte count after 64-byte chunks */
beq a2, t8, $ua_chk8w /* if a2==t8, no 64-byte chunks */
/* There will be at most 1 32-byte chunk after it */
subu a3, a2, t8 /* subtract from a2 the reminder */
/* Here a3 counts bytes in 16w chunks */
addu a3, a0, a3 /* Now a3 is the final dst after 64-byte chunks */
addu t0, a0, a2 /* t0 is the "past the end" address */
subu t9, t0, 160 /* t9 is the "last safe pref 30, 128(a0)" address */
pref 0, 0(a1) /* bring the first line of src, addr 0 */
pref 0, 32(a1) /* bring the second line of src, addr 32 */
pref 0, 64(a1) /* bring the third line of src, addr 64 */
pref 30, 32(a0) /* safe, as we have at least 64 bytes ahead */
/* In case the a0 > t9 don't use "pref 30" at all */
sgtu v1, a0, t9
bgtz v1, $ua_loop16w /* skip "pref 30, 64(a0)" for too short arrays */
nop
/* otherwise, start with using pref30 */
pref 30, 64(a0)
$ua_loop16w:
pref 0, 96(a1)
LWHI t0, 0(a1)
LWLO t0, 3(a1)
LWHI t1, 4(a1)
bgtz v1, $ua_skip_pref30_96
LWLO t1, 7(a1)
pref 30, 96(a0) /* continue setting up the dest, addr 96 */
$ua_skip_pref30_96:
LWHI t2, 8(a1)
LWLO t2, 11(a1)
LWHI t3, 12(a1)
LWLO t3, 15(a1)
LWHI t4, 16(a1)
LWLO t4, 19(a1)
LWHI t5, 20(a1)
LWLO t5, 23(a1)
LWHI t6, 24(a1)
LWLO t6, 27(a1)
LWHI t7, 28(a1)
LWLO t7, 31(a1)
pref 0, 128(a1) /* bring the next lines of src, addr 128 */
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
LWHI t0, 32(a1)
LWLO t0, 35(a1)
LWHI t1, 36(a1)
bgtz v1, $ua_skip_pref30_128
LWLO t1, 39(a1)
pref 30, 128(a0) /* continue setting up the dest, addr 128 */
$ua_skip_pref30_128:
LWHI t2, 40(a1)
LWLO t2, 43(a1)
LWHI t3, 44(a1)
LWLO t3, 47(a1)
LWHI t4, 48(a1)
LWLO t4, 51(a1)
LWHI t5, 52(a1)
LWLO t5, 55(a1)
LWHI t6, 56(a1)
LWLO t6, 59(a1)
LWHI t7, 60(a1)
LWLO t7, 63(a1)
pref 0, 160(a1) /* bring the next lines of src, addr 160 */
sw t0, 32(a0)
sw t1, 36(a0)
sw t2, 40(a0)
sw t3, 44(a0)
sw t4, 48(a0)
sw t5, 52(a0)
sw t6, 56(a0)
sw t7, 60(a0)
addiu a0, a0, 64 /* adding 64 to dest */
sgtu v1, a0, t9
bne a0, a3, $ua_loop16w
addiu a1, a1, 64 /* adding 64 to src */
move a2, t8
/* Here we have src and dest word-aligned but less than 64-bytes to go */
$ua_chk8w:
pref 0, 0x0(a1)
andi t8, a2, 0x1f /* is there a 32-byte chunk? */
/* the t8 is the reminder count */
beq a2, t8, $ua_chk1w /* when a2=t8, no 32-byte chunk */
LWHI t0, 0(a1)
LWLO t0, 3(a1)
LWHI t1, 4(a1)
LWLO t1, 7(a1)
LWHI t2, 8(a1)
LWLO t2, 11(a1)
LWHI t3, 12(a1)
LWLO t3, 15(a1)
LWHI t4, 16(a1)
LWLO t4, 19(a1)
LWHI t5, 20(a1)
LWLO t5, 23(a1)
LWHI t6, 24(a1)
LWLO t6, 27(a1)
LWHI t7, 28(a1)
LWLO t7, 31(a1)
addiu a1, a1, 32
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
addiu a0, a0, 32
$ua_chk1w:
andi a2, t8, 0x3 /* now a2 is the reminder past 1w chunks */
beq a2, t8, $ua_smallCopy
subu a3, t8, a2 /* a3 is count of bytes in 1w chunks */
addu a3, a0, a3 /* now a3 is the dst address past the 1w chunks */
/* copying in words (4-byte chunks) */
$ua_wordCopy_loop:
LWHI v1, 0(a1)
LWLO v1, 3(a1)
addiu a1, a1, 4
addiu a0, a0, 4 /* note: dst=a0 is word aligned here, see NOTE1 */
bne a0, a3, $ua_wordCopy_loop
sw v1, -4(a0)
/* Now less than 4 bytes (value in a2) left to copy */
$ua_smallCopy:
beqz a2, leave
addu a3, a0, a2 /* a3 is the last dst address */
$ua_smallCopy_loop:
lb v1, 0(a1)
addiu a1, a1, 1
addiu a0, a0, 1
bne a0, a3, $ua_smallCopy_loop
sb v1, -1(a0)
j ra
nop
END(pixman_mips_fast_memcpy)
|
iMAGRAY/Shelldone | 43,257 | deps/cairo/pixman/pixman/pixman-arm-simd-asm.S | /*
* Copyright © 2012 Raspberry Pi Foundation
* Copyright © 2012 RISC OS Open Ltd
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of the copyright holders not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. The copyright holders make no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*
* Author: Ben Avison (bavison@riscosopen.org)
*
*/
/* Prevent the stack from becoming executable */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.arch armv6
.object_arch armv4
.arm
.altmacro
.p2align 2
#include "pixman-arm-asm.h"
#include "pixman-arm-simd-asm.h"
/* A head macro should do all processing which results in an output of up to
* 16 bytes, as far as the final load instruction. The corresponding tail macro
* should complete the processing of the up-to-16 bytes. The calling macro will
* sometimes choose to insert a preload or a decrement of X between them.
* cond ARM condition code for code block
* numbytes Number of output bytes that should be generated this time
* firstreg First WK register in which to place output
* unaligned_src Whether to use non-wordaligned loads of source image
* unaligned_mask Whether to use non-wordaligned loads of mask image
* preload If outputting 16 bytes causes 64 bytes to be read, whether an extra preload should be output
*/
/* Plain-copy ("blit") fast paths: src_8888_8888, src_0565_0565, src_8_8.
 * The head macro just loads; there is no per-pixel tail processing. */
.macro blit_init
/* Spill the per-line stride registers so they can be reused as WK4-WK7 in the inner loop */
line_saved_regs STRIDE_D, STRIDE_S
.endm
.macro blit_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
pixld cond, numbytes, firstreg, SRC, unaligned_src
.endm
.macro blit_inner_loop process_head, process_tail, unaligned_src, unaligned_mask, dst_alignment
/* Custom inner loop: copy 32 bytes per iteration through 8 work registers,
 * with a preload of the source a fixed distance (SCRATCH) ahead */
WK4 .req STRIDE_D
WK5 .req STRIDE_S
WK6 .req MASK
WK7 .req STRIDE_M
110: pixld , 16, 0, SRC, unaligned_src
pixld , 16, 4, SRC, unaligned_src
pld [SRC, SCRATCH]
pixst , 16, 0, DST
pixst , 16, 4, DST
/* X counts pixels; 32 bytes = 32*8/src_bpp pixels consumed per iteration */
subs X, X, #32*8/src_bpp
bhs 110b
.unreq WK4
.unreq WK5
.unreq WK6
.unreq WK7
.endm
/* 32bpp copy */
generate_composite_function \
pixman_composite_src_8888_8888_asm_armv6, 32, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \
4, /* prefetch distance */ \
blit_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
blit_process_head, \
nop_macro, /* process tail */ \
blit_inner_loop
/* 16bpp (r5g6b5) copy */
generate_composite_function \
pixman_composite_src_0565_0565_asm_armv6, 16, 0, 16, \
FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \
4, /* prefetch distance */ \
blit_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
blit_process_head, \
nop_macro, /* process tail */ \
blit_inner_loop
/* 8bpp copy */
generate_composite_function \
pixman_composite_src_8_8_asm_armv6, 8, 0, 8, \
FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \
3, /* prefetch distance */ \
blit_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
blit_process_head, \
nop_macro, /* process tail */ \
blit_inner_loop
/******************************************************************************/
/* Solid-fill fast paths: replicate the constant source colour into four
 * registers (SRC, STRIDE_S, MASK, STRIDE_M double as WK4-WK7 in the tail)
 * so up to 16 bytes can be stored per store-multiple. */
.macro src_n_8888_init
ldr SRC, [sp, #ARGS_STACK_OFFSET]
mov STRIDE_S, SRC
mov MASK, SRC
mov STRIDE_M, SRC
.endm
.macro src_n_0565_init
/* Duplicate the 16-bit colour into both halves of the word */
ldrh SRC, [sp, #ARGS_STACK_OFFSET]
orr SRC, SRC, lsl #16
mov STRIDE_S, SRC
mov MASK, SRC
mov STRIDE_M, SRC
.endm
.macro src_n_8_init
/* Duplicate the 8-bit value into all four bytes of the word */
ldrb SRC, [sp, #ARGS_STACK_OFFSET]
orr SRC, SRC, lsl #8
orr SRC, SRC, lsl #16
mov STRIDE_S, SRC
mov MASK, SRC
mov STRIDE_M, SRC
.endm
.macro fill_process_tail cond, numbytes, firstreg
/* No loads, no computation: just store the pre-replicated colour from WK4-WK7 */
WK4 .req SRC
WK5 .req STRIDE_S
WK6 .req MASK
WK7 .req STRIDE_M
pixst cond, numbytes, 4, DST
.unreq WK4
.unreq WK5
.unreq WK6
.unreq WK7
.endm
generate_composite_function \
pixman_composite_src_n_8888_asm_armv6, 0, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \
0, /* prefetch distance doesn't apply */ \
src_n_8888_init \
nop_macro, /* newline */ \
nop_macro /* cleanup */ \
nop_macro /* process head */ \
fill_process_tail
generate_composite_function \
pixman_composite_src_n_0565_asm_armv6, 0, 0, 16, \
FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \
0, /* prefetch distance doesn't apply */ \
src_n_0565_init \
nop_macro, /* newline */ \
nop_macro /* cleanup */ \
nop_macro /* process head */ \
fill_process_tail
generate_composite_function \
pixman_composite_src_n_8_asm_armv6, 0, 0, 8, \
FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \
0, /* prefetch distance doesn't apply */ \
src_n_8_init \
nop_macro, /* newline */ \
nop_macro /* cleanup */ \
nop_macro /* process head */ \
fill_process_tail
/******************************************************************************/
/* Force the alpha byte of one x888 pixel to 0xFF, converting it to a888.
 * NOTE: the original text had "&reg" mangled into the "registered" sign by
 * an HTML-entity decode; "&" is the gas macro-argument concatenation
 * operator, so the corrupted form would not assemble. Restored here. */
.macro src_x888_8888_pixel, cond, reg
orr&cond WK&reg, WK&reg, #0xFF000000
.endm
/* x888 -> 8888 conversion: load as-is, then set alpha to 0xFF per pixel in the tail */
.macro pixman_composite_src_x888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
pixld cond, numbytes, firstreg, SRC, unaligned_src
.endm
.macro pixman_composite_src_x888_8888_process_tail cond, numbytes, firstreg
/* Patch the alpha byte of each of the 1/2/4 loaded pixels */
src_x888_8888_pixel cond, %(firstreg+0)
.if numbytes >= 8
src_x888_8888_pixel cond, %(firstreg+1)
.if numbytes == 16
src_x888_8888_pixel cond, %(firstreg+2)
src_x888_8888_pixel cond, %(firstreg+3)
.endif
.endif
.endm
generate_composite_function \
pixman_composite_src_x888_8888_asm_armv6, 32, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_SCRATCH, \
3, /* prefetch distance */ \
nop_macro, /* init */ \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
pixman_composite_src_x888_8888_process_head, \
pixman_composite_src_x888_8888_process_tail
/******************************************************************************/
.macro src_0565_8888_init
/* Hold loop invariants in MASK and STRIDE_M */
ldr MASK, =0x07E007E0
mov STRIDE_M, #0xFF000000
/* Set GE[3:0] to 1010 so SEL instructions do what we want */
ldr SCRATCH, =0x80008000
uadd8 SCRATCH, SCRATCH, SCRATCH
.endm
/* Expand two packed r5g6b5 pixels (in WK&reg1) to two a8r8g8b8 pixels in
 * WK&reg1 (low half) and WK&reg2 (high half), with alpha forced to 0xFF.
 * MASK holds 0x07E007E0, STRIDE_M holds 0xFF000000, GE[3:0] = 1010.
 * NOTE: "&reg" had been mangled to the "registered" sign by an HTML-entity
 * decode; restored so the macro-parameter concatenation assembles again. */
.macro src_0565_8888_2pixels, reg1, reg2
and SCRATCH, WK&reg1, MASK @ 00000GGGGGG0000000000gggggg00000
bic WK&reg2, WK&reg1, MASK @ RRRRR000000BBBBBrrrrr000000bbbbb
orr SCRATCH, SCRATCH, SCRATCH, lsr #6 @ 00000GGGGGGGGGGGG0000ggggggggggg
mov WK&reg1, WK&reg2, lsl #16 @ rrrrr000000bbbbb0000000000000000
mov SCRATCH, SCRATCH, ror #19 @ GGGG0000ggggggggggg00000GGGGGGGG
bic WK&reg2, WK&reg2, WK&reg1, lsr #16 @ RRRRR000000BBBBB0000000000000000
orr WK&reg1, WK&reg1, WK&reg1, lsr #5 @ rrrrrrrrrr0bbbbbbbbbb00000000000
orr WK&reg2, WK&reg2, WK&reg2, lsr #5 @ RRRRRRRRRR0BBBBBBBBBB00000000000
pkhtb WK&reg1, WK&reg1, WK&reg1, asr #5 @ rrrrrrrr--------bbbbbbbb--------
sel WK&reg1, WK&reg1, SCRATCH @ rrrrrrrrggggggggbbbbbbbb--------
mov SCRATCH, SCRATCH, ror #16 @ ggg00000GGGGGGGGGGGG0000gggggggg
pkhtb WK&reg2, WK&reg2, WK&reg2, asr #5 @ RRRRRRRR--------BBBBBBBB--------
sel WK&reg2, WK&reg2, SCRATCH @ RRRRRRRRGGGGGGGGBBBBBBBB--------
orr WK&reg1, STRIDE_M, WK&reg1, lsr #8 @ 11111111rrrrrrrrggggggggbbbbbbbb
orr WK&reg2, STRIDE_M, WK&reg2, lsr #8 @ 11111111RRRRRRRRGGGGGGGGBBBBBBBB
.endm
/* This version doesn't need STRIDE_M, but is one instruction longer.
It would however be preferable for an XRGB target, since we could knock off the last 2 instructions, but is that a common case?
and SCRATCH, WK&reg1, MASK @ 00000GGGGGG0000000000gggggg00000
bic WK&reg1, WK&reg1, MASK @ RRRRR000000BBBBBrrrrr000000bbbbb
orr SCRATCH, SCRATCH, SCRATCH, lsr #6 @ 00000GGGGGGGGGGGG0000ggggggggggg
mov WK&reg2, WK&reg1, lsr #16 @ 0000000000000000RRRRR000000BBBBB
mov SCRATCH, SCRATCH, ror #27 @ GGGGGGGGGGGG0000ggggggggggg00000
bic WK&reg1, WK&reg1, WK&reg2, lsl #16 @ 0000000000000000rrrrr000000bbbbb
mov WK&reg2, WK&reg2, lsl #3 @ 0000000000000RRRRR000000BBBBB000
mov WK&reg1, WK&reg1, lsl #3 @ 0000000000000rrrrr000000bbbbb000
orr WK&reg2, WK&reg2, WK&reg2, lsr #5 @ 0000000000000RRRRRRRRRR0BBBBBBBB
orr WK&reg1, WK&reg1, WK&reg1, lsr #5 @ 0000000000000rrrrrrrrrr0bbbbbbbb
pkhbt WK&reg2, WK&reg2, WK&reg2, lsl #5 @ --------RRRRRRRR--------BBBBBBBB
pkhbt WK&reg1, WK&reg1, WK&reg1, lsl #5 @ --------rrrrrrrr--------bbbbbbbb
sel WK&reg2, SCRATCH, WK&reg2 @ --------RRRRRRRRGGGGGGGGBBBBBBBB
sel WK&reg1, SCRATCH, WK&reg1 @ --------rrrrrrrrggggggggbbbbbbbb
orr WK&reg2, WK&reg2, #0xFF000000 @ 11111111RRRRRRRRGGGGGGGGBBBBBBBB
orr WK&reg1, WK&reg1, #0xFF000000 @ 11111111rrrrrrrrggggggggbbbbbbbb
*/
/* Expand a single r5g6b5 pixel in the low half of WK&reg to a8r8g8b8.
 * NOTE: "&reg" had been mangled to the "registered" sign by an HTML-entity
 * decode; restored so the macro-parameter concatenation assembles again. */
.macro src_0565_8888_1pixel, reg
bic SCRATCH, WK&reg, MASK @ 0000000000000000rrrrr000000bbbbb
and WK&reg, WK&reg, MASK @ 000000000000000000000gggggg00000
mov SCRATCH, SCRATCH, lsl #3 @ 0000000000000rrrrr000000bbbbb000
mov WK&reg, WK&reg, lsl #5 @ 0000000000000000gggggg0000000000
orr SCRATCH, SCRATCH, SCRATCH, lsr #5 @ 0000000000000rrrrrrrrrr0bbbbbbbb
orr WK&reg, WK&reg, WK&reg, lsr #6 @ 000000000000000gggggggggggg00000
pkhbt SCRATCH, SCRATCH, SCRATCH, lsl #5 @ --------rrrrrrrr--------bbbbbbbb
sel WK&reg, WK&reg, SCRATCH @ --------rrrrrrrrggggggggbbbbbbbb
orr WK&reg, WK&reg, #0xFF000000 @ 11111111rrrrrrrrggggggggbbbbbbbb
.endm
/* 0565 -> 8888 conversion: source is half the width of the destination,
 * so numbytes of output needs only numbytes/2 of input */
.macro src_0565_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
.if numbytes == 16
pixldst ld,, 8, firstreg, %(firstreg+2),,, SRC, unaligned_src
.elseif numbytes == 8
pixld , 4, firstreg, SRC, unaligned_src
.elseif numbytes == 4
pixld , 2, firstreg, SRC, unaligned_src
.endif
.endm
.macro src_0565_8888_process_tail cond, numbytes, firstreg
/* Expand in place: each source word holds two pixels */
.if numbytes == 16
src_0565_8888_2pixels firstreg, %(firstreg+1)
src_0565_8888_2pixels %(firstreg+2), %(firstreg+3)
.elseif numbytes == 8
src_0565_8888_2pixels firstreg, %(firstreg+1)
.else
src_0565_8888_1pixel firstreg
.endif
.endm
generate_composite_function \
pixman_composite_src_0565_8888_asm_armv6, 16, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_BRANCH_OVER, \
3, /* prefetch distance */ \
src_0565_8888_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
src_0565_8888_process_head, \
src_0565_8888_process_tail
/******************************************************************************/
/* x888 -> 0565 conversion (drop alpha, truncate channels to 5/6/5 bits) */
.macro src_x888_0565_init
/* Hold loop invariant in MASK */
ldr MASK, =0x001F001F
line_saved_regs STRIDE_S, ORIG_W
.endm
.macro src_x888_0565_1pixel s, d
/* Pack one 8888 pixel from WK&s into the low 16 bits of WK&d */
and WK&d, MASK, WK&s, lsr #3 @ 00000000000rrrrr00000000000bbbbb
and STRIDE_S, WK&s, #0xFC00 @ 0000000000000000gggggg0000000000
orr WK&d, WK&d, WK&d, lsr #5 @ 00000000000-----rrrrr000000bbbbb
orr WK&d, WK&d, STRIDE_S, lsr #5 @ 00000000000-----rrrrrggggggbbbbb
/* Top 16 bits are discarded during the following STRH */
.endm
.macro src_x888_0565_2pixels slo, shi, d, tmp
/* Pack two 8888 pixels into one output word in WK&d */
and SCRATCH, WK&shi, #0xFC00 @ 0000000000000000GGGGGG0000000000
and WK&tmp, MASK, WK&shi, lsr #3 @ 00000000000RRRRR00000000000BBBBB
and WK&shi, MASK, WK&slo, lsr #3 @ 00000000000rrrrr00000000000bbbbb
orr WK&tmp, WK&tmp, WK&tmp, lsr #5 @ 00000000000-----RRRRR000000BBBBB
orr WK&tmp, WK&tmp, SCRATCH, lsr #5 @ 00000000000-----RRRRRGGGGGGBBBBB
and SCRATCH, WK&slo, #0xFC00 @ 0000000000000000gggggg0000000000
orr WK&shi, WK&shi, WK&shi, lsr #5 @ 00000000000-----rrrrr000000bbbbb
orr WK&shi, WK&shi, SCRATCH, lsr #5 @ 00000000000-----rrrrrggggggbbbbb
pkhbt WK&d, WK&shi, WK&tmp, lsl #16 @ RRRRRGGGGGGBBBBBrrrrrggggggbbbbb
.endm
.macro src_x888_0565_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
/* Source is twice the width of the destination, so interleave loads and
 * packing to keep all 8 work registers busy */
WK4 .req STRIDE_S
WK5 .req STRIDE_M
WK6 .req WK3
WK7 .req ORIG_W
.if numbytes == 16
pixld , 16, 4, SRC, 0
src_x888_0565_2pixels 4, 5, 0, 0
pixld , 8, 4, SRC, 0
src_x888_0565_2pixels 6, 7, 1, 1
pixld , 8, 6, SRC, 0
.else
pixld , numbytes*2, 4, SRC, 0
.endif
.endm
.macro src_x888_0565_process_tail cond, numbytes, firstreg
.if numbytes == 16
src_x888_0565_2pixels 4, 5, 2, 2
src_x888_0565_2pixels 6, 7, 3, 4
.elseif numbytes == 8
src_x888_0565_2pixels 4, 5, 1, 1
src_x888_0565_2pixels 6, 7, 2, 2
.elseif numbytes == 4
src_x888_0565_2pixels 4, 5, 1, 1
.else
src_x888_0565_1pixel 4, 1
.endif
.if numbytes == 16
pixst , numbytes, 0, DST
.else
pixst , numbytes, 1, DST
.endif
.unreq WK4
.unreq WK5
.unreq WK6
.unreq WK7
.endm
generate_composite_function \
pixman_composite_src_x888_0565_asm_armv6, 32, 0, 16, \
FLAG_DST_WRITEONLY | FLAG_BRANCH_OVER | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH, \
3, /* prefetch distance */ \
src_x888_0565_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
src_x888_0565_process_head, \
src_x888_0565_process_tail
/******************************************************************************/
/* add_8_8: saturating per-byte addition of source into destination.
 * Source bytes are loaded into MASK/STRIDE_M (aliased WK4/WK5). */
.macro add_8_8_8pixels cond, dst1, dst2
/* 8 bytes: dst1 += WK4, dst2 += WK5, each byte saturating at 0xFF */
uqadd8&cond WK&dst1, WK&dst1, MASK
uqadd8&cond WK&dst2, WK&dst2, STRIDE_M
.endm
.macro add_8_8_4pixels cond, dst
uqadd8&cond WK&dst, WK&dst, MASK
.endm
.macro add_8_8_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
WK4 .req MASK
WK5 .req STRIDE_M
.if numbytes == 16
/* Process the first 8 bytes in the head so the second source load
 * overlaps with the first additions */
pixld cond, 8, 4, SRC, unaligned_src
pixld cond, 16, firstreg, DST, 0
add_8_8_8pixels cond, firstreg, %(firstreg+1)
pixld cond, 8, 4, SRC, unaligned_src
.else
pixld cond, numbytes, 4, SRC, unaligned_src
pixld cond, numbytes, firstreg, DST, 0
.endif
.unreq WK4
.unreq WK5
.endm
.macro add_8_8_process_tail cond, numbytes, firstreg
.if numbytes == 16
add_8_8_8pixels cond, %(firstreg+2), %(firstreg+3)
.elseif numbytes == 8
add_8_8_8pixels cond, firstreg, %(firstreg+1)
.else
add_8_8_4pixels cond, firstreg
.endif
.endm
generate_composite_function \
pixman_composite_add_8_8_asm_armv6, 8, 0, 8, \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_PRESERVES_SCRATCH, \
2, /* prefetch distance */ \
nop_macro, /* init */ \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
add_8_8_process_head, \
add_8_8_process_tail
/******************************************************************************/
/* over_8888_8888: Porter-Duff OVER of premultiplied ARGB source on ARGB dest */
.macro over_8888_8888_init
/* Hold loop invariant in MASK */
ldr MASK, =0x00800080
/* Set GE[3:0] to 0101 so SEL instructions do what we want */
uadd8 SCRATCH, MASK, MASK
line_saved_regs STRIDE_D, STRIDE_S, ORIG_W
.endm
.macro over_8888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
/* Source pixels land in WK4-WK7, destination pixels in WK0-WK3 */
WK4 .req STRIDE_D
WK5 .req STRIDE_S
WK6 .req STRIDE_M
WK7 .req ORIG_W
pixld , numbytes, %(4+firstreg), SRC, unaligned_src
pixld , numbytes, firstreg, DST, 0
.unreq WK4
.unreq WK5
.unreq WK6
.unreq WK7
.endm
/* Set Z if all 1/2/4 source pixels are fully transparent (i.e. zero).
 * NOTE: "&reg" had been mangled to the "registered" sign by an HTML-entity
 * decode; restored so the macro-parameter concatenation assembles again. */
.macro over_8888_8888_check_transparent numbytes, reg0, reg1, reg2, reg3
/* Since these colours are premultiplied by alpha, only 0 indicates transparent (any other colour with 0 in the alpha byte is luminous) */
teq WK&reg0, #0
.if numbytes > 4
teqeq WK&reg1, #0
.if numbytes > 8
teqeq WK&reg2, #0
teqeq WK&reg3, #0
.endif
.endif
.endm
.macro over_8888_8888_prepare next
/* Extract the alpha byte of the first source pixel ahead of the loop */
mov WK&next, WK&next, lsr #24
.endm
.macro over_8888_8888_1pixel src, dst, offset, next
/* dst = src + dst * (255 - src.alpha), computed per byte pair */
/* src = destination component multiplier */
rsb WK&src, WK&src, #255
/* Split even/odd bytes of dst into SCRATCH/dst */
uxtb16 SCRATCH, WK&dst
uxtb16 WK&dst, WK&dst, ror #8
/* Multiply through, adding 0.5 to the upper byte of result for rounding */
mla SCRATCH, SCRATCH, WK&src, MASK
mla WK&dst, WK&dst, WK&src, MASK
/* Where we would have had a stall between the result of the first MLA and the shifter input,
 * reload the complete source pixel */
ldr WK&src, [SRC, #offset]
/* Multiply by 257/256 to approximate 256/255 */
uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8
/* In this stall, start processing the next pixel */
.if offset < -4
mov WK&next, WK&next, lsr #24
.endif
uxtab16 WK&dst, WK&dst, WK&dst, ror #8
/* Recombine even/odd bytes of multiplied destination */
mov SCRATCH, SCRATCH, ror #8
sel WK&dst, SCRATCH, WK&dst
/* Saturated add of source to multiplied destination */
uqadd8 WK&dst, WK&dst, WK&src
.endm
.macro over_8888_8888_process_tail cond, numbytes, firstreg
WK4 .req STRIDE_D
WK5 .req STRIDE_S
WK6 .req STRIDE_M
WK7 .req ORIG_W
/* Skip both the arithmetic and the store when every source pixel is transparent */
over_8888_8888_check_transparent numbytes, %(4+firstreg), %(5+firstreg), %(6+firstreg), %(7+firstreg)
beq 10f
over_8888_8888_prepare %(4+firstreg)
.set PROCESS_REG, firstreg
.set PROCESS_OFF, -numbytes
.rept numbytes / 4
over_8888_8888_1pixel %(4+PROCESS_REG), %(0+PROCESS_REG), PROCESS_OFF, %(5+PROCESS_REG)
.set PROCESS_REG, PROCESS_REG+1
.set PROCESS_OFF, PROCESS_OFF+4
.endr
pixst , numbytes, firstreg, DST
10:
.unreq WK4
.unreq WK5
.unreq WK6
.unreq WK7
.endm
generate_composite_function \
pixman_composite_over_8888_8888_asm_armv6, 32, 0, 32 \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \
2, /* prefetch distance */ \
over_8888_8888_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
over_8888_8888_process_head, \
over_8888_8888_process_tail
/******************************************************************************/
/* Multiply each byte of a word by a byte.
* Useful when there aren't any obvious ways to fill the stalls with other instructions.
* word Register containing 4 bytes
* byte Register containing byte multiplier (bits 8-31 must be 0)
* tmp Scratch register
* half Register containing the constant 0x00800080
* GE[3:0] bits must contain 0101
*/
.macro mul_8888_8 word, byte, tmp, half
/* word = (word * byte + rounding) / 255, applied to each of the 4 bytes */
/* Split even/odd bytes of word apart */
uxtb16 tmp, word
uxtb16 word, word, ror #8
/* Multiply bytes together with rounding, then by 257/256 */
mla tmp, tmp, byte, half
mla word, word, byte, half /* 1 stall follows */
uxtab16 tmp, tmp, tmp, ror #8 /* 1 stall follows */
uxtab16 word, word, word, ror #8
/* Recombine bytes */
mov tmp, tmp, ror #8
sel word, tmp, word
.endm
/******************************************************************************/
/* over_8888_n_8888: OVER with a constant mask; only the mask's alpha byte is used */
.macro over_8888_n_8888_init
/* Mask is constant */
ldr MASK, [sp, #ARGS_STACK_OFFSET+8]
/* Hold loop invariant in STRIDE_M */
ldr STRIDE_M, =0x00800080
/* We only want the alpha bits of the constant mask */
mov MASK, MASK, lsr #24
/* Set GE[3:0] to 0101 so SEL instructions do what we want */
uadd8 SCRATCH, STRIDE_M, STRIDE_M
line_saved_regs Y, STRIDE_D, STRIDE_S, ORIG_W
.endm
.macro over_8888_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
/* Only WK4/WK5 are free for source pixels (WK6/WK7 are temporaries in the
 * tail), hence the firstreg%2 addressing */
WK4 .req Y
WK5 .req STRIDE_D
WK6 .req STRIDE_S
WK7 .req ORIG_W
pixld , numbytes, %(4+(firstreg%2)), SRC, unaligned_src
pixld , numbytes, firstreg, DST, 0
.unreq WK4
.unreq WK5
.unreq WK6
.unreq WK7
.endm
.macro over_8888_n_8888_1pixel src, dst
/* src *= mask.alpha; dst = src + dst * (255 - src.alpha) */
mul_8888_8 WK&src, MASK, SCRATCH, STRIDE_M
sub WK7, WK6, WK&src, lsr #24
mul_8888_8 WK&dst, WK7, SCRATCH, STRIDE_M
uqadd8 WK&dst, WK&dst, WK&src
.endm
.macro over_8888_n_8888_process_tail cond, numbytes, firstreg
WK4 .req Y
WK5 .req STRIDE_D
WK6 .req STRIDE_S
WK7 .req ORIG_W
over_8888_8888_check_transparent numbytes, %(4+(firstreg%2)), %(5+(firstreg%2)), %(6+firstreg), %(7+firstreg)
beq 10f
mov WK6, #255
.set PROCESS_REG, firstreg
.rept numbytes / 4
.if numbytes == 16 && PROCESS_REG == 2
/* We're using WK6 and WK7 as temporaries, so half way through
 * 4 pixels, reload the second two source pixels but this time
 * into WK4 and WK5 */
ldmdb SRC, {WK4, WK5}
.endif
over_8888_n_8888_1pixel %(4+(PROCESS_REG%2)), %(PROCESS_REG)
.set PROCESS_REG, PROCESS_REG+1
.endr
pixst , numbytes, firstreg, DST
10:
.unreq WK4
.unreq WK5
.unreq WK6
.unreq WK7
.endm
generate_composite_function \
pixman_composite_over_8888_n_8888_asm_armv6, 32, 0, 32 \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \
2, /* prefetch distance */ \
over_8888_n_8888_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
over_8888_n_8888_process_head, \
over_8888_n_8888_process_tail
/******************************************************************************/
/* over_n_8_8888: constant source, 8-bit mask, ARGB destination */
.macro over_n_8_8888_init
/* Source is constant, but splitting it into even/odd bytes is a loop invariant */
ldr SRC, [sp, #ARGS_STACK_OFFSET]
/* Not enough registers to hold this constant, but we still use it here to set GE[3:0] */
ldr SCRATCH, =0x00800080
uxtb16 STRIDE_S, SRC
uxtb16 SRC, SRC, ror #8
/* Set GE[3:0] to 0101 so SEL instructions do what we want */
uadd8 SCRATCH, SCRATCH, SCRATCH
line_saved_regs Y, STRIDE_D, STRIDE_M, ORIG_W
.endm
.macro over_n_8_8888_newline
/* STRIDE_D is clobbered per line, so re-establish the 0x00800080 invariant;
 * the branch-over flushes the literal pool close enough to be addressable */
ldr STRIDE_D, =0x00800080
b 1f
.ltorg
1:
.endm
.macro over_n_8_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
/* One mask byte per destination pixel: numbytes/4 mask bytes into WK4 */
WK4 .req STRIDE_M
pixld , numbytes/4, 4, MASK, unaligned_mask
pixld , numbytes, firstreg, DST, 0
.unreq WK4
.endm
.macro over_n_8_8888_1pixel src, dst
/* Y = source * mask byte; then dst = Y + dst * (255 - Y.alpha) */
uxtb Y, WK4, ror #src*8
/* Trailing part of multiplication of source */
mla SCRATCH, STRIDE_S, Y, STRIDE_D
mla Y, SRC, Y, STRIDE_D
mov ORIG_W, #255
uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8
uxtab16 Y, Y, Y, ror #8
mov SCRATCH, SCRATCH, ror #8
sub ORIG_W, ORIG_W, Y, lsr #24
sel Y, SCRATCH, Y
/* Then multiply the destination */
mul_8888_8 WK&dst, ORIG_W, SCRATCH, STRIDE_D
uqadd8 WK&dst, WK&dst, Y
.endm
.macro over_n_8_8888_process_tail cond, numbytes, firstreg
WK4 .req STRIDE_M
/* All mask bytes zero => nothing to do for this group */
teq WK4, #0
beq 10f
.set PROCESS_REG, firstreg
.rept numbytes / 4
over_n_8_8888_1pixel %(PROCESS_REG-firstreg), %(PROCESS_REG)
.set PROCESS_REG, PROCESS_REG+1
.endr
pixst , numbytes, firstreg, DST
10:
.unreq WK4
.endm
generate_composite_function \
pixman_composite_over_n_8_8888_asm_armv6, 0, 8, 32 \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \
2, /* prefetch distance */ \
over_n_8_8888_init, \
over_n_8_8888_newline, \
nop_macro, /* cleanup */ \
over_n_8_8888_process_head, \
over_n_8_8888_process_tail
/******************************************************************************/
/* over_reverse_n_8888: constant source composited UNDER the destination */
.macro over_reverse_n_8888_init
ldr SRC, [sp, #ARGS_STACK_OFFSET]
ldr MASK, =0x00800080
/* Split source pixel into RB/AG parts */
uxtb16 STRIDE_S, SRC
uxtb16 STRIDE_M, SRC, ror #8
/* Set GE[3:0] to 0101 so SEL instructions do what we want */
uadd8 SCRATCH, MASK, MASK
line_saved_regs STRIDE_D, ORIG_W
.endm
.macro over_reverse_n_8888_newline
/* STRIDE_D is clobbered per line; re-establish the 0xFF constant */
mov STRIDE_D, #0xFF
.endm
.macro over_reverse_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
pixld , numbytes, firstreg, DST, 0
.endm
.macro over_reverse_n_8888_1pixel d, is_only
/* d = d + src * (255 - d.alpha); shortcuts for d==0 and d opaque */
teq WK&d, #0
beq 8f /* replace with source */
bics ORIG_W, STRIDE_D, WK&d, lsr #24
.if is_only == 1
beq 49f /* skip store */
.else
beq 9f /* write same value back */
.endif
mla SCRATCH, STRIDE_S, ORIG_W, MASK /* red/blue */
mla ORIG_W, STRIDE_M, ORIG_W, MASK /* alpha/green */
uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8
uxtab16 ORIG_W, ORIG_W, ORIG_W, ror #8
mov SCRATCH, SCRATCH, ror #8
sel ORIG_W, SCRATCH, ORIG_W
uqadd8 WK&d, WK&d, ORIG_W
b 9f
8: mov WK&d, SRC
9:
.endm
/* Process 1/2/4 destination pixels, skipping the store entirely when every
 * pixel is already opaque (alpha bytes all 0xFF).
 * NOTE: "&reg" had been mangled to the "registered" sign by an HTML-entity
 * decode; restored so the macro-parameter concatenation assembles again. */
.macro over_reverse_n_8888_tail numbytes, reg1, reg2, reg3, reg4
.if numbytes == 4
over_reverse_n_8888_1pixel reg1, 1
.else
/* AND the pixels together: the result's alpha is 0xFF only if all are opaque */
and SCRATCH, WK&reg1, WK&reg2
.if numbytes == 16
and SCRATCH, SCRATCH, WK&reg3
and SCRATCH, SCRATCH, WK&reg4
.endif
mvns SCRATCH, SCRATCH, asr #24
beq 49f /* skip store if all opaque */
over_reverse_n_8888_1pixel reg1, 0
over_reverse_n_8888_1pixel reg2, 0
.if numbytes == 16
over_reverse_n_8888_1pixel reg3, 0
over_reverse_n_8888_1pixel reg4, 0
.endif
.endif
pixst , numbytes, reg1, DST
49:
.endm
.macro over_reverse_n_8888_process_tail cond, numbytes, firstreg
over_reverse_n_8888_tail numbytes, firstreg, %(firstreg+1), %(firstreg+2), %(firstreg+3)
.endm
generate_composite_function \
pixman_composite_over_reverse_n_8888_asm_armv6, 0, 0, 32 \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH, \
3, /* prefetch distance */ \
over_reverse_n_8888_init, \
over_reverse_n_8888_newline, \
nop_macro, /* cleanup */ \
over_reverse_n_8888_process_head, \
over_reverse_n_8888_process_tail
/******************************************************************************/
/* over_n_8888_8888_ca specialisation for a white source: the per-channel
 * mask then acts directly as the source colour */
.macro over_white_8888_8888_ca_init
HALF .req SRC
TMP0 .req STRIDE_D
TMP1 .req STRIDE_S
TMP2 .req STRIDE_M
TMP3 .req ORIG_W
WK4 .req SCRATCH
line_saved_regs STRIDE_D, STRIDE_M, ORIG_W
ldr SCRATCH, =0x800080
mov HALF, #0x80
/* Set GE[3:0] to 0101 so SEL instructions do what we want */
uadd8 SCRATCH, SCRATCH, SCRATCH
/* Destination preload is biased because each store reads DST first */
.set DST_PRELOAD_BIAS, 8
.endm
.macro over_white_8888_8888_ca_cleanup
.set DST_PRELOAD_BIAS, 0
.unreq HALF
.unreq TMP0
.unreq TMP1
.unreq TMP2
.unreq TMP3
.unreq WK4
.endm
.macro over_white_8888_8888_ca_combine m, d
/* d = m + d * (~m) per channel; TMP0 holds ~m on entry */
uxtb16 TMP1, TMP0 /* rb_notmask */
uxtb16 TMP2, d /* rb_dest; 1 stall follows */
smlatt TMP3, TMP2, TMP1, HALF /* red */
smlabb TMP2, TMP2, TMP1, HALF /* blue */
uxtb16 TMP0, TMP0, ror #8 /* ag_notmask */
uxtb16 TMP1, d, ror #8 /* ag_dest; 1 stall follows */
smlatt d, TMP1, TMP0, HALF /* alpha */
smlabb TMP1, TMP1, TMP0, HALF /* green */
pkhbt TMP0, TMP2, TMP3, lsl #16 /* rb; 1 stall follows */
pkhbt TMP1, TMP1, d, lsl #16 /* ag */
uxtab16 TMP0, TMP0, TMP0, ror #8
uxtab16 TMP1, TMP1, TMP1, ror #8
mov TMP0, TMP0, ror #8
sel d, TMP0, TMP1
uqadd8 d, d, m /* d is a late result */
.endm
.macro over_white_8888_8888_ca_1pixel_head
pixld , 4, 1, MASK, 0
pixld , 4, 3, DST, 0
.endm
.macro over_white_8888_8888_ca_1pixel_tail
/* Shortcut tests: mask all-0 => dest unchanged; mask all-1 => dest = white */
mvn TMP0, WK1
teq WK1, WK1, asr #32
bne 01f
bcc 03f
mov WK3, WK1
b 02f
01: over_white_8888_8888_ca_combine WK1, WK3
02: pixst , 4, 3, DST
03:
.endm
.macro over_white_8888_8888_ca_2pixels_head
pixld , 8, 1, MASK, 0
.endm
.macro over_white_8888_8888_ca_2pixels_tail
/* Same shortcuts as the 1-pixel case, applied to each pixel in turn;
 * the store is only skipped when both masks are zero */
pixld , 8, 3, DST
mvn TMP0, WK1
teq WK1, WK1, asr #32
bne 01f
movcs WK3, WK1
bcs 02f
teq WK2, #0
beq 05f
b 02f
01: over_white_8888_8888_ca_combine WK1, WK3
02: mvn TMP0, WK2
teq WK2, WK2, asr #32
bne 03f
movcs WK4, WK2
b 04f
03: over_white_8888_8888_ca_combine WK2, WK4
04: pixst , 8, 3, DST
05:
.endm
.macro over_white_8888_8888_ca_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
/* Processing is pairwise; 16 bytes = one completed pair plus one pending head */
.if numbytes == 4
over_white_8888_8888_ca_1pixel_head
.else
.if numbytes == 16
over_white_8888_8888_ca_2pixels_head
over_white_8888_8888_ca_2pixels_tail
.endif
over_white_8888_8888_ca_2pixels_head
.endif
.endm
.macro over_white_8888_8888_ca_process_tail cond, numbytes, firstreg
.if numbytes == 4
over_white_8888_8888_ca_1pixel_tail
.else
over_white_8888_8888_ca_2pixels_tail
.endif
.endm
generate_composite_function \
pixman_composite_over_white_8888_8888_ca_asm_armv6, 0, 32, 32 \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH \
2, /* prefetch distance */ \
over_white_8888_8888_ca_init, \
nop_macro, /* newline */ \
over_white_8888_8888_ca_cleanup, \
over_white_8888_8888_ca_process_head, \
over_white_8888_8888_ca_process_tail
.macro over_n_8888_8888_ca_init
/* Set up constants. RB_SRC and AG_SRC are in registers;
 * RB_FLDS, A_SRC, and the two HALF values need to go on the
 * stack (and the full SRC value is already there) */
ldr SCRATCH, [sp, #ARGS_STACK_OFFSET]
mov WK0, #0x00FF0000
orr WK0, WK0, #0xFF /* RB_FLDS (0x00FF00FF) */
mov WK1, #0x80 /* HALF default value */
mov WK2, SCRATCH, lsr #24 /* A_SRC */
orr WK3, WK1, WK1, lsl #16 /* HALF alternate value (0x00800080) */
push {WK0-WK3}
.set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET+16
uxtb16 SRC, SCRATCH
uxtb16 STRIDE_S, SCRATCH, ror #8
/* Set GE[3:0] to 0101 so SEL instructions do what we want */
uadd8 SCRATCH, WK3, WK3
/* Rebind the work registers: WK0/WK1 move off r8/r9 so those can be
 * reloaded pairwise (LDRD) with the spilled constants */
.unreq WK0
.unreq WK1
.unreq WK2
.unreq WK3
WK0 .req Y
WK1 .req STRIDE_D
RB_SRC .req SRC
AG_SRC .req STRIDE_S
WK2 .req STRIDE_M
RB_FLDS .req r8 /* the reloaded constants have to be at consecutive registers starting at an even one */
A_SRC .req r8
HALF .req r9
WK3 .req r10
WK4 .req r11
WK5 .req SCRATCH
WK6 .req ORIG_W
line_saved_regs Y, STRIDE_D, STRIDE_M, ORIG_W
.endm
.macro over_n_8888_8888_ca_cleanup
/* Pop the four constants pushed by init and restore the default WK bindings */
add sp, sp, #16
.set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET-16
.unreq WK0
.unreq WK1
.unreq RB_SRC
.unreq AG_SRC
.unreq WK2
.unreq RB_FLDS
.unreq A_SRC
.unreq HALF
.unreq WK3
.unreq WK4
.unreq WK5
.unreq WK6
WK0 .req r8
WK1 .req r9
WK2 .req r10
WK3 .req r11
.endm
.macro over_n_8888_8888_ca_1pixel_head
pixld , 4, 6, MASK, 0
pixld , 4, 0, DST, 0
.endm
.macro over_n_8888_8888_ca_1pixel_tail
/* Component-alpha OVER: dst = src*mask + dst*(1 - src.alpha*mask),
 * with shortcuts for mask transparent/opaque and source opaque */
ldrd A_SRC, HALF, [sp, #LOCALS_STACK_OFFSET+8]
uxtb16 WK1, WK6 /* rb_mask (first step of hard case placed in what would otherwise be a stall) */
teq WK6, WK6, asr #32 /* Zc if transparent, ZC if opaque */
bne 20f
bcc 40f
/* Mask is fully opaque (all channels) */
ldr WK6, [sp, #ARGS_STACK_OFFSET] /* get SRC back */
eors A_SRC, A_SRC, #0xFF
bne 10f
/* Source is also opaque - same as src_8888_8888 */
mov WK0, WK6
b 30f
10: /* Same as over_8888_8888 */
mul_8888_8 WK0, A_SRC, WK5, HALF
uqadd8 WK0, WK0, WK6
b 30f
20: /* No simplifications possible - do it the hard way */
uxtb16 WK2, WK6, ror #8 /* ag_mask */
mla WK3, WK1, A_SRC, HALF /* rb_mul; 2 cycles */
mla WK4, WK2, A_SRC, HALF /* ag_mul; 2 cycles */
ldrd RB_FLDS, HALF, [sp, #LOCALS_STACK_OFFSET]
uxtb16 WK5, WK0 /* rb_dest */
uxtab16 WK3, WK3, WK3, ror #8
uxtb16 WK6, WK0, ror #8 /* ag_dest */
uxtab16 WK4, WK4, WK4, ror #8
smlatt WK0, RB_SRC, WK1, HALF /* red1 */
smlabb WK1, RB_SRC, WK1, HALF /* blue1 */
bic WK3, RB_FLDS, WK3, lsr #8
bic WK4, RB_FLDS, WK4, lsr #8
pkhbt WK1, WK1, WK0, lsl #16 /* rb1 */
smlatt WK0, WK5, WK3, HALF /* red2 */
smlabb WK3, WK5, WK3, HALF /* blue2 */
uxtab16 WK1, WK1, WK1, ror #8
smlatt WK5, AG_SRC, WK2, HALF /* alpha1 */
pkhbt WK3, WK3, WK0, lsl #16 /* rb2 */
smlabb WK0, AG_SRC, WK2, HALF /* green1 */
smlatt WK2, WK6, WK4, HALF /* alpha2 */
smlabb WK4, WK6, WK4, HALF /* green2 */
pkhbt WK0, WK0, WK5, lsl #16 /* ag1 */
uxtab16 WK3, WK3, WK3, ror #8
pkhbt WK4, WK4, WK2, lsl #16 /* ag2 */
uxtab16 WK0, WK0, WK0, ror #8
uxtab16 WK4, WK4, WK4, ror #8
mov WK1, WK1, ror #8
mov WK3, WK3, ror #8
sel WK2, WK1, WK0 /* recombine source*mask */
sel WK1, WK3, WK4 /* recombine dest*(1-source_alpha*mask) */
uqadd8 WK0, WK1, WK2 /* followed by 1 stall */
30: /* The destination buffer is already in the L1 cache, so
 * there's little point in amalgamating writes */
pixst , 4, 0, DST
40:
.endm
.macro over_n_8888_8888_ca_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
/* Strictly one pixel at a time; all but the last complete within the head */
.rept (numbytes / 4) - 1
over_n_8888_8888_ca_1pixel_head
over_n_8888_8888_ca_1pixel_tail
.endr
over_n_8888_8888_ca_1pixel_head
.endm
.macro over_n_8888_8888_ca_process_tail cond, numbytes, firstreg
over_n_8888_8888_ca_1pixel_tail
.endm
/* Entry point: divert a white (-1) source to the specialised routine */
pixman_asm_function pixman_composite_over_n_8888_8888_ca_asm_armv6
ldr ip, [sp]
cmp ip, #-1
beq pixman_composite_over_white_8888_8888_ca_asm_armv6
/* else drop through... */
.endfunc
generate_composite_function \
pixman_composite_over_n_8888_8888_ca_asm_armv6_helper, 0, 32, 32 \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH | FLAG_PROCESS_CORRUPTS_WK0 \
2, /* prefetch distance */ \
over_n_8888_8888_ca_init, \
nop_macro, /* newline */ \
over_n_8888_8888_ca_cleanup, \
over_n_8888_8888_ca_process_head, \
over_n_8888_8888_ca_process_tail
/******************************************************************************/
/* in_reverse_8888_8888: dst = dst * src.alpha */
.macro in_reverse_8888_8888_init
/* Hold loop invariant in MASK */
ldr MASK, =0x00800080
/* Set GE[3:0] to 0101 so SEL instructions do what we want */
uadd8 SCRATCH, MASK, MASK
/* Offset the source pointer: we only need the alpha bytes */
add SRC, SRC, #3
line_saved_regs ORIG_W
.endm
/* Load only the alpha byte of each source pixel (SRC is pre-offset by 3);
 * DST is advanced here and addressed with negative offsets in the tail.
 * NOTE: "&reg" had been mangled to the "registered" sign by an HTML-entity
 * decode; restored so the macro-parameter concatenation assembles again. */
.macro in_reverse_8888_8888_head numbytes, reg1, reg2, reg3
ldrb ORIG_W, [SRC], #4
.if numbytes >= 8
ldrb WK&reg1, [SRC], #4
.if numbytes == 16
ldrb WK&reg2, [SRC], #4
ldrb WK&reg3, [SRC], #4
.endif
.endif
add DST, DST, #numbytes
.endm
.macro in_reverse_8888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
/* Thin adapter mapping the generic head signature onto the alpha-only loads */
in_reverse_8888_8888_head numbytes, firstreg, %(firstreg+1), %(firstreg+2)
.endm
.macro in_reverse_8888_8888_1pixel s, d, offset, is_only
/* d = d * s/255 per byte; shortcuts for s == 0 and s == 0xFF */
.if is_only != 1
movs s, ORIG_W
.if offset != 0
ldrb ORIG_W, [SRC, #offset]
.endif
beq 01f
teq STRIDE_M, #0xFF
beq 02f
.endif
uxtb16 SCRATCH, d /* rb_dest */
uxtb16 d, d, ror #8 /* ag_dest */
mla SCRATCH, SCRATCH, s, MASK
mla d, d, s, MASK
uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8
uxtab16 d, d, d, ror #8
mov SCRATCH, SCRATCH, ror #8
sel d, SCRATCH, d
b 02f
.if offset == 0
48: /* Last mov d,#0 of the set - used as part of shortcut for
 * source values all 0 */
.endif
01: mov d, #0
02:
.endm
/* Process 1/2/4 pixels: when every source alpha is 0xFF the destination is
 * left untouched (and not even loaded); when every alpha is 0 the
 * destination is zeroed via the shared "48:" shortcut in the 1pixel macro.
 * NOTE: "&reg" had been mangled to the "registered" sign by an HTML-entity
 * decode; restored so the macro-parameter concatenation assembles again. */
.macro in_reverse_8888_8888_tail numbytes, reg1, reg2, reg3, reg4
.if numbytes == 4
teq ORIG_W, ORIG_W, asr #32
ldrne WK&reg1, [DST, #-4]
.elseif numbytes == 8
teq ORIG_W, WK&reg1
teqeq ORIG_W, ORIG_W, asr #32 /* all 0 or all -1? */
ldmnedb DST, {WK&reg1-WK&reg2}
.else
teq ORIG_W, WK&reg1
teqeq ORIG_W, WK&reg2
teqeq ORIG_W, WK&reg3
teqeq ORIG_W, ORIG_W, asr #32 /* all 0 or all -1? */
ldmnedb DST, {WK&reg1-WK&reg4}
.endif
cmnne DST, #0 /* clear C if NE */
bcs 49f /* no writes to dest if source all -1 */
beq 48f /* set dest to all 0 if source all 0 */
.if numbytes == 4
in_reverse_8888_8888_1pixel ORIG_W, WK&reg1, 0, 1
str WK&reg1, [DST, #-4]
.elseif numbytes == 8
in_reverse_8888_8888_1pixel STRIDE_M, WK&reg1, -4, 0
in_reverse_8888_8888_1pixel STRIDE_M, WK&reg2, 0, 0
stmdb DST, {WK&reg1-WK&reg2}
.else
in_reverse_8888_8888_1pixel STRIDE_M, WK&reg1, -12, 0
in_reverse_8888_8888_1pixel STRIDE_M, WK&reg2, -8, 0
in_reverse_8888_8888_1pixel STRIDE_M, WK&reg3, -4, 0
in_reverse_8888_8888_1pixel STRIDE_M, WK&reg4, 0, 0
stmdb DST, {WK&reg1-WK&reg4}
.endif
49:
.endm
/* Template adapter: expand firstreg into the consecutive WK register
 * numbers expected by the tail macro. */
.macro in_reverse_8888_8888_process_tail cond, numbytes, firstreg
in_reverse_8888_8888_tail numbytes, firstreg, %(firstreg+1), %(firstreg+2), %(firstreg+3)
.endm
/* Bind the in_reverse_8888_8888_* macros into the exported ARMv6 entry
 * point.  Fix: the argument-separating commas after the bpp list and
 * after the flags expression were missing; GAS also splits macro
 * arguments on spaces, so without them the OR-ed flags would be parsed
 * as several separate arguments (every other invocation in this file
 * carries these commas). */
generate_composite_function \
pixman_composite_in_reverse_8888_8888_asm_armv6, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH | FLAG_NO_PRELOAD_DST, \
2, /* prefetch distance */ \
in_reverse_8888_8888_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
in_reverse_8888_8888_process_head, \
in_reverse_8888_8888_process_tail
/******************************************************************************/
/*
 * OVER(solid src, a8r8g8b8 dst) ARMv6 path.  Init fetches the solid
 * source colour from the stack, precomputes the destination multiplier
 * 255 - src_alpha in STRIDE_M, and primes MASK / the GE flags as usual.
 */
.macro over_n_8888_init
ldr SRC, [sp, #ARGS_STACK_OFFSET]
/* Hold loop invariant in MASK */
ldr MASK, =0x00800080
/* Hold multiplier for destination in STRIDE_M */
mov STRIDE_M, #255
sub STRIDE_M, STRIDE_M, SRC, lsr #24
/* Set GE[3:0] to 0101 so SEL instructions do what we want */
uadd8 SCRATCH, MASK, MASK
.endm
/* Head: just load the destination pixels; the source is a constant. */
.macro over_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
pixld , numbytes, firstreg, DST, 0
.endm
/* One pixel: dst = dst * (255 - src_alpha) / 255, saturating-add src. */
.macro over_n_8888_1pixel dst
mul_8888_8 WK&dst, STRIDE_M, SCRATCH, MASK
uqadd8 WK&dst, WK&dst, SRC
.endm
/* Tail: blend each loaded pixel in turn, then store the block. */
.macro over_n_8888_process_tail cond, numbytes, firstreg
.set PROCESS_REG, firstreg
.rept numbytes / 4
over_n_8888_1pixel %(PROCESS_REG)
.set PROCESS_REG, PROCESS_REG+1
.endr
pixst , numbytes, firstreg, DST
.endm
/* Bind the over_n_8888_* macros into the exported ARMv6 entry point.
 * Fix: restore the missing argument-separating commas after the bpp
 * list and after the flags expression (GAS splits macro arguments on
 * spaces, so the OR-ed flags would otherwise be parsed as several
 * arguments; all other invocations in this file carry the commas). */
generate_composite_function \
pixman_composite_over_n_8888_asm_armv6, 0, 0, 32, \
FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_DOES_STORE, \
2, /* prefetch distance */ \
over_n_8888_init, \
nop_macro, /* newline */ \
nop_macro, /* cleanup */ \
over_n_8888_process_head, \
over_n_8888_process_tail
/******************************************************************************/
/*
* Copyright © 2009 Nokia Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
*/
/*
* This file contains implementations of NEON optimized pixel processing
* functions. There is no full and detailed tutorial, but some functions
* (those which are exposing some new or interesting features) are
* extensively commented and can be used as examples.
*
* You may want to have a look at the comments for following functions:
* - pixman_composite_over_8888_0565_asm_neon
* - pixman_composite_over_n_8_0565_asm_neon
*/
/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.fpu neon
.arch armv7a
.object_arch armv4
.eabi_attribute 10, 0 /* suppress Tag_FP_arch */
.eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */
.arm
.altmacro
.p2align 2
#include "pixman-private.h"
#include "pixman-arm-asm.h"
#include "pixman-arm-neon-asm.h"
/* Global configuration options and preferences */
/*
* The code can optionally make use of unaligned memory accesses to improve
* performance of handling leading/trailing pixels for each scanline.
* Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for
* example in linux if unaligned memory accesses are not configured to
* generate exceptions.
*/
.set RESPECT_STRICT_ALIGNMENT, 1
/*
* Set default prefetch type. There is a choice between the following options:
*
* PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work
* as NOP to workaround some HW bugs or for whatever other reason)
*
* PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
* advanced prefetch intruduces heavy overhead)
*
* PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
* which can run ARM and NEON instructions simultaneously so that extra ARM
* instructions do not add (many) extra cycles, but improve prefetch efficiency)
*
* Note: some types of function can't support advanced prefetch and fallback
* to simple one (those which handle 24bpp pixels)
*/
.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
/* Prefetch distance in pixels for simple prefetch */
.set PREFETCH_DISTANCE_SIMPLE, 64
/*
* Implementation of pixman_composite_over_8888_0565_asm_neon
*
* This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and
* performs OVER compositing operation. Function fast_composite_over_8888_0565
* from pixman-fast-path.c does the same in C and can be used as a reference.
*
* First we need to have some NEON assembly code which can do the actual
* operation on the pixels and provide it to the template macro.
*
* Template macro quite conveniently takes care of emitting all the necessary
* code for memory reading and writing (including quite tricky cases of
* handling unaligned leading/trailing pixels), so we only need to deal with
* the data in NEON registers.
*
* NEON registers allocation in general is recommented to be the following:
* d0, d1, d2, d3 - contain loaded source pixel data
* d4, d5, d6, d7 - contain loaded destination pixels (if they are needed)
* d24, d25, d26, d27 - contain loading mask pixel data (if mask is used)
* d28, d29, d30, d31 - place for storing the result (destination pixels)
*
* As can be seen above, four 64-bit NEON registers are used for keeping
* intermediate pixel data and up to 8 pixels can be processed in one step
* for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
*
* This particular function uses the following registers allocation:
* d0, d1, d2, d3 - contain loaded source pixel data
* d4, d5 - contain loaded destination pixels (they are needed)
* d28, d29 - place for storing the result (destination pixels)
*/
/*
* Step one. We need to have some code to do some arithmetics on pixel data.
* This is implemented as a pair of macros: '*_head' and '*_tail'. When used
* back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
* perform all the needed calculations and write the result to {d28, d29}.
* The rationale for having two macros and not just one will be explained
* later. In practice, any single monolitic function which does the work can
* be split into two parts in any arbitrary way without affecting correctness.
*
* There is one special trick here too. Common template macro can optionally
* make our life a bit easier by doing R, G, B, A color components
* deinterleaving for 32bpp pixel formats (and this feature is used in
* 'pixman_composite_over_8888_0565_asm_neon' function). So it means that
* instead of having 8 packed pixels in {d0, d1, d2, d3} registers, we
* actually use d0 register for blue channel (a vector of eight 8-bit
* values), d1 register for green, d2 for red and d3 for alpha. This
* simple conversion can be also done with a few NEON instructions:
*
* Packed to planar conversion:
* vuzp.8 d0, d1
* vuzp.8 d2, d3
* vuzp.8 d1, d3
* vuzp.8 d0, d2
*
* Planar to packed conversion:
* vzip.8 d0, d2
* vzip.8 d1, d3
* vzip.8 d2, d3
* vzip.8 d0, d1
*
* But pixel can be loaded directly in planar format using VLD4.8 NEON
* instruction. It is 1 cycle slower than VLD1.32, so this is not always
* desirable, that's why deinterleaving is optional.
*
* But anyway, here is the code:
*/
/*
 * OVER(a8r8g8b8 src, r5g6b5 dst) — pixel arithmetic, first half.
 * Source arrives deinterleaved in d0-d3 (d3 = alpha); destination
 * r5g6b5 pixels are in {d4, d5}.  See the long tutorial comment above
 * for the head/tail split rationale and register allocation.
 */
.macro pixman_composite_over_8888_0565_process_pixblock_head
/* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
and put data into d6 - red, d7 - green, d30 - blue */
vshrn.u16 d6, q2, #8
vshrn.u16 d7, q2, #3
vsli.u16 q2, q2, #5
vsri.u8 d6, d6, #5
vmvn.8 d3, d3 /* invert source alpha */
vsri.u8 d7, d7, #6
vshrn.u16 d30, q2, #2
/* now do alpha blending, storing results in 8-bit planar format
into d16 - red, d19 - green, d18 - blue */
vmull.u8 q10, d3, d6
vmull.u8 q11, d3, d7
vmull.u8 q12, d3, d30
vrshr.u16 q13, q10, #8
vrshr.u16 q3, q11, #8
vrshr.u16 q15, q12, #8
vraddhn.u16 d20, q10, q13
vraddhn.u16 d23, q11, q3
vraddhn.u16 d22, q12, q15
.endm
/* Second half: saturating-add the source, repack to r5g6b5 in {d28, d29}. */
.macro pixman_composite_over_8888_0565_process_pixblock_tail
/* ... continue alpha blending */
vqadd.u8 d16, d2, d20
vqadd.u8 q9, q0, q11
/* convert the result to r5g6b5 and store it into {d28, d29} */
vshll.u8 q14, d16, #8
vshll.u8 q8, d19, #8
vshll.u8 q9, d18, #8
vsri.u16 q14, q8, #5
vsri.u16 q14, q9, #11
.endm
/*
* OK, now we got almost everything that we need. Using the above two
* macros, the work can be done right. But now we want to optimize
* it a bit. ARM Cortex-A8 is an in-order core, and benefits really
* a lot from good code scheduling and software pipelining.
*
* Let's construct some code, which will run in the core main loop.
* Some pseudo-code of the main loop will look like this:
* head
* while (...) {
* tail
* head
* }
* tail
*
* It may look a bit weird, but this setup allows to hide instruction
* latencies better and also utilize dual-issue capability more
* efficiently (make pairs of load-store and ALU instructions).
*
* So what we need now is a '*_tail_head' macro, which will be used
* in the core main loop. A trivial straightforward implementation
* of this macro would look like this:
*
* pixman_composite_over_8888_0565_process_pixblock_tail
* vst1.16 {d28, d29}, [DST_W, :128]!
* vld1.16 {d4, d5}, [DST_R, :128]!
* vld4.32 {d0, d1, d2, d3}, [SRC]!
* pixman_composite_over_8888_0565_process_pixblock_head
* cache_preload 8, 8
*
* Now it also got some VLD/VST instructions. We simply can't move from
* processing one block of pixels to the other one with just arithmetics.
* The previously processed data needs to be written to memory and new
* data needs to be fetched. Fortunately, this main loop does not deal
* with partial leading/trailing pixels and can load/store a full block
* of pixels in a bulk. Additionally, destination buffer is already
* 16 bytes aligned here (which is good for performance).
*
* New things here are DST_R, DST_W, SRC and MASK identifiers. These
* are the aliases for ARM registers which are used as pointers for
* accessing data. We maintain separate pointers for reading and writing
* destination buffer (DST_R and DST_W).
*
* Another new thing is 'cache_preload' macro. It is used for prefetching
* data into CPU L2 cache and improve performance when dealing with large
* images which are far larger than cache size. It uses one argument
* (actually two, but they need to be the same here) - number of pixels
* in a block. Looking into 'pixman-arm-neon-asm.h' can provide some
* details about this macro. Moreover, if good performance is needed
* the code from this macro needs to be copied into '*_tail_head' macro
* and mixed with the rest of code for optimal instructions scheduling.
* We are actually doing it below.
*
* Now after all the explanations, here is the optimized code.
* Different instruction streams (originaling from '*_head', '*_tail'
* and 'cache_preload' macro) use different indentation levels for
* better readability. Actually taking the code from one of these
* indentation levels and ignoring a few VLD/VST instructions would
* result in exactly the code from '*_head', '*_tail' or 'cache_preload'
* macro!
*/
#if 1
/*
 * Software-pipelined tail+head for the main loop: the previous block's
 * tail instructions are interleaved with the next block's loads, head
 * instructions and the PF prefetcher stream (see the commentary above).
 * Instruction order here IS the optimization — do not reorder.
 */
.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
vqadd.u8 d16, d2, d20
vld1.16 {d4, d5}, [DST_R, :128]!
vqadd.u8 q9, q0, q11
vshrn.u16 d6, q2, #8
fetch_src_pixblock
vshrn.u16 d7, q2, #3
vsli.u16 q2, q2, #5
vshll.u8 q14, d16, #8
PF add PF_X, PF_X, #8
vshll.u8 q8, d19, #8
PF tst PF_CTL, #0xF
vsri.u8 d6, d6, #5
PF addne PF_X, PF_X, #8
vmvn.8 d3, d3
PF subne PF_CTL, PF_CTL, #1
vsri.u8 d7, d7, #6
vshrn.u16 d30, q2, #2
vmull.u8 q10, d3, d6
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
vmull.u8 q11, d3, d7
vmull.u8 q12, d3, d30
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vsri.u16 q14, q8, #5
PF cmp PF_X, ORIG_W
vshll.u8 q9, d18, #8
vrshr.u16 q13, q10, #8
PF subge PF_X, PF_X, ORIG_W
vrshr.u16 q3, q11, #8
vrshr.u16 q15, q12, #8
PF subges PF_CTL, PF_CTL, #0x10
vsri.u16 q14, q9, #11
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
vraddhn.u16 d20, q10, q13
vraddhn.u16 d23, q11, q3
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vraddhn.u16 d22, q12, q15
vst1.16 {d28, d29}, [DST_W, :128]!
.endm
#else
/* If we did not care much about the performance, we would just use this... */
.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
pixman_composite_over_8888_0565_process_pixblock_tail
vst1.16 {d28, d29}, [DST_W, :128]!
vld1.16 {d4, d5}, [DST_R, :128]!
fetch_src_pixblock
pixman_composite_over_8888_0565_process_pixblock_head
cache_preload 8, 8
.endm
#endif
/*
* And now the final part. We are using 'generate_composite_function' macro
* to put all the stuff together. We are specifying the name of the function
* which we want to get, number of bits per pixel for the source, mask and
* destination (0 if unused, like mask in this case). Next come some bit
* flags:
* FLAG_DST_READWRITE - tells that the destination buffer is both read
* and written, for write-only buffer we would use
* FLAG_DST_WRITEONLY flag instead
* FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
* and separate color channels for 32bpp format.
* The next things are:
* - the number of pixels processed per iteration (8 in this case, because
* that's the maximum what can fit into four 64-bit NEON registers).
* - prefetch distance, measured in pixel blocks. In this case it is 5 times
* by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal
* prefetch distance can be selected by running some benchmarks.
*
* After that we specify some macros, these are 'default_init',
* 'default_cleanup' here which are empty (but it is possible to have custom
* init/cleanup macros to be able to save/restore some extra NEON registers
* like d8-d15 or do anything else) followed by
* 'pixman_composite_over_8888_0565_process_pixblock_head',
* 'pixman_composite_over_8888_0565_process_pixblock_tail' and
* 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
* which we got implemented above.
*
* The last part is the NEON registers allocation scheme.
*/
/* Bind the macros above into the exported NEON entry point; the
 * parameter meanings are explained in the tutorial comment above. */
generate_composite_function \
pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_0565_process_pixblock_head, \
pixman_composite_over_8888_0565_process_pixblock_tail, \
pixman_composite_over_8888_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
24 /* mask_basereg */
/******************************************************************************/
/*
 * OVER(solid src, r5g6b5 dst).  Same arithmetic as over_8888_0565,
 * except the source colour is constant: init splats it into d0-d3 and
 * pre-inverts the alpha, so the head needs no per-block vmvn.
 */
.macro pixman_composite_over_n_0565_process_pixblock_head
/* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
and put data into d6 - red, d7 - green, d30 - blue */
vshrn.u16 d6, q2, #8
vshrn.u16 d7, q2, #3
vsli.u16 q2, q2, #5
vsri.u8 d6, d6, #5
vsri.u8 d7, d7, #6
vshrn.u16 d30, q2, #2
/* now do alpha blending, storing results in 8-bit planar format
into d16 - red, d19 - green, d18 - blue */
vmull.u8 q10, d3, d6
vmull.u8 q11, d3, d7
vmull.u8 q12, d3, d30
vrshr.u16 q13, q10, #8
vrshr.u16 q3, q11, #8
vrshr.u16 q15, q12, #8
vraddhn.u16 d20, q10, q13
vraddhn.u16 d23, q11, q3
vraddhn.u16 d22, q12, q15
.endm
/* Saturating-add the (constant) source and repack to r5g6b5 in {d28, d29}. */
.macro pixman_composite_over_n_0565_process_pixblock_tail
/* ... continue alpha blending */
vqadd.u8 d16, d2, d20
vqadd.u8 q9, q0, q11
/* convert the result to r5g6b5 and store it into {d28, d29} */
vshll.u8 q14, d16, #8
vshll.u8 q8, d19, #8
vshll.u8 q9, d18, #8
vsri.u16 q14, q8, #5
vsri.u16 q14, q9, #11
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_n_0565_process_pixblock_tail_head
pixman_composite_over_n_0565_process_pixblock_tail
vld1.16 {d4, d5}, [DST_R, :128]!
vst1.16 {d28, d29}, [DST_W, :128]!
pixman_composite_over_n_0565_process_pixblock_head
cache_preload 8, 8
.endm
/* Load the solid colour from the stack, splat its channels into d0-d3
 * and pre-invert the alpha for the blend. */
.macro pixman_composite_over_n_0565_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d3[0]}, [DUMMY]
vdup.8 d0, d3[0]
vdup.8 d1, d3[1]
vdup.8 d2, d3[2]
vdup.8 d3, d3[3]
vmvn.8 d3, d3 /* invert source alpha */
.endm
generate_composite_function \
pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_0565_init, \
default_cleanup, \
pixman_composite_over_n_0565_process_pixblock_head, \
pixman_composite_over_n_0565_process_pixblock_tail, \
pixman_composite_over_n_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
24 /* mask_basereg */
/******************************************************************************/
/*
 * SRC(a8r8g8b8 -> r5g6b5) conversion: pack deinterleaved r/g/b from
 * d2/d1/d0 into r5g6b5 in {d28, d29}; alpha (d3) is discarded.
 */
.macro pixman_composite_src_8888_0565_process_pixblock_head
vshll.u8 q8, d1, #8
vshll.u8 q14, d2, #8
vshll.u8 q9, d0, #8
.endm
.macro pixman_composite_src_8888_0565_process_pixblock_tail
vsri.u16 q14, q8, #5
vsri.u16 q14, q9, #11
.endm
/* Pipelined tail+head with the PF prefetcher stream interleaved. */
.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
vsri.u16 q14, q8, #5
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
fetch_src_pixblock
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vsri.u16 q14, q9, #11
PF cmp PF_X, ORIG_W
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
vshll.u8 q8, d1, #8
vst1.16 {d28, d29}, [DST_W, :128]!
PF subge PF_X, PF_X, ORIG_W
PF subges PF_CTL, PF_CTL, #0x10
vshll.u8 q14, d2, #8
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
vshll.u8 q9, d0, #8
.endm
generate_composite_function \
pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_8888_0565_process_pixblock_head, \
pixman_composite_src_8888_0565_process_pixblock_tail, \
pixman_composite_src_8888_0565_process_pixblock_tail_head
/******************************************************************************/
/*
 * SRC(r5g6b5 -> x8r8g8b8) conversion: unpack r5g6b5 from q0 to planar
 * 8-bit b/g/r in d28/d29/d30 (bit-replicating the low bits) and force
 * alpha d31 to 255.
 */
.macro pixman_composite_src_0565_8888_process_pixblock_head
vshrn.u16 d30, q0, #8
vshrn.u16 d29, q0, #3
vsli.u16 q0, q0, #5
vmov.u8 d31, #255
vsri.u8 d30, d30, #5
vsri.u8 d29, d29, #6
vshrn.u16 d28, q0, #2
.endm
/* All work happens in the head; nothing left for the tail. */
.macro pixman_composite_src_0565_8888_process_pixblock_tail
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_src_0565_8888_process_pixblock_tail_head
pixman_composite_src_0565_8888_process_pixblock_tail
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
fetch_src_pixblock
pixman_composite_src_0565_8888_process_pixblock_head
cache_preload 8, 8
.endm
generate_composite_function \
pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_0565_8888_process_pixblock_head, \
pixman_composite_src_0565_8888_process_pixblock_tail, \
pixman_composite_src_0565_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * ADD(a8, a8): saturating add of 32 8-bit pixels per block
 * (src in q0-q1, dst in q2-q3, result in q14-q15).
 */
.macro pixman_composite_add_8_8_process_pixblock_head
vqadd.u8 q14, q0, q2
vqadd.u8 q15, q1, q3
.endm
.macro pixman_composite_add_8_8_process_pixblock_tail
.endm
/* Pipelined tail+head with the PF prefetcher stream interleaved;
 * PF_X advances by 32 because blocks are 32 pixels at 8 bpp. */
.macro pixman_composite_add_8_8_process_pixblock_tail_head
fetch_src_pixblock
PF add PF_X, PF_X, #32
PF tst PF_CTL, #0xF
vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
PF addne PF_X, PF_X, #32
PF subne PF_CTL, PF_CTL, #1
vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
PF cmp PF_X, ORIG_W
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
PF subge PF_X, PF_X, ORIG_W
PF subges PF_CTL, PF_CTL, #0x10
vqadd.u8 q14, q0, q2
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vqadd.u8 q15, q1, q3
.endm
generate_composite_function \
pixman_composite_add_8_8_asm_neon, 8, 0, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_add_8_8_process_pixblock_head, \
pixman_composite_add_8_8_process_pixblock_tail, \
pixman_composite_add_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * ADD(a8r8g8b8, a8r8g8b8): same saturating-add arithmetic as add_8_8
 * (its head/tail macros are reused below), but 8 pixels per block at
 * 32 bpp, so the PF counter advances by 8.
 */
.macro pixman_composite_add_8888_8888_process_pixblock_tail_head
fetch_src_pixblock
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
vld1.32 {d4, d5, d6, d7}, [DST_R, :128]!
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vst1.32 {d28, d29, d30, d31}, [DST_W, :128]!
PF cmp PF_X, ORIG_W
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
PF subge PF_X, PF_X, ORIG_W
PF subges PF_CTL, PF_CTL, #0x10
vqadd.u8 q14, q0, q2
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vqadd.u8 q15, q1, q3
.endm
generate_composite_function \
pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_add_8_8_process_pixblock_head, \
pixman_composite_add_8_8_process_pixblock_tail, \
pixman_composite_add_8888_8888_process_pixblock_tail_head
/* Single-scanline variant used by the general compositing pipeline
 * (no prefetch distance parameter). */
generate_composite_function_single_scanline \
pixman_composite_scanline_add_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_add_8_8_process_pixblock_head, \
pixman_composite_add_8_8_process_pixblock_tail, \
pixman_composite_add_8888_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * OUT_REVERSE(a8r8g8b8, a8r8g8b8): dest = dest * (1 - src_alpha).
 * Source deinterleaved in d0-d3 (d3 = alpha), dest in d4-d7,
 * result in d28-d31.  Also reused as the first stage of OVER below.
 */
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_head
vmvn.8 d24, d3 /* get inverted alpha */
/* do alpha blending */
vmull.u8 q8, d24, d4
vmull.u8 q9, d24, d5
vmull.u8 q10, d24, d6
vmull.u8 q11, d24, d7
.endm
/* Round the 16-bit products back to 8 bits ((x + (x >> 8) + 128) >> 8
 * style rounding via vrshr + vraddhn). */
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
.endm
/* Pipelined tail+head with the PF prefetcher stream interleaved;
 * d22 holds the inverted alpha here so d24 stays free for loads. */
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vrshr.u16 q14, q8, #8
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
PF cmp PF_X, ORIG_W
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
fetch_src_pixblock
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
vmvn.8 d22, d3
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q8, d22, d4
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q9, d22, d5
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
vmull.u8 q10, d22, d6
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vmull.u8 q11, d22, d7
.endm
generate_composite_function_single_scanline \
pixman_composite_scanline_out_reverse_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_out_reverse_8888_8888_process_pixblock_head, \
pixman_composite_out_reverse_8888_8888_process_pixblock_tail, \
pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * OVER(a8r8g8b8, a8r8g8b8) = OUT_REVERSE then saturating-add of the
 * source: dest = src + dest * (1 - src_alpha).
 */
.macro pixman_composite_over_8888_8888_process_pixblock_head
pixman_composite_out_reverse_8888_8888_process_pixblock_head
.endm
.macro pixman_composite_over_8888_8888_process_pixblock_tail
pixman_composite_out_reverse_8888_8888_process_pixblock_tail
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
/* Pipelined tail+head; same structure as the out_reverse variant plus
 * the two vqadd instructions of the OVER step. */
.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vrshr.u16 q14, q8, #8
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
PF cmp PF_X, ORIG_W
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
fetch_src_pixblock
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
vmvn.8 d22, d3
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q8, d22, d4
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q9, d22, d5
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
vmull.u8 q10, d22, d6
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vmull.u8 q11, d22, d7
.endm
generate_composite_function \
pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_process_pixblock_tail_head
/* Single-scanline variant used by the general compositing pipeline. */
generate_composite_function_single_scanline \
pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * OVER(solid src, a8r8g8b8 dst).  The solid colour is splatted into
 * d0-d3 by init with its inverted alpha held in d24 for the whole run,
 * so the per-block work reduces to multiply, round and saturating-add.
 */
.macro pixman_composite_over_n_8888_process_pixblock_head
/* deinterleaved source pixels in {d0, d1, d2, d3} */
/* inverted alpha in {d24} */
/* destination pixels in {d4, d5, d6, d7} */
vmull.u8 q8, d24, d4
vmull.u8 q9, d24, d5
vmull.u8 q10, d24, d6
vmull.u8 q11, d24, d7
.endm
.macro pixman_composite_over_n_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q2, q10, #8
vrshr.u16 q3, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q2, q10
vraddhn.u16 d31, q3, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
/* Pipelined tail+head; only the destination side of the PF prefetcher
 * stream is needed since the source is constant. */
.macro pixman_composite_over_n_8888_process_pixblock_tail_head
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q2, q10, #8
vrshr.u16 q3, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q2, q10
vraddhn.u16 d31, q3, q11
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vqadd.u8 q14, q0, q14
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0x0F
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vqadd.u8 q15, q1, q15
PF cmp PF_X, ORIG_W
vmull.u8 q8, d24, d4
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vmull.u8 q9, d24, d5
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q10, d24, d6
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q11, d24, d7
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
/* Load the solid colour from the stack, splat channels into d0-d3 and
 * keep the inverted alpha in d24. */
.macro pixman_composite_over_n_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d3[0]}, [DUMMY]
vdup.8 d0, d3[0]
vdup.8 d1, d3[1]
vdup.8 d2, d3[2]
vdup.8 d3, d3[3]
vmvn.8 d24, d3 /* get inverted alpha */
.endm
generate_composite_function \
pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8888_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_n_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * OVER_REVERSE(solid src, a8r8g8b8 dst): the roles are swapped — the
 * solid colour sits in d4-d7 (see init) and the destination is loaded
 * into d0-d3 and composited over it; note the swapped src/dst base
 * registers in the generate invocation below.
 */
.macro pixman_composite_over_reverse_n_8888_process_pixblock_tail_head
vrshr.u16 q14, q8, #8
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
PF cmp PF_X, ORIG_W
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
vld4.8 {d0, d1, d2, d3}, [DST_R, :128]!
vmvn.8 d22, d3
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q8, d22, d4
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q9, d22, d5
vmull.u8 q10, d22, d6
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vmull.u8 q11, d22, d7
.endm
/* Splat the solid colour into d4-d7 (the "source" slot of the blend). */
.macro pixman_composite_over_reverse_n_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d7[0]}, [DUMMY]
vdup.8 d4, d7[0]
vdup.8 d5, d7[1]
vdup.8 d6, d7[2]
vdup.8 d7, d7[3]
.endm
generate_composite_function \
pixman_composite_over_reverse_n_8888_asm_neon, 0, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_reverse_n_8888_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_reverse_n_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
4, /* src_basereg */ \
24 /* mask_basereg */
/******************************************************************************/
.macro pixman_composite_over_8888_8_0565_process_pixblock_head
vmull.u8 q0, d24, d8 /* IN for SRC pixels (part1) */
vmull.u8 q1, d24, d9
vmull.u8 q6, d24, d10
vmull.u8 q7, d24, d11
vshrn.u16 d6, q2, #8 /* convert DST_R data to 32-bpp (part1) */
vshrn.u16 d7, q2, #3
vsli.u16 q2, q2, #5
vrshr.u16 q8, q0, #8 /* IN for SRC pixels (part2) */
vrshr.u16 q9, q1, #8
vrshr.u16 q10, q6, #8
vrshr.u16 q11, q7, #8
vraddhn.u16 d0, q0, q8
vraddhn.u16 d1, q1, q9
vraddhn.u16 d2, q6, q10
vraddhn.u16 d3, q7, q11
vsri.u8 d6, d6, #5 /* convert DST_R data to 32-bpp (part2) */
vsri.u8 d7, d7, #6
vmvn.8 d3, d3
vshrn.u16 d30, q2, #2
vmull.u8 q8, d3, d6 /* now do alpha blending */
vmull.u8 q9, d3, d7
vmull.u8 q10, d3, d30
.endm
.macro pixman_composite_over_8888_8_0565_process_pixblock_tail
/* 3 cycle bubble (after vmull.u8) */
vrshr.u16 q13, q8, #8
vrshr.u16 q11, q9, #8
vrshr.u16 q15, q10, #8
vraddhn.u16 d16, q8, q13
vraddhn.u16 d27, q9, q11
vraddhn.u16 d26, q10, q15
vqadd.u8 d16, d2, d16
/* 1 cycle bubble */
vqadd.u8 q9, q0, q13
vshll.u8 q14, d16, #8 /* convert to 16bpp */
vshll.u8 q8, d19, #8
vshll.u8 q9, d18, #8
vsri.u16 q14, q8, #5
/* 1 cycle bubble */
vsri.u16 q14, q9, #11
.endm
/*
 * Software-pipelined steady-state stage for over_8888_8_0565: the tail
 * of the previous pixel block is interleaved instruction-by-instruction
 * with the head of the next one, plus the destination load, mask/source
 * fetches, cache preload and the final r5g6b5 store.  The instruction
 * sequence mirrors the separate head/tail macros above; do not reorder
 * without re-checking the data dependencies between the two blocks.
 */
.macro pixman_composite_over_8888_8_0565_process_pixblock_tail_head
vld1.16 {d4, d5}, [DST_R, :128]!
vshrn.u16 d6, q2, #8
fetch_mask_pixblock
vshrn.u16 d7, q2, #3
fetch_src_pixblock
vmull.u8 q6, d24, d10
vrshr.u16 q13, q8, #8
vrshr.u16 q11, q9, #8
vrshr.u16 q15, q10, #8
vraddhn.u16 d16, q8, q13
vraddhn.u16 d27, q9, q11
vraddhn.u16 d26, q10, q15
vqadd.u8 d16, d2, d16
vmull.u8 q1, d24, d9
vqadd.u8 q9, q0, q13
vshll.u8 q14, d16, #8
vmull.u8 q0, d24, d8
vshll.u8 q8, d19, #8
vshll.u8 q9, d18, #8
vsri.u16 q14, q8, #5
vmull.u8 q7, d24, d11
vsri.u16 q14, q9, #11
cache_preload 8, 8
vsli.u16 q2, q2, #5
vrshr.u16 q8, q0, #8
vrshr.u16 q9, q1, #8
vrshr.u16 q10, q6, #8
vrshr.u16 q11, q7, #8
vraddhn.u16 d0, q0, q8
vraddhn.u16 d1, q1, q9
vraddhn.u16 d2, q6, q10
vraddhn.u16 d3, q7, q11
vsri.u8 d6, d6, #5
vsri.u8 d7, d7, #6
vmvn.8 d3, d3
vshrn.u16 d30, q2, #2
vst1.16 {d28, d29}, [DST_W, :128]!
vmull.u8 q8, d3, d6
vmull.u8 q9, d3, d7
vmull.u8 q10, d3, d30
.endm
/*
 * Instantiate OVER with a8r8g8b8 source (32 bpp), a8 mask (8 bpp) and
 * r5g6b5 destination (16 bpp), using the pixblock macros defined above.
 */
generate_composite_function \
pixman_composite_over_8888_8_0565_asm_neon, 32, 8, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_8_0565_process_pixblock_head, \
pixman_composite_over_8888_8_0565_process_pixblock_tail, \
pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
8, /* src_basereg */ \
24 /* mask_basereg */
/******************************************************************************/
/*
* This function needs a special initialization of solid mask.
* Solid source pixel data is fetched from stack at ARGS_STACK_OFFSET
* offset, split into color components and replicated in d8-d11
* registers. Additionally, this function needs all the NEON registers,
* so it has to save d8-d15 registers which are callee saved according
 * to the ABI. These registers are restored in the 'cleanup' macro. All the
* other NEON registers are caller saved, so can be clobbered freely
* without introducing any problems.
*/
/*
 * Init for over_n_8_0565: read the 32-bit solid source color from the
 * argument area on the stack and splat its four bytes into d8-d11
 * (one register per channel), after saving the callee-saved d8-d15.
 */
.macro pixman_composite_over_n_8_0565_init
/* compute the argument address before vpush moves sp */
add DUMMY, sp, #ARGS_STACK_OFFSET
vpush {d8-d15}
vld1.32 {d11[0]}, [DUMMY]
vdup.8 d8, d11[0]
vdup.8 d9, d11[1]
vdup.8 d10, d11[2]
vdup.8 d11, d11[3]
.endm
/* Restore the callee-saved NEON registers saved in init. */
.macro pixman_composite_over_n_8_0565_cleanup
vpop {d8-d15}
.endm
/*
 * Instantiate OVER with a solid source, a8 mask and r5g6b5 destination,
 * sharing the over_8888_8_0565 pixblock macros (the solid color sits in
 * d8-d11 exactly where those macros expect the fetched source).
 */
generate_composite_function \
pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8_0565_init, \
pixman_composite_over_n_8_0565_cleanup, \
pixman_composite_over_8888_8_0565_process_pixblock_head, \
pixman_composite_over_8888_8_0565_process_pixblock_tail, \
pixman_composite_over_8888_8_0565_process_pixblock_tail_head
/******************************************************************************/
/*
 * Init for over_8888_n_0565: read the 32-bit solid mask from the stack
 * (second stacked argument, hence the +8 offset) and splat its alpha
 * byte into d24, where the shared pixblock macros expect the mask.
 */
.macro pixman_composite_over_8888_n_0565_init
/* compute the argument address before vpush moves sp */
add DUMMY, sp, #(ARGS_STACK_OFFSET + 8)
vpush {d8-d15}
vld1.32 {d24[0]}, [DUMMY]
vdup.8 d24, d24[3]
.endm
/* Restore the callee-saved NEON registers saved in init. */
.macro pixman_composite_over_8888_n_0565_cleanup
vpop {d8-d15}
.endm
/*
 * Instantiate OVER with a8r8g8b8 source, solid mask and r5g6b5
 * destination, reusing the over_8888_8_0565 pixblock macros.
 */
generate_composite_function \
pixman_composite_over_8888_n_0565_asm_neon, 32, 0, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_8888_n_0565_init, \
pixman_composite_over_8888_n_0565_cleanup, \
pixman_composite_over_8888_8_0565_process_pixblock_head, \
pixman_composite_over_8888_8_0565_process_pixblock_tail, \
pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
8, /* src_basereg */ \
24 /* mask_basereg */
/******************************************************************************/
/*
 * SRC for r5g6b5 -> r5g6b5 is a plain copy: no per-pixel computation,
 * so head and tail are empty and the steady-state stage just stores the
 * previous 16-pixel block and fetches the next one.
 */
.macro pixman_composite_src_0565_0565_process_pixblock_head
.endm
.macro pixman_composite_src_0565_0565_process_pixblock_tail
.endm
.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
fetch_src_pixblock
cache_preload 16, 16
.endm
generate_composite_function \
pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
FLAG_DST_WRITEONLY, \
16, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_0565_0565_process_pixblock_head, \
pixman_composite_src_0565_0565_process_pixblock_tail, \
pixman_composite_src_0565_0565_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/*
 * SRC with a solid color into an a8 destination: a constant fill.
 * Init replicates the low byte of the solid color across d0-d3 (the
 * vsli chain doubles the replicated width 8 -> 16 -> 32 -> 64 bits,
 * then vorr copies d0 into d1 and q0 into q1), so the steady-state
 * stage is a single 32-byte store per block.
 */
.macro pixman_composite_src_n_8_process_pixblock_head
.endm
.macro pixman_composite_src_n_8_process_pixblock_tail
.endm
.macro pixman_composite_src_n_8_process_pixblock_tail_head
vst1.8 {d0, d1, d2, d3}, [DST_W, :128]!
.endm
.macro pixman_composite_src_n_8_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d0[0]}, [DUMMY]
vsli.u64 d0, d0, #8
vsli.u64 d0, d0, #16
vsli.u64 d0, d0, #32
vorr d1, d0, d0
vorr q1, q0, q0
.endm
.macro pixman_composite_src_n_8_cleanup
.endm
generate_composite_function \
pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
FLAG_DST_WRITEONLY, \
32, /* number of pixels, processed in a single block */ \
0, /* prefetch distance */ \
pixman_composite_src_n_8_init, \
pixman_composite_src_n_8_cleanup, \
pixman_composite_src_n_8_process_pixblock_head, \
pixman_composite_src_n_8_process_pixblock_tail, \
pixman_composite_src_n_8_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/*
 * SRC with a solid color into an r5g6b5 destination: constant fill of a
 * 16-bit pattern.  Init replicates the low 16 bits of the loaded value
 * across d0-d3; the steady-state stage stores 16 pixels per iteration.
 */
.macro pixman_composite_src_n_0565_process_pixblock_head
.endm
.macro pixman_composite_src_n_0565_process_pixblock_tail
.endm
.macro pixman_composite_src_n_0565_process_pixblock_tail_head
vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
.endm
.macro pixman_composite_src_n_0565_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d0[0]}, [DUMMY]
vsli.u64 d0, d0, #16
vsli.u64 d0, d0, #32
vorr d1, d0, d0
vorr q1, q0, q0
.endm
.macro pixman_composite_src_n_0565_cleanup
.endm
generate_composite_function \
pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
FLAG_DST_WRITEONLY, \
16, /* number of pixels, processed in a single block */ \
0, /* prefetch distance */ \
pixman_composite_src_n_0565_init, \
pixman_composite_src_n_0565_cleanup, \
pixman_composite_src_n_0565_process_pixblock_head, \
pixman_composite_src_n_0565_process_pixblock_tail, \
pixman_composite_src_n_0565_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/*
 * SRC with a solid color into an a8r8g8b8 destination: constant fill of
 * a 32-bit pattern.  Init replicates the loaded word across d0-d3; the
 * steady-state stage stores 8 pixels per iteration.
 */
.macro pixman_composite_src_n_8888_process_pixblock_head
.endm
.macro pixman_composite_src_n_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_n_8888_process_pixblock_tail_head
vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
.endm
.macro pixman_composite_src_n_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d0[0]}, [DUMMY]
vsli.u64 d0, d0, #32
vorr d1, d0, d0
vorr q1, q0, q0
.endm
.macro pixman_composite_src_n_8888_cleanup
.endm
generate_composite_function \
pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
0, /* prefetch distance */ \
pixman_composite_src_n_8888_init, \
pixman_composite_src_n_8888_cleanup, \
pixman_composite_src_n_8888_process_pixblock_head, \
pixman_composite_src_n_8888_process_pixblock_tail, \
pixman_composite_src_n_8888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/*
 * SRC for a8r8g8b8 -> a8r8g8b8 is a plain copy: empty head/tail, and a
 * steady-state stage that stores the previous 8-pixel block, fetches
 * the next one and issues cache preloads.
 */
.macro pixman_composite_src_8888_8888_process_pixblock_head
.endm
.macro pixman_composite_src_8888_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
fetch_src_pixblock
cache_preload 8, 8
.endm
generate_composite_function \
pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_8888_8888_process_pixblock_head, \
pixman_composite_src_8888_8888_process_pixblock_tail, \
pixman_composite_src_8888_8888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/*
 * SRC for x8r8g8b8 -> a8r8g8b8: copy while forcing the alpha byte to
 * 0xFF.  Init builds the constant 0xFF000000 per lane in q2 (vmov fills
 * every byte with 0xFF, vshl keeps only the top byte of each 32-bit
 * lane); the pixblock stages OR that constant into the fetched pixels.
 */
.macro pixman_composite_src_x888_8888_process_pixblock_head
vorr q0, q0, q2
vorr q1, q1, q2
.endm
.macro pixman_composite_src_x888_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_x888_8888_process_pixblock_tail_head
vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
fetch_src_pixblock
vorr q0, q0, q2
vorr q1, q1, q2
cache_preload 8, 8
.endm
.macro pixman_composite_src_x888_8888_init
vmov.u8 q2, #0xFF
vshl.u32 q2, q2, #24
.endm
generate_composite_function \
pixman_composite_src_x888_8888_asm_neon, 32, 0, 32, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
pixman_composite_src_x888_8888_init, \
default_cleanup, \
pixman_composite_src_x888_8888_process_pixblock_head, \
pixman_composite_src_x888_8888_process_pixblock_tail, \
pixman_composite_src_x888_8888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/*
 * src_n_8_8888: solid source multiplied by an a8 mask, written to an
 * a8r8g8b8 destination.  Per channel the result is mask * channel with
 * rounding division by 255 (vmull, then vrsra adds back t >> 8, then
 * vrshrn in the tail takes the rounded high byte).
 */
.macro pixman_composite_src_n_8_8888_process_pixblock_head
/* expecting solid source in {d0, d1, d2, d3} */
/* mask is in d24 (d25, d26, d27 are unused) */
/* in */
vmull.u8 q8, d24, d0
vmull.u8 q9, d24, d1
vmull.u8 q10, d24, d2
vmull.u8 q11, d24, d3
vrsra.u16 q8, q8, #8
vrsra.u16 q9, q9, #8
vrsra.u16 q10, q10, #8
vrsra.u16 q11, q11, #8
.endm
/* Narrow the 16-bit products to the final 8-bit channels in d28-d31. */
.macro pixman_composite_src_n_8_8888_process_pixblock_tail
vrshrn.u16 d28, q8, #8
vrshrn.u16 d29, q9, #8
vrshrn.u16 d30, q10, #8
vrshrn.u16 d31, q11, #8
.endm
/*
 * Steady-state stage: tail of the previous block interleaved with the
 * head of the next, the mask fetch, the interleaved store, and the
 * PF-conditional prefetch state machine (the PF-prefixed instructions
 * advance/wrap PF_X and preload the next mask cache lines).
 */
.macro pixman_composite_src_n_8_8888_process_pixblock_tail_head
fetch_mask_pixblock
PF add PF_X, PF_X, #8
vrshrn.u16 d28, q8, #8
PF tst PF_CTL, #0x0F
vrshrn.u16 d29, q9, #8
PF addne PF_X, PF_X, #8
vrshrn.u16 d30, q10, #8
PF subne PF_CTL, PF_CTL, #1
vrshrn.u16 d31, q11, #8
PF cmp PF_X, ORIG_W
vmull.u8 q8, d24, d0
PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
vmull.u8 q9, d24, d1
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q10, d24, d2
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q11, d24, d3
PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
vrsra.u16 q8, q8, #8
vrsra.u16 q9, q9, #8
vrsra.u16 q10, q10, #8
vrsra.u16 q11, q11, #8
.endm
/*
 * Init: load the 32-bit solid source color from the stacked arguments
 * and splat its bytes into d0-d3 (one register per channel).
 */
.macro pixman_composite_src_n_8_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d3[0]}, [DUMMY]
vdup.8 d0, d3[0]
vdup.8 d1, d3[1]
vdup.8 d2, d3[2]
vdup.8 d3, d3[3]
.endm
.macro pixman_composite_src_n_8_8888_cleanup
.endm
/*
 * Instantiate SRC with a solid source, a8 mask and a8r8g8b8 destination
 * using the pixblock macros above.  The basereg arguments are omitted
 * and take the macro defaults.
 *
 * Fix: the original argument list ended with a dangling ", \" whose
 * continuation line was the following separator comment; it assembled
 * only because gas strips the comment to a blank line.  Terminate the
 * invocation cleanly instead.
 */
generate_composite_function \
pixman_composite_src_n_8_8888_asm_neon, 0, 8, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_src_n_8_8888_init, \
pixman_composite_src_n_8_8888_cleanup, \
pixman_composite_src_n_8_8888_process_pixblock_head, \
pixman_composite_src_n_8_8888_process_pixblock_tail, \
pixman_composite_src_n_8_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * src_n_8_8: a8 mask multiplied by the solid source's alpha (splatted
 * into d16 by init), written to an a8 destination.  Same rounding /255
 * scheme as src_n_8_8888 (vmull + vrsra, narrowed by vrshrn), but it
 * consumes all four mask registers d24-d27 to process 32 pixels/block.
 */
.macro pixman_composite_src_n_8_8_process_pixblock_head
vmull.u8 q0, d24, d16
vmull.u8 q1, d25, d16
vmull.u8 q2, d26, d16
vmull.u8 q3, d27, d16
vrsra.u16 q0, q0, #8
vrsra.u16 q1, q1, #8
vrsra.u16 q2, q2, #8
vrsra.u16 q3, q3, #8
.endm
/* Narrow the 16-bit products to the final 8-bit results in d28-d31. */
.macro pixman_composite_src_n_8_8_process_pixblock_tail
vrshrn.u16 d28, q0, #8
vrshrn.u16 d29, q1, #8
vrshrn.u16 d30, q2, #8
vrshrn.u16 d31, q3, #8
.endm
/*
 * Steady-state stage: previous tail interleaved with next head, mask
 * fetch, store, and the PF prefetch state machine.
 */
.macro pixman_composite_src_n_8_8_process_pixblock_tail_head
fetch_mask_pixblock
PF add PF_X, PF_X, #8
vrshrn.u16 d28, q0, #8
PF tst PF_CTL, #0x0F
vrshrn.u16 d29, q1, #8
PF addne PF_X, PF_X, #8
vrshrn.u16 d30, q2, #8
PF subne PF_CTL, PF_CTL, #1
vrshrn.u16 d31, q3, #8
PF cmp PF_X, ORIG_W
vmull.u8 q0, d24, d16
PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
vmull.u8 q1, d25, d16
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q2, d26, d16
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q3, d27, d16
PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
vrsra.u16 q0, q0, #8
vrsra.u16 q1, q1, #8
vrsra.u16 q2, q2, #8
vrsra.u16 q3, q3, #8
.endm
/* Init: load the solid source and splat its alpha byte into d16. */
.macro pixman_composite_src_n_8_8_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d16[0]}, [DUMMY]
vdup.8 d16, d16[3]
.endm
.macro pixman_composite_src_n_8_8_cleanup
.endm
generate_composite_function \
pixman_composite_src_n_8_8_asm_neon, 0, 8, 8, \
FLAG_DST_WRITEONLY, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_src_n_8_8_init, \
pixman_composite_src_n_8_8_cleanup, \
pixman_composite_src_n_8_8_process_pixblock_head, \
pixman_composite_src_n_8_8_process_pixblock_tail, \
pixman_composite_src_n_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * over_n_8_8888: solid source, a8 mask, a8r8g8b8 destination.
 * Head stage: compute IN(src, mask) per channel (rounded /255), invert
 * the resulting alpha, and start multiplying it into the destination.
 */
.macro pixman_composite_over_n_8_8888_process_pixblock_head
/* expecting deinterleaved source data in {d8, d9, d10, d11} */
/* d8 - blue, d9 - green, d10 - red, d11 - alpha */
/* and destination data in {d4, d5, d6, d7} */
/* mask is in d24 (d25, d26, d27 are unused) */
/* in */
vmull.u8 q6, d24, d8
vmull.u8 q7, d24, d9
vmull.u8 q8, d24, d10
vmull.u8 q9, d24, d11
vrshr.u16 q10, q6, #8
vrshr.u16 q11, q7, #8
vrshr.u16 q12, q8, #8
vrshr.u16 q13, q9, #8
vraddhn.u16 d0, q6, q10
vraddhn.u16 d1, q7, q11
vraddhn.u16 d2, q8, q12
vraddhn.u16 d3, q9, q13
vmvn.8 d25, d3 /* get inverted alpha */
/* source: d0 - blue, d1 - green, d2 - red, d3 - alpha */
/* destination: d4 - blue, d5 - green, d6 - red, d7 - alpha */
/* now do alpha blending */
vmull.u8 q8, d25, d4
vmull.u8 q9, d25, d5
vmull.u8 q10, d25, d6
vmull.u8 q11, d25, d7
.endm
/*
 * Tail stage: finish the /255 rounding of dst * (255 - alpha) and
 * saturating-add the masked source, producing the result in q14/q15.
 */
.macro pixman_composite_over_n_8_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q6, q10, #8
vrshr.u16 q7, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q6, q10
vraddhn.u16 d31, q7, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
/*
 * Steady-state stage: tail and head interleaved with the destination
 * load/store, the mask fetch, and the PF prefetch state machine for
 * both the destination and the mask.
 */
.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
vrshr.u16 q14, q8, #8
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vrshr.u16 q15, q9, #8
fetch_mask_pixblock
vrshr.u16 q6, q10, #8
PF add PF_X, PF_X, #8
vrshr.u16 q7, q11, #8
PF tst PF_CTL, #0x0F
vraddhn.u16 d28, q14, q8
PF addne PF_X, PF_X, #8
vraddhn.u16 d29, q15, q9
PF subne PF_CTL, PF_CTL, #1
vraddhn.u16 d30, q6, q10
PF cmp PF_X, ORIG_W
vraddhn.u16 d31, q7, q11
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vmull.u8 q6, d24, d8
PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
vmull.u8 q7, d24, d9
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q8, d24, d10
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q9, d24, d11
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vqadd.u8 q14, q0, q14
PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
vqadd.u8 q15, q1, q15
vrshr.u16 q10, q6, #8
vrshr.u16 q11, q7, #8
vrshr.u16 q12, q8, #8
vrshr.u16 q13, q9, #8
vraddhn.u16 d0, q6, q10
vraddhn.u16 d1, q7, q11
vraddhn.u16 d2, q8, q12
vraddhn.u16 d3, q9, q13
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
vmvn.8 d25, d3
vmull.u8 q8, d25, d4
vmull.u8 q9, d25, d5
vmull.u8 q10, d25, d6
vmull.u8 q11, d25, d7
.endm
/*
 * Init: save callee-saved d8-d15, load the solid source color from the
 * stacked arguments and splat its channels into d8-d11.
 */
.macro pixman_composite_over_n_8_8888_init
/* compute the argument address before vpush moves sp */
add DUMMY, sp, #ARGS_STACK_OFFSET
vpush {d8-d15}
vld1.32 {d11[0]}, [DUMMY]
vdup.8 d8, d11[0]
vdup.8 d9, d11[1]
vdup.8 d10, d11[2]
vdup.8 d11, d11[3]
.endm
/* Restore the callee-saved NEON registers saved in init. */
.macro pixman_composite_over_n_8_8888_cleanup
vpop {d8-d15}
.endm
generate_composite_function \
pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8_8888_init, \
pixman_composite_over_n_8_8888_cleanup, \
pixman_composite_over_n_8_8888_process_pixblock_head, \
pixman_composite_over_n_8_8888_process_pixblock_tail, \
pixman_composite_over_n_8_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * over_n_8_8: solid source (alpha splatted into d8 by init), a8 mask in
 * d24-d27, a8 destination in d4-d7; 32 pixels per block.
 * Head: src_alpha * mask (rounded /255) into d0-d3, then invert and
 * start multiplying into the destination.
 */
.macro pixman_composite_over_n_8_8_process_pixblock_head
vmull.u8 q0, d24, d8
vmull.u8 q1, d25, d8
vmull.u8 q6, d26, d8
vmull.u8 q7, d27, d8
vrshr.u16 q10, q0, #8
vrshr.u16 q11, q1, #8
vrshr.u16 q12, q6, #8
vrshr.u16 q13, q7, #8
vraddhn.u16 d0, q0, q10
vraddhn.u16 d1, q1, q11
vraddhn.u16 d2, q6, q12
vraddhn.u16 d3, q7, q13
vmvn.8 q12, q0
vmvn.8 q13, q1
vmull.u8 q8, d24, d4
vmull.u8 q9, d25, d5
vmull.u8 q10, d26, d6
vmull.u8 q11, d27, d7
.endm
/*
 * Tail: finish the /255 rounding of dst * (255 - in) and
 * saturating-add the masked source, result in q14/q15.
 */
.macro pixman_composite_over_n_8_8_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_n_8_8_process_pixblock_tail_head
vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
pixman_composite_over_n_8_8_process_pixblock_tail
fetch_mask_pixblock
cache_preload 32, 32
vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
pixman_composite_over_n_8_8_process_pixblock_head
.endm
/* Init: save d8-d15, load the solid source, splat its alpha into d8. */
.macro pixman_composite_over_n_8_8_init
/* compute the argument address before vpush moves sp */
add DUMMY, sp, #ARGS_STACK_OFFSET
vpush {d8-d15}
vld1.32 {d8[0]}, [DUMMY]
vdup.8 d8, d8[3]
.endm
/* Restore the callee-saved NEON registers saved in init. */
.macro pixman_composite_over_n_8_8_cleanup
vpop {d8-d15}
.endm
generate_composite_function \
pixman_composite_over_n_8_8_asm_neon, 0, 8, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8_8_init, \
pixman_composite_over_n_8_8_cleanup, \
pixman_composite_over_n_8_8_process_pixblock_head, \
pixman_composite_over_n_8_8_process_pixblock_tail, \
pixman_composite_over_n_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * over_n_8888_8888_ca: component-alpha OVER with a solid source and an
 * a8r8g8b8 mask.  Each mask channel scales the matching source channel,
 * and the per-channel mask * src_alpha products become the per-channel
 * "inverse alpha" used against the destination.
 */
.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_head
/*
 * 'combine_mask_ca' replacement
 *
 * input: solid src (n) in {d8, d9, d10, d11}
 * dest in {d4, d5, d6, d7 }
 * mask in {d24, d25, d26, d27}
 * output: updated src in {d0, d1, d2, d3 }
 * updated mask in {d24, d25, d26, d3 }
 */
vmull.u8 q0, d24, d8
vmull.u8 q1, d25, d9
vmull.u8 q6, d26, d10
vmull.u8 q7, d27, d11
vmull.u8 q9, d11, d25
vmull.u8 q12, d11, d24
vmull.u8 q13, d11, d26
vrshr.u16 q8, q0, #8
vrshr.u16 q10, q1, #8
vrshr.u16 q11, q6, #8
vraddhn.u16 d0, q0, q8
vraddhn.u16 d1, q1, q10
vraddhn.u16 d2, q6, q11
vrshr.u16 q11, q12, #8
vrshr.u16 q8, q9, #8
vrshr.u16 q6, q13, #8
vrshr.u16 q10, q7, #8
vraddhn.u16 d24, q12, q11
vraddhn.u16 d25, q9, q8
vraddhn.u16 d26, q13, q6
vraddhn.u16 d3, q7, q10
/*
 * 'combine_over_ca' replacement
 *
 * output: updated dest in {d28, d29, d30, d31}
 */
vmvn.8 q12, q12
vmvn.8 d26, d26
vmull.u8 q8, d24, d4
vmull.u8 q9, d25, d5
vmvn.8 d27, d3
vmull.u8 q10, d26, d6
vmull.u8 q11, d27, d7
.endm
/*
 * Tail: finish the /255 rounding of the destination terms and
 * saturating-add the combined source; result in q14/q15.
 */
.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail
/* ... continue 'combine_over_ca' replacement */
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q6, q10, #8
vrshr.u16 q7, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q6, q10
vraddhn.u16 d31, q7, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
/*
 * Steady-state stage: the tail is expanded inline and interleaved with
 * the destination load/store and mask fetch; the head is invoked as a
 * macro at the end (scheduling left to the assembler here).
 */
.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vrshr.u16 q6, q10, #8
vrshr.u16 q7, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q6, q10
vraddhn.u16 d31, q7, q11
fetch_mask_pixblock
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
cache_preload 8, 8
pixman_composite_over_n_8888_8888_ca_process_pixblock_head
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
/*
 * Init: save callee-saved d8-d15, load the solid source color and
 * splat its channels into d8-d11.
 */
.macro pixman_composite_over_n_8888_8888_ca_init
/* compute the argument address before vpush moves sp */
add DUMMY, sp, #ARGS_STACK_OFFSET
vpush {d8-d15}
vld1.32 {d11[0]}, [DUMMY]
vdup.8 d8, d11[0]
vdup.8 d9, d11[1]
vdup.8 d10, d11[2]
vdup.8 d11, d11[3]
.endm
/* Restore the callee-saved NEON registers saved in init. */
.macro pixman_composite_over_n_8888_8888_ca_cleanup
vpop {d8-d15}
.endm
generate_composite_function \
pixman_composite_over_n_8888_8888_ca_asm_neon, 0, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8888_8888_ca_init, \
pixman_composite_over_n_8888_8888_ca_cleanup, \
pixman_composite_over_n_8888_8888_ca_process_pixblock_head, \
pixman_composite_over_n_8888_8888_ca_process_pixblock_tail, \
pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head
/******************************************************************************/
/*
 * over_n_8888_0565_ca: component-alpha OVER with a solid source, an
 * a8r8g8b8 mask and an r5g6b5 destination.  Like the _8888_ca variant
 * but the destination is unpacked from / repacked to 16 bpp in-line.
 */
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head
/*
 * 'combine_mask_ca' replacement
 *
 * input: solid src (n) in {d8, d9, d10, d11} [B, G, R, A]
 * mask in {d24, d25, d26} [B, G, R]
 * output: updated src in {d0, d1, d2 } [B, G, R]
 * updated mask in {d24, d25, d26} [B, G, R]
 */
vmull.u8 q0, d24, d8
vmull.u8 q1, d25, d9
vmull.u8 q6, d26, d10
vmull.u8 q9, d11, d25
vmull.u8 q12, d11, d24
vmull.u8 q13, d11, d26
vrshr.u16 q8, q0, #8
vrshr.u16 q10, q1, #8
vrshr.u16 q11, q6, #8
vraddhn.u16 d0, q0, q8
vraddhn.u16 d1, q1, q10
vraddhn.u16 d2, q6, q11
vrshr.u16 q11, q12, #8
vrshr.u16 q8, q9, #8
vrshr.u16 q6, q13, #8
vraddhn.u16 d24, q12, q11
vraddhn.u16 d25, q9, q8
/*
 * convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
 * and put data into d16 - blue, d17 - green, d18 - red
 */
vshrn.u16 d17, q2, #3
vshrn.u16 d18, q2, #8
vraddhn.u16 d26, q13, q6
vsli.u16 q2, q2, #5
vsri.u8 d18, d18, #5
vsri.u8 d17, d17, #6
/*
 * 'combine_over_ca' replacement
 *
 * output: updated dest in d16 - blue, d17 - green, d18 - red
 */
vmvn.8 q12, q12
vshrn.u16 d16, q2, #2
vmvn.8 d26, d26
vmull.u8 q6, d16, d24
vmull.u8 q7, d17, d25
vmull.u8 q11, d18, d26
.endm
/*
 * Tail: finish the /255 rounding of the destination terms, add the
 * combined source, then repack the 8-bit B/G/R results into r5g6b5 in
 * q14 (d28, d29).
 */
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail
/* ... continue 'combine_over_ca' replacement */
vrshr.u16 q10, q6, #8
vrshr.u16 q14, q7, #8
vrshr.u16 q15, q11, #8
vraddhn.u16 d16, q10, q6
vraddhn.u16 d17, q14, q7
vraddhn.u16 d18, q15, q11
vqadd.u8 q8, q0, q8
vqadd.u8 d18, d2, d18
/*
 * convert the results in d16, d17, d18 to r5g6b5 and store
 * them into {d28, d29}
 */
vshll.u8 q14, d18, #8
vshll.u8 q10, d17, #8
vshll.u8 q15, d16, #8
vsri.u16 q14, q10, #5
vsri.u16 q14, q15, #11
.endm
/*
 * Steady-state stage: fully hand-interleaved tail + head (the head is
 * expanded inline, see the stage comments below), plus destination
 * load/store, mask fetch and cache preload.  Note the tail part here
 * uses d22 instead of d18 as the red accumulator.
 */
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
fetch_mask_pixblock
vrshr.u16 q10, q6, #8
vrshr.u16 q14, q7, #8
vld1.16 {d4, d5}, [DST_R, :128]!
vrshr.u16 q15, q11, #8
vraddhn.u16 d16, q10, q6
vraddhn.u16 d17, q14, q7
vraddhn.u16 d22, q15, q11
/* process_pixblock_head */
/*
 * 'combine_mask_ca' replacement
 *
 * input: solid src (n) in {d8, d9, d10, d11} [B, G, R, A]
 * mask in {d24, d25, d26} [B, G, R]
 * output: updated src in {d0, d1, d2 } [B, G, R]
 * updated mask in {d24, d25, d26} [B, G, R]
 */
vmull.u8 q6, d26, d10
vqadd.u8 q8, q0, q8
vmull.u8 q0, d24, d8
vqadd.u8 d22, d2, d22
vmull.u8 q1, d25, d9
/*
 * convert the result in d16, d17, d22 to r5g6b5 and store
 * it into {d28, d29}
 */
vshll.u8 q14, d22, #8
vshll.u8 q10, d17, #8
vshll.u8 q15, d16, #8
vmull.u8 q9, d11, d25
vsri.u16 q14, q10, #5
vmull.u8 q12, d11, d24
vmull.u8 q13, d11, d26
vsri.u16 q14, q15, #11
cache_preload 8, 8
vrshr.u16 q8, q0, #8
vrshr.u16 q10, q1, #8
vrshr.u16 q11, q6, #8
vraddhn.u16 d0, q0, q8
vraddhn.u16 d1, q1, q10
vraddhn.u16 d2, q6, q11
vrshr.u16 q11, q12, #8
vrshr.u16 q8, q9, #8
vrshr.u16 q6, q13, #8
vraddhn.u16 d24, q12, q11
vraddhn.u16 d25, q9, q8
/*
 * convert 8 r5g6b5 pixel data from {d4, d5} to planar
 * 8-bit format and put data into d16 - blue, d17 - green,
 * d18 - red
 */
vshrn.u16 d17, q2, #3
vshrn.u16 d18, q2, #8
vraddhn.u16 d26, q13, q6
vsli.u16 q2, q2, #5
vsri.u8 d17, d17, #6
vsri.u8 d18, d18, #5
/*
 * 'combine_over_ca' replacement
 *
 * output: updated dest in d16 - blue, d17 - green, d18 - red
 */
vmvn.8 q12, q12
vshrn.u16 d16, q2, #2
vmvn.8 d26, d26
vmull.u8 q7, d17, d25
vmull.u8 q6, d16, d24
vmull.u8 q11, d18, d26
vst1.16 {d28, d29}, [DST_W, :128]!
.endm
/*
 * Init: save callee-saved d8-d15, load the solid source color and
 * splat its channels into d8-d11.
 */
.macro pixman_composite_over_n_8888_0565_ca_init
/* compute the argument address before vpush moves sp */
add DUMMY, sp, #ARGS_STACK_OFFSET
vpush {d8-d15}
vld1.32 {d11[0]}, [DUMMY]
vdup.8 d8, d11[0]
vdup.8 d9, d11[1]
vdup.8 d10, d11[2]
vdup.8 d11, d11[3]
.endm
/* Restore the callee-saved NEON registers saved in init. */
.macro pixman_composite_over_n_8888_0565_ca_cleanup
vpop {d8-d15}
.endm
generate_composite_function \
pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8888_0565_ca_init, \
pixman_composite_over_n_8888_0565_ca_cleanup, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
/******************************************************************************/
/*
 * in_n_8: IN operator with a solid source and a8 destination —
 * dst = dst * src_alpha / 255 (rounded).  Init splats the solid
 * source's alpha byte into d3.
 */
.macro pixman_composite_in_n_8_process_pixblock_head
/* expecting source data in {d0, d1, d2, d3} */
/* and destination data in {d4, d5, d6, d7} */
vmull.u8 q8, d4, d3
vmull.u8 q9, d5, d3
vmull.u8 q10, d6, d3
vmull.u8 q11, d7, d3
.endm
/* Round the 16-bit products by /255 and narrow into d28-d31. */
.macro pixman_composite_in_n_8_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
vraddhn.u16 d28, q8, q14
vraddhn.u16 d29, q9, q15
vraddhn.u16 d30, q10, q12
vraddhn.u16 d31, q11, q13
.endm
/* Simple (non-interleaved) steady-state stage: tail, load, head, store. */
.macro pixman_composite_in_n_8_process_pixblock_tail_head
pixman_composite_in_n_8_process_pixblock_tail
vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
cache_preload 32, 32
pixman_composite_in_n_8_process_pixblock_head
vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
/* Init: load the solid source and splat its alpha byte into d3. */
.macro pixman_composite_in_n_8_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d3[0]}, [DUMMY]
vdup.8 d3, d3[3]
.endm
.macro pixman_composite_in_n_8_cleanup
.endm
generate_composite_function \
pixman_composite_in_n_8_asm_neon, 0, 0, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_in_n_8_init, \
pixman_composite_in_n_8_cleanup, \
pixman_composite_in_n_8_process_pixblock_head, \
pixman_composite_in_n_8_process_pixblock_tail, \
pixman_composite_in_n_8_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
24 /* mask_basereg */
/*
 * add_n_8_8: ADD with a solid source (alpha splatted into d11 by init),
 * a8 mask in d24-d27 and a8 destination in d4-d7.  The head computes
 * mask * src_alpha / 255 (rounded) and saturating-adds the destination;
 * the tail is empty because the head finishes the whole computation.
 */
.macro pixman_composite_add_n_8_8_process_pixblock_head
/* expecting source data in {d8, d9, d10, d11} */
/* d8 - blue, d9 - green, d10 - red, d11 - alpha */
/* and destination data in {d4, d5, d6, d7} */
/* mask is in d24, d25, d26, d27 */
vmull.u8 q0, d24, d11
vmull.u8 q1, d25, d11
vmull.u8 q6, d26, d11
vmull.u8 q7, d27, d11
vrshr.u16 q10, q0, #8
vrshr.u16 q11, q1, #8
vrshr.u16 q12, q6, #8
vrshr.u16 q13, q7, #8
vraddhn.u16 d0, q0, q10
vraddhn.u16 d1, q1, q11
vraddhn.u16 d2, q6, q12
vraddhn.u16 d3, q7, q13
vqadd.u8 q14, q0, q2
vqadd.u8 q15, q1, q3
.endm
.macro pixman_composite_add_n_8_8_process_pixblock_tail
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
pixman_composite_add_n_8_8_process_pixblock_tail
vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
fetch_mask_pixblock
cache_preload 32, 32
pixman_composite_add_n_8_8_process_pixblock_head
.endm
/* Init: save d8-d15, load the solid source, splat its alpha into d11. */
.macro pixman_composite_add_n_8_8_init
/* compute the argument address before vpush moves sp */
add DUMMY, sp, #ARGS_STACK_OFFSET
vpush {d8-d15}
vld1.32 {d11[0]}, [DUMMY]
vdup.8 d11, d11[3]
.endm
/* Restore the callee-saved NEON registers saved in init. */
.macro pixman_composite_add_n_8_8_cleanup
vpop {d8-d15}
.endm
generate_composite_function \
pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_add_n_8_8_init, \
pixman_composite_add_n_8_8_cleanup, \
pixman_composite_add_n_8_8_process_pixblock_head, \
pixman_composite_add_n_8_8_process_pixblock_tail, \
pixman_composite_add_n_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * add_8_8_8: ADD with a8 source, a8 mask and a8 destination —
 * dst = saturate(dst + src * mask / 255).  The head does the whole
 * computation (rounded /255 multiply, then vqadd with the
 * destination); the tail is empty.
 */
.macro pixman_composite_add_8_8_8_process_pixblock_head
/* expecting source data in {d0, d1, d2, d3} */
/* destination data in {d4, d5, d6, d7} */
/* mask in {d24, d25, d26, d27} */
vmull.u8 q8, d24, d0
vmull.u8 q9, d25, d1
vmull.u8 q10, d26, d2
vmull.u8 q11, d27, d3
vrshr.u16 q0, q8, #8
vrshr.u16 q1, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
vraddhn.u16 d0, q0, q8
vraddhn.u16 d1, q1, q9
vraddhn.u16 d2, q12, q10
vraddhn.u16 d3, q13, q11
vqadd.u8 q14, q0, q2
vqadd.u8 q15, q1, q3
.endm
.macro pixman_composite_add_8_8_8_process_pixblock_tail
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
pixman_composite_add_8_8_8_process_pixblock_tail
vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
fetch_mask_pixblock
fetch_src_pixblock
cache_preload 32, 32
pixman_composite_add_8_8_8_process_pixblock_head
.endm
.macro pixman_composite_add_8_8_8_init
.endm
.macro pixman_composite_add_8_8_8_cleanup
.endm
generate_composite_function \
pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_add_8_8_8_init, \
pixman_composite_add_8_8_8_cleanup, \
pixman_composite_add_8_8_8_process_pixblock_head, \
pixman_composite_add_8_8_8_process_pixblock_tail, \
pixman_composite_add_8_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * add_8888_8888_8888: ADD with a8r8g8b8 source, mask and destination.
 * Each source channel is scaled by the mask alpha (d27) with rounded
 * /255 (vmull + vrsra), then saturating-added to the destination.
 */
.macro pixman_composite_add_8888_8888_8888_process_pixblock_head
/* expecting source data in {d0, d1, d2, d3} */
/* destination data in {d4, d5, d6, d7} */
/* mask in {d24, d25, d26, d27} */
vmull.u8 q8, d27, d0
vmull.u8 q9, d27, d1
vmull.u8 q10, d27, d2
vmull.u8 q11, d27, d3
/* 1 cycle bubble */
vrsra.u16 q8, q8, #8
vrsra.u16 q9, q9, #8
vrsra.u16 q10, q10, #8
vrsra.u16 q11, q11, #8
.endm
/* Narrow the scaled source and saturating-add the destination. */
.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail
/* 2 cycle bubble */
vrshrn.u16 d28, q8, #8
vrshrn.u16 d29, q9, #8
vrshrn.u16 d30, q10, #8
vrshrn.u16 d31, q11, #8
vqadd.u8 q14, q2, q14
/* 1 cycle bubble */
vqadd.u8 q15, q3, q15
.endm
/*
 * Steady-state stage: tail and head interleaved with source/mask
 * fetches, destination load/store and cache preload.
 */
.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
fetch_src_pixblock
vrshrn.u16 d28, q8, #8
fetch_mask_pixblock
vrshrn.u16 d29, q9, #8
vmull.u8 q8, d27, d0
vrshrn.u16 d30, q10, #8
vmull.u8 q9, d27, d1
vrshrn.u16 d31, q11, #8
vmull.u8 q10, d27, d2
vqadd.u8 q14, q2, q14
vmull.u8 q11, d27, d3
vqadd.u8 q15, q3, q15
vrsra.u16 q8, q8, #8
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vrsra.u16 q9, q9, #8
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
vrsra.u16 q10, q10, #8
cache_preload 8, 8
vrsra.u16 q11, q11, #8
.endm
/* Full composite entry point for add_8888_8888_8888. */
generate_composite_function \
pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
/* Single-scanline variant sharing the same pixblock macros. */
generate_composite_function_single_scanline \
pixman_composite_scanline_add_mask_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * ADD with a8r8g8b8 source, a8 mask and a8r8g8b8 destination, reusing
 * the add_8888_8888_8888 pixblock macros.  mask_basereg 27 places the
 * 8-bit mask directly in d27, where those macros read the mask alpha.
 */
generate_composite_function \
pixman_composite_add_8888_8_8888_asm_neon, 32, 8, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
27 /* mask_basereg */
/******************************************************************************/
/* Load the solid source color (a 32-bit value passed on the stack)
 * and splat each of its four bytes across a d register: d0..d2 get
 * the color channels, d3 the alpha byte. */
.macro pixman_composite_add_n_8_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d3[0]}, [DUMMY]
vdup.8 d0, d3[0]
vdup.8 d1, d3[1]
vdup.8 d2, d3[2]
vdup.8 d3, d3[3]
.endm
/* Nothing to undo: init touched only volatile NEON registers. */
.macro pixman_composite_add_n_8_8888_cleanup
.endm
/* ADD with solid source, 8bpp mask, 32bpp destination. */
generate_composite_function \
pixman_composite_add_n_8_8888_asm_neon, 0, 8, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_add_n_8_8888_init, \
pixman_composite_add_n_8_8888_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
27 /* mask_basereg */
/******************************************************************************/
/* Load the solid mask (second stack argument, hence the +8) and splat
 * its alpha byte (lane 3) across d27 — the register the shared
 * add_8888_8888_8888 pixel block macros multiply by. */
.macro pixman_composite_add_8888_n_8888_init
add DUMMY, sp, #(ARGS_STACK_OFFSET + 8)
vld1.32 {d27[0]}, [DUMMY]
vdup.8 d27, d27[3]
.endm
/* Nothing to undo. */
.macro pixman_composite_add_8888_n_8888_cleanup
.endm
/* ADD with 32bpp source, solid mask, 32bpp destination. */
generate_composite_function \
pixman_composite_add_8888_n_8888_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_add_8888_n_8888_init, \
pixman_composite_add_8888_n_8888_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
27 /* mask_basereg */
/******************************************************************************/
/* OUT_REVERSE with a solid mask: first the source is multiplied by
 * the mask alpha in d15 (the 'in' step, result back in d0-d3), then
 * the destination is multiplied by the inverted source alpha. */
.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
/* expecting source data in {d0, d1, d2, d3} */
/* destination data in {d4, d5, d6, d7} */
/* solid mask is in d15 */
/* 'in' */
vmull.u8 q8, d15, d3
vmull.u8 q6, d15, d2
vmull.u8 q5, d15, d1
vmull.u8 q4, d15, d0
vrshr.u16 q13, q8, #8
vrshr.u16 q12, q6, #8
vrshr.u16 q11, q5, #8
vrshr.u16 q10, q4, #8
vraddhn.u16 d3, q8, q13
vraddhn.u16 d2, q6, q12
vraddhn.u16 d1, q5, q11
vraddhn.u16 d0, q4, q10
vmvn.8 d24, d3 /* get inverted alpha */
/* now do alpha blending */
vmull.u8 q8, d24, d4
vmull.u8 q9, d24, d5
vmull.u8 q10, d24, d6
vmull.u8 q11, d24, d7
.endm
/* Finalize the dst * (255 - alpha) products: vrshr + vraddhn perform
 * the rounding /255 approximation; result lands in d28-d31. */
.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
.endm
/* TODO: expand macros and do better instructions scheduling */
/* Non-pipelined tail_head for the per-pixel-mask variant: just runs
 * tail, fetches the next src/mask/dst blocks, then runs head again. */
.macro pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
fetch_src_pixblock
cache_preload 8, 8
fetch_mask_pixblock
pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
/* Single-scanline OUT_REVERSE with a per-pixel a8r8g8b8 mask.
 * Fix: the tail_head argument was missing its trailing comma (gas
 * tolerates whitespace-separated macro arguments, but every other
 * invocation in this file uses the comma form). */
generate_composite_function_single_scanline \
pixman_composite_scanline_out_reverse_mask_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_out_reverse_8888_n_8888_process_pixblock_head, \
pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail, \
pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
12 /* mask_basereg */
/******************************************************************************/
/* OVER with a solid mask: same head as out_reverse (in + dst*(1-a))... */
.macro pixman_composite_over_8888_n_8888_process_pixblock_head
pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
.endm
/* ...plus a saturating add of the masked source to complete OVER. */
.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
pixman_composite_over_8888_n_8888_process_pixblock_tail
fetch_src_pixblock
cache_preload 8, 8
pixman_composite_over_8888_n_8888_process_pixblock_head
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
/* Load the solid mask from the stack and splat its alpha to d15.
 * NOTE(review): the hard-coded #48 must equal ARGS_STACK_OFFSET + 8
 * plus nothing else, since DUMMY is computed before the vpush moves
 * sp — verify against the ARGS_STACK_OFFSET definition. */
.macro pixman_composite_over_8888_n_8888_init
add DUMMY, sp, #48
vpush {d8-d15}
vld1.32 {d15[0]}, [DUMMY]
vdup.8 d15, d15[3]
.endm
/* Restore the callee-saved NEON registers pushed in init. */
.macro pixman_composite_over_8888_n_8888_cleanup
vpop {d8-d15}
.endm
/* OVER with 32bpp source, solid mask, 32bpp destination. */
generate_composite_function \
pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_8888_n_8888_init, \
pixman_composite_over_8888_n_8888_cleanup, \
pixman_composite_over_8888_n_8888_process_pixblock_head, \
pixman_composite_over_8888_n_8888_process_pixblock_tail, \
pixman_composite_over_8888_n_8888_process_pixblock_tail_head
/******************************************************************************/
/* TODO: expand macros and do better instructions scheduling */
/* Non-pipelined tail_head for OVER with a per-pixel mask: runs the
 * solid-mask tail, fetches next src/mask/dst blocks, runs head. */
.macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
pixman_composite_over_8888_n_8888_process_pixblock_tail
fetch_src_pixblock
cache_preload 8, 8
fetch_mask_pixblock
pixman_composite_over_8888_n_8888_process_pixblock_head
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
/* OVER with 32bpp source, 32bpp mask, 32bpp destination.
 * Fix: added the comma missing after the tail_head argument, matching
 * every other generate_composite_function invocation in this file
 * (gas merely tolerated the whitespace-separated form). */
generate_composite_function \
pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_n_8888_process_pixblock_head, \
pixman_composite_over_8888_n_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
12 /* mask_basereg */
/* Single-scanline OVER with a per-pixel mask.
 * Fix: added the comma missing after the tail_head argument for
 * consistency with the other invocations in this file. */
generate_composite_function_single_scanline \
pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_n_8888_process_pixblock_head, \
pixman_composite_over_8888_n_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
12 /* mask_basereg */
/******************************************************************************/
/* TODO: expand macros and do better instructions scheduling */
/* Non-pipelined tail_head for OVER with an 8bpp mask; identical shape
 * to the 8888-mask variant, the mask register differs via basereg. */
.macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
pixman_composite_over_8888_n_8888_process_pixblock_tail
fetch_src_pixblock
cache_preload 8, 8
fetch_mask_pixblock
pixman_composite_over_8888_n_8888_process_pixblock_head
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
/* OVER with 32bpp source, 8bpp mask, 32bpp destination.
 * Fix: added the comma missing after the tail_head argument for
 * consistency with the other invocations in this file. */
generate_composite_function \
pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_n_8888_process_pixblock_head, \
pixman_composite_over_8888_n_8888_process_pixblock_tail, \
pixman_composite_over_8888_8_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/* Plain 24bpp copy: no per-pixel computation, so head and tail are
 * empty and tail_head just stores, fetches and preloads. */
.macro pixman_composite_src_0888_0888_process_pixblock_head
.endm
.macro pixman_composite_src_0888_0888_process_pixblock_tail
.endm
.macro pixman_composite_src_0888_0888_process_pixblock_tail_head
vst3.8 {d0, d1, d2}, [DST_W]!
fetch_src_pixblock
cache_preload 8, 8
.endm
generate_composite_function \
pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_0888_0888_process_pixblock_head, \
pixman_composite_src_0888_0888_process_pixblock_tail, \
pixman_composite_src_0888_0888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* 24bpp -> 32bpp expansion with channel swap: vswp exchanges the
 * first and third planes (d0/d2) of the deinterleaved pixels. */
.macro pixman_composite_src_0888_8888_rev_process_pixblock_head
vswp d0, d2
.endm
.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail
.endm
.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head
vst4.8 {d0, d1, d2, d3}, [DST_W]!
fetch_src_pixblock
vswp d0, d2
cache_preload 8, 8
.endm
/* The fourth output plane (d3) is zeroed once up front and then
 * stored unchanged with every block. */
.macro pixman_composite_src_0888_8888_rev_init
veor d3, d3, d3
.endm
generate_composite_function \
pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
pixman_composite_src_0888_8888_rev_init, \
default_cleanup, \
pixman_composite_src_0888_8888_rev_process_pixblock_head, \
pixman_composite_src_0888_8888_rev_process_pixblock_tail, \
pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* 24bpp -> r5g6b5 with channel swap: each 8-bit channel is widened
 * into the top byte of a 16-bit lane (vshll #8), then vsri shifts the
 * other channels in to build the packed 5:6:5 value in q14. */
.macro pixman_composite_src_0888_0565_rev_process_pixblock_head
vshll.u8 q8, d1, #8
vshll.u8 q9, d2, #8
.endm
.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail
vshll.u8 q14, d0, #8
vsri.u16 q14, q8, #5
vsri.u16 q14, q9, #11
.endm
/* Pipelined: pack and store the previous block while widening the
 * channels of the next one. */
.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head
vshll.u8 q14, d0, #8
fetch_src_pixblock
vsri.u16 q14, q8, #5
vsri.u16 q14, q9, #11
vshll.u8 q8, d1, #8
vst1.16 {d28, d29}, [DST_W, :128]!
vshll.u8 q9, d2, #8
.endm
generate_composite_function \
pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_0888_0565_rev_process_pixblock_head, \
pixman_composite_src_0888_0565_rev_process_pixblock_tail, \
pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* pixbuf -> a8r8g8b8: multiply the three color channels (d0-d2) by
 * the alpha channel (d3), i.e. premultiply the source. */
.macro pixman_composite_src_pixbuf_8888_process_pixblock_head
vmull.u8 q8, d3, d0
vmull.u8 q9, d3, d1
vmull.u8 q10, d3, d2
.endm
/* Finish the /255 approximation (vrshr + vraddhn) and assemble the
 * output block in d28-d31; vswp moves the alpha into d31. Note the
 * channel order d30,d29,d28 here (mirrored in rpixbuf below). */
.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail
vrshr.u16 q11, q8, #8
vswp d3, d31
vrshr.u16 q12, q9, #8
vrshr.u16 q13, q10, #8
vraddhn.u16 d30, q11, q8
vraddhn.u16 d29, q12, q9
vraddhn.u16 d28, q13, q10
.endm
/* Pipelined tail+head with inline PF (prefetch) bookkeeping. */
.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head
vrshr.u16 q11, q8, #8
vswp d3, d31
vrshr.u16 q12, q9, #8
vrshr.u16 q13, q10, #8
fetch_src_pixblock
vraddhn.u16 d30, q11, q8
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vraddhn.u16 d29, q12, q9
vraddhn.u16 d28, q13, q10
vmull.u8 q8, d3, d0
vmull.u8 q9, d3, d1
vmull.u8 q10, d3, d2
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
PF cmp PF_X, ORIG_W
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
PF subge PF_X, PF_X, ORIG_W
PF subges PF_CTL, PF_CTL, #0x10
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
.endm
generate_composite_function \
pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_pixbuf_8888_process_pixblock_head, \
pixman_composite_src_pixbuf_8888_process_pixblock_tail, \
pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* rpixbuf -> a8r8g8b8: identical to src_pixbuf_8888 except the
 * narrowed channels are written in the opposite order
 * (d28,d29,d30 instead of d30,d29,d28), swapping two color planes. */
.macro pixman_composite_src_rpixbuf_8888_process_pixblock_head
vmull.u8 q8, d3, d0
vmull.u8 q9, d3, d1
vmull.u8 q10, d3, d2
.endm
.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail
vrshr.u16 q11, q8, #8
vswp d3, d31
vrshr.u16 q12, q9, #8
vrshr.u16 q13, q10, #8
vraddhn.u16 d28, q11, q8
vraddhn.u16 d29, q12, q9
vraddhn.u16 d30, q13, q10
.endm
/* Pipelined tail+head with inline PF (prefetch) bookkeeping. */
.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head
vrshr.u16 q11, q8, #8
vswp d3, d31
vrshr.u16 q12, q9, #8
vrshr.u16 q13, q10, #8
fetch_src_pixblock
vraddhn.u16 d28, q11, q8
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vraddhn.u16 d29, q12, q9
vraddhn.u16 d30, q13, q10
vmull.u8 q8, d3, d0
vmull.u8 q9, d3, d1
vmull.u8 q10, d3, d2
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
PF cmp PF_X, ORIG_W
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
PF subge PF_X, PF_X, ORIG_W
PF subges PF_CTL, PF_CTL, #0x10
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
.endm
generate_composite_function \
pixman_composite_src_rpixbuf_8888_asm_neon, 32, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_rpixbuf_8888_process_pixblock_head, \
pixman_composite_src_rpixbuf_8888_process_pixblock_tail, \
pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* OVER for r5g6b5 src/dst with an 8bpp mask: expand both src and dst
 * to x888, apply the mask to the source and (inverted) to the
 * destination, blend, then pack back to 0565 in the tail. */
.macro pixman_composite_over_0565_8_0565_process_pixblock_head
/* mask is in d15 */
convert_0565_to_x888 q4, d2, d1, d0
convert_0565_to_x888 q5, d6, d5, d4
/* source pixel data is in {d0, d1, d2, XX} */
/* destination pixel data is in {d4, d5, d6, XX} */
vmvn.8 d7, d15
vmull.u8 q6, d15, d2
vmull.u8 q5, d15, d1
vmull.u8 q4, d15, d0
vmull.u8 q8, d7, d4
vmull.u8 q9, d7, d5
vmull.u8 q13, d7, d6
vrshr.u16 q12, q6, #8
vrshr.u16 q11, q5, #8
vrshr.u16 q10, q4, #8
vraddhn.u16 d2, q6, q12
vraddhn.u16 d1, q5, q11
vraddhn.u16 d0, q4, q10
.endm
/* Finish dst*(1-m), add the masked source and repack to 0565. */
.macro pixman_composite_over_0565_8_0565_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q13, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q12, q13
vqadd.u8 q0, q0, q14
vqadd.u8 q1, q1, q15
/* 32bpp result is in {d0, d1, d2, XX} */
convert_8888_to_0565 d2, d1, d0, q14, q15, q3
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_0565_8_0565_process_pixblock_tail_head
fetch_mask_pixblock
pixman_composite_over_0565_8_0565_process_pixblock_tail
fetch_src_pixblock
vld1.16 {d10, d11}, [DST_R, :128]!
cache_preload 8, 8
pixman_composite_over_0565_8_0565_process_pixblock_head
vst1.16 {d28, d29}, [DST_W, :128]!
.endm
generate_composite_function \
pixman_composite_over_0565_8_0565_asm_neon, 16, 8, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_0565_8_0565_process_pixblock_head, \
pixman_composite_over_0565_8_0565_process_pixblock_tail, \
pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
8, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/* Solid-mask variant of over_0565_8_0565: load the 32-bit mask value
 * from the stack and splat its alpha byte to d15 (the register the
 * shared pixel block macros read the mask from). */
.macro pixman_composite_over_0565_n_0565_init
add DUMMY, sp, #(ARGS_STACK_OFFSET + 8)
vpush {d8-d15}
vld1.32 {d15[0]}, [DUMMY]
vdup.8 d15, d15[3]
.endm
/* Restore the callee-saved NEON registers pushed in init. */
.macro pixman_composite_over_0565_n_0565_cleanup
vpop {d8-d15}
.endm
generate_composite_function \
pixman_composite_over_0565_n_0565_asm_neon, 16, 0, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_0565_n_0565_init, \
pixman_composite_over_0565_n_0565_cleanup, \
pixman_composite_over_0565_8_0565_process_pixblock_head, \
pixman_composite_over_0565_8_0565_process_pixblock_tail, \
pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
8, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/* ADD for r5g6b5 src/dst with 8bpp mask: expand to x888, apply the
 * mask to the source only (no inverted-alpha term for ADD). */
.macro pixman_composite_add_0565_8_0565_process_pixblock_head
/* mask is in d15 */
convert_0565_to_x888 q4, d2, d1, d0
convert_0565_to_x888 q5, d6, d5, d4
/* source pixel data is in {d0, d1, d2, XX} */
/* destination pixel data is in {d4, d5, d6, XX} */
vmull.u8 q6, d15, d2
vmull.u8 q5, d15, d1
vmull.u8 q4, d15, d0
vrshr.u16 q12, q6, #8
vrshr.u16 q11, q5, #8
vrshr.u16 q10, q4, #8
vraddhn.u16 d2, q6, q12
vraddhn.u16 d1, q5, q11
vraddhn.u16 d0, q4, q10
.endm
/* Saturating add of the destination and pack back to 0565. */
.macro pixman_composite_add_0565_8_0565_process_pixblock_tail
vqadd.u8 q0, q0, q2
vqadd.u8 q1, q1, q3
/* 32bpp result is in {d0, d1, d2, XX} */
convert_8888_to_0565 d2, d1, d0, q14, q15, q3
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_0565_8_0565_process_pixblock_tail_head
fetch_mask_pixblock
pixman_composite_add_0565_8_0565_process_pixblock_tail
fetch_src_pixblock
vld1.16 {d10, d11}, [DST_R, :128]!
cache_preload 8, 8
pixman_composite_add_0565_8_0565_process_pixblock_head
vst1.16 {d28, d29}, [DST_W, :128]!
.endm
generate_composite_function \
pixman_composite_add_0565_8_0565_asm_neon, 16, 8, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_add_0565_8_0565_process_pixblock_head, \
pixman_composite_add_0565_8_0565_process_pixblock_tail, \
pixman_composite_add_0565_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
8, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/* OUT_REVERSE with 8bpp source onto r5g6b5: dst = dst * (1 - src).
 * The destination is expanded to x888, multiplied by the inverted
 * source alpha, and repacked in the tail. */
.macro pixman_composite_out_reverse_8_0565_process_pixblock_head
/* mask is in d15 */
convert_0565_to_x888 q5, d6, d5, d4
/* destination pixel data is in {d4, d5, d6, xx} */
vmvn.8 d24, d15 /* get inverted alpha */
/* now do alpha blending */
vmull.u8 q8, d24, d4
vmull.u8 q9, d24, d5
vmull.u8 q10, d24, d6
.endm
.macro pixman_composite_out_reverse_8_0565_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vraddhn.u16 d0, q14, q8
vraddhn.u16 d1, q15, q9
vraddhn.u16 d2, q12, q10
/* 32bpp result is in {d0, d1, d2, XX} */
convert_8888_to_0565 d2, d1, d0, q14, q15, q3
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_out_reverse_8_0565_process_pixblock_tail_head
fetch_src_pixblock
pixman_composite_out_reverse_8_0565_process_pixblock_tail
vld1.16 {d10, d11}, [DST_R, :128]!
cache_preload 8, 8
pixman_composite_out_reverse_8_0565_process_pixblock_head
vst1.16 {d28, d29}, [DST_W, :128]!
.endm
generate_composite_function \
pixman_composite_out_reverse_8_0565_asm_neon, 8, 0, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_out_reverse_8_0565_process_pixblock_head, \
pixman_composite_out_reverse_8_0565_process_pixblock_tail, \
pixman_composite_out_reverse_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
15, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* OUT_REVERSE with 8bpp source onto a8r8g8b8: every destination
 * channel is multiplied by (255 - src). */
.macro pixman_composite_out_reverse_8_8888_process_pixblock_head
/* src is in d0 */
/* destination pixel data is in {d4, d5, d6, d7} */
vmvn.8 d1, d0 /* get inverted alpha */
/* now do alpha blending */
vmull.u8 q8, d1, d4
vmull.u8 q9, d1, d5
vmull.u8 q10, d1, d6
vmull.u8 q11, d1, d7
.endm
/* Complete the rounding /255 approximation (vrshr + vraddhn). */
.macro pixman_composite_out_reverse_8_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
/* 32bpp result is in {d28, d29, d30, d31} */
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_out_reverse_8_8888_process_pixblock_tail_head
fetch_src_pixblock
pixman_composite_out_reverse_8_8888_process_pixblock_tail
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
cache_preload 8, 8
pixman_composite_out_reverse_8_8888_process_pixblock_head
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
generate_composite_function \
pixman_composite_out_reverse_8_8888_asm_neon, 8, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_out_reverse_8_8888_process_pixblock_head, \
pixman_composite_out_reverse_8_8888_process_pixblock_tail, \
pixman_composite_out_reverse_8_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* Nearest-neighbour scaled scanline functions; each reuses the pixel
 * block macros of the corresponding unscaled operation (defined
 * earlier in this file). */
/* OVER a8r8g8b8 -> a8r8g8b8 */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_8888_8888_OVER_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_process_pixblock_tail_head
/* OVER a8r8g8b8 -> r5g6b5 */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_8888_0565_OVER_asm_neon, 32, 0, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_0565_process_pixblock_head, \
pixman_composite_over_8888_0565_process_pixblock_tail, \
pixman_composite_over_8888_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
24 /* mask_basereg */
/* SRC a8r8g8b8 -> r5g6b5 */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_8888_0565_SRC_asm_neon, 32, 0, 16, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_src_8888_0565_process_pixblock_head, \
pixman_composite_src_8888_0565_process_pixblock_tail, \
pixman_composite_src_8888_0565_process_pixblock_tail_head
/* SRC r5g6b5 -> a8r8g8b8 */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_0565_8888_SRC_asm_neon, 16, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_src_0565_8888_process_pixblock_head, \
pixman_composite_src_0565_8888_process_pixblock_tail, \
pixman_composite_src_0565_8888_process_pixblock_tail_head
/* OVER a8r8g8b8 with a8 mask -> r5g6b5 */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_8_0565_process_pixblock_head, \
pixman_composite_over_8888_8_0565_process_pixblock_tail, \
pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
8, /* src_basereg */ \
24 /* mask_basereg */
/* OVER r5g6b5 with a8 mask -> r5g6b5 */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_neon, 16, 8, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_0565_8_0565_process_pixblock_head, \
pixman_composite_over_0565_8_0565_process_pixblock_tail, \
pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
8, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/*
* Bilinear scaling support code which tries to provide pixel fetching, color
* format conversion, and interpolation as separate macros which can be used
* as the basic building blocks for constructing bilinear scanline functions.
*/
/* Fetch two vertically adjacent a8r8g8b8 texels for bilinear
 * filtering: the address is TOP + (X >> 16) * 4, the second texel is
 * STRIDE bytes further; X advances by the fixed-point step UX. */
.macro bilinear_load_8888 reg1, reg2, tmp
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
vld1.32 {reg1}, [TMP1], STRIDE
vld1.32 {reg2}, [TMP1]
.endm
/* Same for r5g6b5: both 2-pixel rows are loaded into reg2 lanes, then
 * expanded to packed x888 by the conversion helper. */
.macro bilinear_load_0565 reg1, reg2, tmp
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #1
vld1.32 {reg2[0]}, [TMP1], STRIDE
vld1.32 {reg2[1]}, [TMP1]
convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
.endm
/* Load two texel pairs and perform the vertical interpolation step:
 * acc = top * d28 + bottom * d29 (d28/d29 hold the WT/WB weights
 * splatted by the scanline function prologue). */
.macro bilinear_load_and_vertical_interpolate_two_8888 \
acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2
bilinear_load_8888 reg1, reg2, tmp1
vmull.u8 acc1, reg1, d28
vmlal.u8 acc1, reg2, d29
bilinear_load_8888 reg3, reg4, tmp2
vmull.u8 acc2, reg3, d28
vmlal.u8 acc2, reg4, d29
.endm
/* Four-pixel variant: simply runs the two-pixel version twice. */
.macro bilinear_load_and_vertical_interpolate_four_8888 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
bilinear_load_and_vertical_interpolate_two_8888 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi
bilinear_load_and_vertical_interpolate_two_8888 \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
.endm
/* 0565 two-pixel load + vertical interpolation: the four 16-bit
 * texels are gathered into acc2 lanes, expanded to x888 and
 * rearranged with vzip so reg1..reg4 line up for the weighted
 * multiply by d28 (top weight) / d29 (bottom weight). */
.macro bilinear_load_and_vertical_interpolate_two_0565 \
acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #1
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #1
vld1.32 {acc2lo[0]}, [TMP1], STRIDE
vld1.32 {acc2hi[0]}, [TMP2], STRIDE
vld1.32 {acc2lo[1]}, [TMP1]
vld1.32 {acc2hi[1]}, [TMP2]
convert_0565_to_x888 acc2, reg3, reg2, reg1
vzip.u8 reg1, reg3
vzip.u8 reg2, reg4
vzip.u8 reg3, reg4
vzip.u8 reg1, reg2
vmull.u8 acc1, reg1, d28
vmlal.u8 acc1, reg2, d29
vmull.u8 acc2, reg3, d28
vmlal.u8 acc2, reg4, d29
.endm
/* Four-pixel 0565 variant: same work as two invocations of the
 * two-pixel macro, but with the loads of the second pair interleaved
 * into the vzip/convert sequence of the first to hide latency. */
.macro bilinear_load_and_vertical_interpolate_four_0565 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #1
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #1
vld1.32 {xacc2lo[0]}, [TMP1], STRIDE
vld1.32 {xacc2hi[0]}, [TMP2], STRIDE
vld1.32 {xacc2lo[1]}, [TMP1]
vld1.32 {xacc2hi[1]}, [TMP2]
convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #1
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #1
vld1.32 {yacc2lo[0]}, [TMP1], STRIDE
vzip.u8 xreg1, xreg3
vld1.32 {yacc2hi[0]}, [TMP2], STRIDE
vzip.u8 xreg2, xreg4
vld1.32 {yacc2lo[1]}, [TMP1]
vzip.u8 xreg3, xreg4
vld1.32 {yacc2hi[1]}, [TMP2]
vzip.u8 xreg1, xreg2
convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
vmull.u8 xacc1, xreg1, d28
vzip.u8 yreg1, yreg3
vmlal.u8 xacc1, xreg2, d29
vzip.u8 yreg2, yreg4
vmull.u8 xacc2, xreg3, d28
vzip.u8 yreg3, yreg4
vmlal.u8 xacc2, xreg4, d29
vzip.u8 yreg1, yreg2
vmull.u8 yacc1, yreg1, d28
vmlal.u8 yacc1, yreg2, d29
vmull.u8 yacc2, yreg3, d28
vmlal.u8 yacc2, yreg4, d29
.endm
/* Store numpix (4/2/1) interpolated a8r8g8b8 pixels from d0/d1;
 * tmp1/tmp2 are unused here, kept for interface parity with the
 * 0565 variant below. */
.macro bilinear_store_8888 numpix, tmp1, tmp2
.if numpix == 4
vst1.32 {d0, d1}, [OUT, :128]!
.elseif numpix == 2
vst1.32 {d0}, [OUT, :64]!
.elseif numpix == 1
vst1.32 {d0[0]}, [OUT, :32]!
.else
.error bilinear_store_8888 numpix is unsupported
.endif
.endm
/* Store numpix pixels as r5g6b5: the vuzp sequence deinterleaves the
 * byte channels into the layout convert_8888_to_0565 expects. */
.macro bilinear_store_0565 numpix, tmp1, tmp2
vuzp.u8 d0, d1
vuzp.u8 d2, d3
vuzp.u8 d1, d3
vuzp.u8 d0, d2
convert_8888_to_0565 d2, d1, d0, q1, tmp1, tmp2
.if numpix == 4
vst1.16 {d2}, [OUT, :64]!
.elseif numpix == 2
vst1.32 {d2[0]}, [OUT, :32]!
.elseif numpix == 1
vst1.16 {d2[0]}, [OUT, :16]!
.else
.error bilinear_store_0565 numpix is unsupported
.endif
.endm
/* Interpolate and store a single pixel: vertical blend via the
 * weighted load, then horizontal blend using the per-pixel weights
 * in d30 (derived from q15 by the caller). */
.macro bilinear_interpolate_last_pixel src_fmt, dst_fmt
bilinear_load_&src_fmt d0, d1, d2
vmull.u8 q1, d0, d28
vmlal.u8 q1, d1, d29
/* 5 cycles bubble */
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
/* 5 cycles bubble */
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
/* 3 cycles bubble */
vmovn.u16 d0, q0
/* 1 cycle bubble */
bilinear_store_&dst_fmt 1, q2, q3
.endm
/* Two-pixel variant; also advances the fixed-point X accumulator
 * state (q12 += q13, next weights into q15). */
.macro bilinear_interpolate_two_pixels src_fmt, dst_fmt
bilinear_load_and_vertical_interpolate_two_&src_fmt \
q1, q11, d0, d1, d20, d21, d22, d23
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vadd.u16 q12, q12, q13
vmovn.u16 d0, q0
bilinear_store_&dst_fmt 2, q2, q3
.endm
/* Generic four-pixel bilinear step: vertical interpolation via the
 * load helper, horizontal interpolation with the weights in q15,
 * plus source prefetch (pld) and X accumulator maintenance. */
.macro bilinear_interpolate_four_pixels src_fmt, dst_fmt
bilinear_load_and_vertical_interpolate_four_&src_fmt \
q1, q11, d0, d1, d20, d21, d22, d23 \
q3, q9, d4, d5, d16, d17, d18, d19
pld [TMP1, PF_OFFS]
sub TMP1, TMP1, STRIDE
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q2, d6, d30
vmlal.u16 q2, d7, d30
vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS
pld [TMP2, PF_OFFS]
vmlsl.u16 q8, d18, d31
vmlal.u16 q8, d19, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vmovn.u16 d0, q0
vmovn.u16 d1, q2
vadd.u16 q12, q12, q13
bilinear_store_&dst_fmt 4, q2, q3
.endm
/* Dispatch wrappers: if a hand-optimized per-format implementation
 * (have_bilinear_interpolate_..._<src>_<dst>) was defined earlier,
 * call its head/tail/tail_head pieces; otherwise fall back to the
 * generic macros above. */
.macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_head
.else
bilinear_interpolate_four_pixels src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail
.endif
.endm
.macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail_head
.else
bilinear_interpolate_four_pixels src_fmt, dst_fmt
.endif
.endm
/* Eight-pixel wrappers: fall back to two four-pixel steps when no
 * dedicated eight-pixel implementation exists. */
.macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_head
.else
bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail
.else
bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail_head
.else
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.endif
.endm
/* Flag bits for the 'flags' argument of generate_bilinear_scanline_func */
.set BILINEAR_FLAG_UNROLL_4, 0 /* default: 4 pixels per main-loop iteration */
.set BILINEAR_FLAG_UNROLL_8, 1 /* use the 8-pixels-per-iteration main loop */
.set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* also use callee-saved d8-d15 (saved/restored) */
/*
* Main template macro for generating NEON optimized bilinear scanline
* functions.
*
* Bilinear scanline scaler macro template uses the following arguments:
* fname - name of the function to generate
* src_fmt - source color format (8888 or 0565)
* dst_fmt - destination color format (8888 or 0565)
* bpp_shift - (1 << bpp_shift) is the size of source pixel in bytes
* prefetch_distance - prefetch in the source image by that many
* pixels ahead
*/
/*
 * Register usage of the generated function:
 *   r0=OUT, r1=TOP, r2=BOTTOM (later reused as STRIDE), r3=WT (later TMP1),
 *   r4=WB (later TMP2), r5=X (16.16 fixed point), r6=UX (16.16 step),
 *   ip=WIDTH, r7=PF_OFFS, r8/r9=TMP3/TMP4.
 * NEON state maintained for the interpolation macros:
 *   q12 = per-lane horizontal coordinates, q13 = coordinate increments,
 *   d28/d29 = top/bottom vertical weights, q15 = horizontal weights.
 */
.macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \
src_bpp_shift, dst_bpp_shift, \
prefetch_distance, flags
pixman_asm_function fname
OUT .req r0
TOP .req r1
BOTTOM .req r2
WT .req r3
WB .req r4
X .req r5
UX .req r6
WIDTH .req ip
/* WT/WB are broadcast into d28/d29 below, so r3/r4 can be reused as temps */
TMP1 .req r3
TMP2 .req r4
PF_OFFS .req r7
TMP3 .req r8
TMP4 .req r9
STRIDE .req r2
/* ip = original sp: used below to fetch the stack-passed arguments */
mov ip, sp
push {r4, r5, r6, r7, r8, r9}
mov PF_OFFS, #prefetch_distance
ldmia ip, {WB, X, UX, WIDTH}
/* prefetch offset in 16.16 source-pixel units */
mul PF_OFFS, PF_OFFS, UX
.if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0
/* d8-d15 are callee-saved per AAPCS and must be preserved */
vpush {d8-d15}
.endif
/* STRIDE = byte distance between the two source rows; loads below use
 * "[ptr], STRIDE" then "[ptr]" to fetch a vertically adjacent pair */
sub STRIDE, BOTTOM, TOP
.unreq BOTTOM
cmp WIDTH, #0
ble 3f
vdup.u16 q12, X
vdup.u16 q13, UX
vdup.u8 d28, WT
vdup.u8 d29, WB
/* d25 is the high half of q12, d26 the low half of q13: offset the upper
 * lanes by one UX step so q12 holds coordinates for two adjacent pixels */
vadd.u16 d25, d25, d26
/* ensure good destination alignment */
cmp WIDTH, #1
blt 0f
tst OUT, #(1 << dst_bpp_shift)
beq 0f
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vadd.u16 q12, q12, q13
bilinear_interpolate_last_pixel src_fmt, dst_fmt
sub WIDTH, WIDTH, #1
0:
/* from here each advance covers two pixels, so double the increment */
vadd.u16 q13, q13, q13
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vadd.u16 q12, q12, q13
cmp WIDTH, #2
blt 0f
tst OUT, #(1 << (dst_bpp_shift + 1))
beq 0f
bilinear_interpolate_two_pixels src_fmt, dst_fmt
sub WIDTH, WIDTH, #2
0:
.if ((flags) & BILINEAR_FLAG_UNROLL_8) != 0
/*********** 8 pixels per iteration *****************/
/* align OUT to 8 pixels by emitting one 4-pixel group if needed */
cmp WIDTH, #4
blt 0f
tst OUT, #(1 << (dst_bpp_shift + 2))
beq 0f
bilinear_interpolate_four_pixels src_fmt, dst_fmt
sub WIDTH, WIDTH, #4
0:
subs WIDTH, WIDTH, #8
blt 1f
/* convert PF_OFFS from 16.16 pixels to a byte offset for pld */
mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift)
bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #8
blt 5f
0:
/* software-pipelined steady-state loop */
bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #8
bge 0b
5:
bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
1:
tst WIDTH, #4
beq 2f
bilinear_interpolate_four_pixels src_fmt, dst_fmt
2:
.else
/*********** 4 pixels per iteration *****************/
subs WIDTH, WIDTH, #4
blt 1f
/* convert PF_OFFS from 16.16 pixels to a byte offset for pld */
mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift)
bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #4
blt 5f
0:
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #4
bge 0b
5:
bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
1:
/****************************************************/
.endif
/* handle the remaining trailing pixels */
tst WIDTH, #2
beq 2f
bilinear_interpolate_two_pixels src_fmt, dst_fmt
2:
tst WIDTH, #1
beq 3f
bilinear_interpolate_last_pixel src_fmt, dst_fmt
3:
.if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0
vpop {d8-d15}
.endif
pop {r4, r5, r6, r7, r8, r9}
bx lr
.unreq OUT
.unreq TOP
.unreq WT
.unreq WB
.unreq X
.unreq UX
.unreq WIDTH
.unreq TMP1
.unreq TMP2
.unreq PF_OFFS
.unreq TMP3
.unreq TMP4
.unreq STRIDE
.endfunc
.endm
/*****************************************************************************/
/* Register the pipelined a8r8g8b8 -> a8r8g8b8 fast path */
.set have_bilinear_interpolate_four_pixels_8888_8888, 1
/*
 * Pipeline head: compute the four source addresses (TMP1-TMP4, advancing
 * X by UX each time), load the top/bottom pixel pairs and start the
 * vertical interpolation into q8-q11 (top*d28 + bottom*d29).  The
 * horizontal pass for the first two results is started in q0/q1 using
 * the weights in d30/d31:
 *   q0 = (left << BITS) - left*w + right*w, i.e. a lerp by w.
 */
.macro bilinear_interpolate_four_pixels_8888_8888_head
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2 /* TMP1 = TOP + 4 * (X >> 16) */
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vld1.32 {d22}, [TMP1], STRIDE
vld1.32 {d23}, [TMP1]
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
vmull.u8 q8, d22, d28
vmlal.u8 q8, d23, d29
vld1.32 {d22}, [TMP2], STRIDE
vld1.32 {d23}, [TMP2]
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmull.u8 q9, d22, d28
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
/* horizontal lerp for pixel 0: q0 = d16*(2^BITS - w) + d17*w */
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q1, d18, d31
.endm
/*
 * Pipeline tail: finish the horizontal interpolation started by the head
 * (q0-q3), narrow the 32-bit accumulators back to 8-bit pixels and store
 * four a8r8g8b8 pixels.  Also advances the coordinate vector q12 and
 * refreshes the horizontal weights in q15 for the next group.
 */
.macro bilinear_interpolate_four_pixels_8888_8888_tail
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vadd.u16 q12, q12, q13
/* drop the 2*BITS fractional bits of the bilinear accumulators */
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vmovn.u16 d6, q0
vmovn.u16 d7, q2
vadd.u16 q12, q12, q13
vst1.32 {d6, d7}, [OUT, :128]!
.endm
/*
 * Steady-state loop body: interleaves the tail of the previous 4-pixel
 * group (finishing q0-q3, narrowing and storing) with the head of the
 * next one (address generation, loads, vertical interpolation into
 * q8-q11).  Instruction order is hand-scheduled to hide NEON latencies;
 * do not reorder.
 */
.macro bilinear_interpolate_four_pixels_8888_8888_tail_head
/* addresses for the next group's first two pixels */
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
/* narrow previous group's results */
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
/* addresses for the next group's last two pixels */
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d6, q0
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vmovn.u16 d7, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vadd.u16 q12, q12, q13
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
/* store previous group's four pixels */
vst1.32 {d6, d7}, [OUT, :128]!
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q1, d18, d31
.endm
/*****************************************************************************/
/* Register the pipelined a8r8g8b8 -> r5g6b5 8-pixel fast path */
.set have_bilinear_interpolate_eight_pixels_8888_0565, 1
/*
 * Pipeline head for 8 pixels: processes the first group of four pixels
 * through vertical (q8-q11) and horizontal (q0-q3) interpolation,
 * parks their narrowed results in d8/d9 (q4), and leaves the second
 * group of four mid-flight for the tail/tail_head to finish.
 * Hand-scheduled; do not reorder.
 */
.macro bilinear_interpolate_eight_pixels_8888_0565_head
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2 /* TMP1 = TOP + 4 * (X >> 16) */
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vld1.32 {d20}, [TMP1], STRIDE
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vld1.32 {d22}, [TMP2], STRIDE
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
/* horizontal lerp: q0 = d16*(2^BITS - w) + d17*w */
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q1, d18, d31
/* start the second group of four pixels */
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
/* park first group's narrowed pixels in q4 (d8/d9) */
vmovn.u16 d8, q0
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vmovn.u16 d9, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vadd.u16 q12, q12, q13
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q1, d18, d31
.endm
/*
 * Pipeline tail: finish the second group of four pixels (into q5, d10/d11),
 * then convert the eight a8r8g8b8 results held in q4/q5 to r5g6b5:
 * the vuzp passes separate the byte planes, vshll positions each
 * component, and vsri inserts them at bits 5 and 11 to form 565.
 */
.macro bilinear_interpolate_eight_pixels_8888_0565_tail
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vmovn.u16 d10, q0
vmovn.u16 d11, q2
vadd.u16 q12, q12, q13
/* deinterleave packed pixels into per-component planes */
vuzp.u8 d8, d9
vuzp.u8 d10, d11
vuzp.u8 d9, d11
vuzp.u8 d8, d10
/* pack components into r5g6b5 */
vshll.u8 q6, d9, #8
vshll.u8 q5, d10, #8
vshll.u8 q7, d8, #8
vsri.u16 q5, q6, #5
vsri.u16 q5, q7, #11
vst1.32 {d10, d11}, [OUT, :128]!
.endm
/*
 * Steady-state 8-pixel loop body: finishes the previous eight pixels
 * (q4/q5 deinterleave + 565 packing + store) while fetching and
 * vertically/horizontally interpolating the next eight.  The second half
 * of the macro mirrors the first for the next group of four pixels.
 * Hand-scheduled; do not reorder.
 */
.macro bilinear_interpolate_eight_pixels_8888_0565_tail_head
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vuzp.u8 d8, d9
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d10, q0
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vmovn.u16 d11, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vadd.u16 q12, q12, q13
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vuzp.u8 d10, d11
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vmlsl.u16 q1, d18, d31
/* second group of four pixels */
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vuzp.u8 d9, d11
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vuzp.u8 d8, d10
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
/* pack previous group into r5g6b5 in q5 */
vshll.u8 q6, d9, #8
vshll.u8 q5, d10, #8
vshll.u8 q7, d8, #8
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vsri.u16 q5, q6, #5
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vsri.u16 q5, q7, #11
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d8, q0
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vmovn.u16 d9, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vadd.u16 q12, q12, q13
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
/* store previous eight r5g6b5 pixels */
vst1.32 {d10, d11}, [OUT, :128]!
vmlsl.u16 q1, d18, d31
.endm
/*****************************************************************************/
/*
 * Instantiate the SRC-operator bilinear scanline scalers.
 * Arguments: fname, src_fmt, dst_fmt, src_bpp_shift, dst_bpp_shift,
 *            prefetch_distance (pixels), flags.
 */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \
2, 2, 28, BILINEAR_FLAG_UNROLL_4
/* uses q4-q7, so callee-saved d8-d15 must be preserved */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \
2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \
1, 2, 28, BILINEAR_FLAG_UNROLL_4
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \
1, 1, 28, BILINEAR_FLAG_UNROLL_4
|
iMAGRAY/Shelldone | 43,532 | deps/cairo/pixman/pixman/pixman-arma64-neon-asm-bilinear.S | /*
* Copyright © 2011 SCore Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
* Author: Taekyun Kim (tkq.kim@samsung.com)
*/
/*
* This file contains scaled bilinear scanline functions implemented
* using older siarhei's bilinear macro template.
*
* << General scanline function procedures >>
* 1. bilinear interpolate source pixels
* 2. load mask pixels
* 3. load destination pixels
* 4. duplicate mask to fill whole register
* 5. interleave source & destination pixels
* 6. apply mask to source pixels
* 7. combine source & destination pixels
* 8. Deinterleave final result
* 9. store destination pixels
*
* All registers with single number (i.e. src0, tmp0) are 64-bits registers.
* Registers with double numbers(src01, dst01) are 128-bits registers.
* All temp registers can be used freely outside the code block.
* Assume that symbol(register .req) OUT and MASK are defined at caller of these macro blocks.
*
* Remarks
* There can be lots of pipeline stalls inside code block and between code blocks.
* Further optimizations will be done by new macro templates using head/tail_head/tail scheme.
*/
/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined (__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.arch armv8-a
/* .altmacro enables the &arg& argument-concatenation syntax used below */
.altmacro
.p2align 2
#include "pixman-private.h"
#include "pixman-arm-asm.h"
#include "pixman-arma64-neon-asm.h"
/*
* Bilinear macros from pixman-arm-neon-asm.S
*/
/*
* Bilinear scaling support code which tries to provide pixel fetching, color
* format conversion, and interpolation as separate macros which can be used
* as the basic building blocks for constructing bilinear scanline functions.
*/
/*
 * Load one vertically adjacent pair of a8r8g8b8 source pixels at
 * horizontal position X >> 16 (top row into reg1, bottom row into reg2)
 * and advance X by UX.  'tmp' is unused; kept for interface parity with
 * bilinear_load_0565.
 * Fix: "®1&"/"®2&" were mojibake for the altmacro concatenations
 * "&reg1&"/"&reg2&" (an HTML "&reg;" entity was decoded in transit);
 * restored so the macro assembles as intended.
 */
.macro bilinear_load_8888 reg1, reg2, tmp
asr WTMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #2 /* TMP1 = TOP + 4 * (X >> 16) */
ld1 {&reg1&.2s}, [TMP1], STRIDE
ld1 {&reg2&.2s}, [TMP1]
.endm
/*
 * Load one vertically adjacent pair of r5g6b5 source pixels at
 * X >> 16 into the two lanes of reg2, advance X by UX, then expand
 * them to x8r8g8b8 across reg1/reg2 using 'tmp' as scratch.
 * Fix: "®2&" was mojibake for the altmacro concatenation "&reg2&"
 * (decoded HTML "&reg;" entity); restored.
 */
.macro bilinear_load_0565 reg1, reg2, tmp
asr WTMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #1 /* TMP1 = TOP + 2 * (X >> 16) */
ld1 {&reg2&.s}[0], [TMP1], STRIDE
ld1 {&reg2&.s}[1], [TMP1]
convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
.endm
/*
 * Load two a8r8g8b8 pixel pairs and vertically interpolate them:
 * acc = top * v28 (top weight) + bottom * v29 (bottom weight),
 * widened to 16 bits per component.  tmp1/tmp2 are passed through to
 * bilinear_load_8888 (unused there).
 * Fix: "®N&" were mojibake for the altmacro concatenations "&regN&"
 * (decoded HTML "&reg;" entity); restored.
 */
.macro bilinear_load_and_vertical_interpolate_two_8888 \
acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2
bilinear_load_8888 reg1, reg2, tmp1
umull &acc1&.8h, &reg1&.8b, v28.8b
umlal &acc1&.8h, &reg2&.8b, v29.8b
bilinear_load_8888 reg3, reg4, tmp2
umull &acc2&.8h, &reg3&.8b, v28.8b
umlal &acc2&.8h, &reg4&.8b, v29.8b
.endm
/*
 * Load and vertically interpolate four a8r8g8b8 pixel pairs: simply
 * expands the two-pixel macro twice (x group then y group).
 */
.macro bilinear_load_and_vertical_interpolate_four_8888 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
bilinear_load_and_vertical_interpolate_two_8888 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi
bilinear_load_and_vertical_interpolate_two_8888 \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
.endm
/*
 * Emulate the 32-bit NEON two-operand vzip/vuzp behavior on AArch64
 * (where zip1/zip2/uzp1/uzp2 are three-operand) using v24 as scratch.
 * NOTE(review): v24 is clobbered — callers must not hold live data there.
 */
.macro vzip reg1, reg2
zip1 v24.8b, reg1, reg2
zip2 reg2, reg1, reg2
mov reg1, v24.8b
.endm
.macro vuzp reg1, reg2
uzp1 v24.8b, reg1, reg2
uzp2 reg2, reg1, reg2
mov reg1, v24.8b
.endm
/*
 * Load two r5g6b5 pixel pairs (four lanes of acc2), expand them to
 * x8r8g8b8 planes in reg1-reg4, reorder with vzip, and vertically
 * interpolate: acc = top * v28 + bottom * v29, widened to 16 bits.
 * Fix: "®N&" were mojibake for the altmacro concatenations "&regN&"
 * (decoded HTML "&reg;" entity); restored so the macro assembles.
 */
.macro bilinear_load_and_vertical_interpolate_two_0565 \
acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
asr WTMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #1
asr WTMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #1
ld1 {&acc2&.s}[0], [TMP1], STRIDE
ld1 {&acc2&.s}[2], [TMP2], STRIDE
ld1 {&acc2&.s}[1], [TMP1]
ld1 {&acc2&.s}[3], [TMP2]
convert_0565_to_x888 acc2, reg3, reg2, reg1
vzip &reg1&.8b, &reg3&.8b
vzip &reg2&.8b, &reg4&.8b
vzip &reg3&.8b, &reg4&.8b
vzip &reg1&.8b, &reg2&.8b
umull &acc1&.8h, &reg1&.8b, v28.8b
umlal &acc1&.8h, &reg2&.8b, v29.8b
umull &acc2&.8h, &reg3&.8b, v28.8b
umlal &acc2&.8h, &reg4&.8b, v29.8b
.endm
/*
 * Load and vertically interpolate four r5g6b5 pixel pairs (x group and
 * y group).  Same operation as the two-pixel 0565 macro done twice, but
 * with loads, conversions, vzip reordering and multiplies interleaved
 * to hide latencies.  Hand-scheduled; do not reorder.
 */
.macro bilinear_load_and_vertical_interpolate_four_0565 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
asr WTMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #1
asr WTMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #1
ld1 {&xacc2&.s}[0], [TMP1], STRIDE
ld1 {&xacc2&.s}[2], [TMP2], STRIDE
ld1 {&xacc2&.s}[1], [TMP1]
ld1 {&xacc2&.s}[3], [TMP2]
convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
asr WTMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #1
asr WTMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #1
ld1 {&yacc2&.s}[0], [TMP1], STRIDE
vzip &xreg1&.8b, &xreg3&.8b
ld1 {&yacc2&.s}[2], [TMP2], STRIDE
vzip &xreg2&.8b, &xreg4&.8b
ld1 {&yacc2&.s}[1], [TMP1]
vzip &xreg3&.8b, &xreg4&.8b
ld1 {&yacc2&.s}[3], [TMP2]
vzip &xreg1&.8b, &xreg2&.8b
convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
umull &xacc1&.8h, &xreg1&.8b, v28.8b
vzip &yreg1&.8b, &yreg3&.8b
umlal &xacc1&.8h, &xreg2&.8b, v29.8b
vzip &yreg2&.8b, &yreg4&.8b
umull &xacc2&.8h, &xreg3&.8b, v28.8b
vzip &yreg3&.8b, &yreg4&.8b
umlal &xacc2&.8h, &xreg4&.8b, v29.8b
vzip &yreg1&.8b, &yreg2&.8b
umull &yacc1&.8h, &yreg1&.8b, v28.8b
umlal &yacc1&.8h, &yreg2&.8b, v29.8b
umull &yacc2&.8h, &yreg3&.8b, v28.8b
umlal &yacc2&.8h, &yreg4&.8b, v29.8b
.endm
/*
 * Store numpix interpolated a8r8g8b8 pixels from v0/v1 and advance OUT.
 * tmp1/tmp2 are unused; kept for interface parity with bilinear_store_0565.
 */
.macro bilinear_store_8888 numpix, tmp1, tmp2
.if numpix == 4
st1 {v0.2s, v1.2s}, [OUT], #16
.elseif numpix == 2
st1 {v0.2s}, [OUT], #8
.elseif numpix == 1
st1 {v0.s}[0], [OUT], #4
.else
.error bilinear_store_8888 numpix is unsupported
.endif
.endm
/*
 * Convert numpix interpolated pixels from packed a8r8g8b8 in v0-v3 to
 * r5g6b5 (vuzp passes separate the component planes first) and store
 * them, advancing OUT.  tmp1/tmp2 are scratch for the conversion.
 */
.macro bilinear_store_0565 numpix, tmp1, tmp2
vuzp v0.8b, v1.8b
vuzp v2.8b, v3.8b
vuzp v1.8b, v3.8b
vuzp v0.8b, v2.8b
convert_8888_to_0565 v2, v1, v0, v1, tmp1, tmp2
.if numpix == 4
st1 {v1.4h}, [OUT], #8
.elseif numpix == 2
st1 {v1.s}[0], [OUT], #4
.elseif numpix == 1
st1 {v1.h}[0], [OUT], #2
.else
.error bilinear_store_0565 numpix is unsupported
.endif
.endm
/*
* Macros for loading mask pixels into register 'mask'.
* dup must be done in somewhere else.
*/
/* 'x' (no-mask) variant: nothing to load */
.macro bilinear_load_mask_x numpix, mask
.endm
/* a8 mask: load numpix mask bytes, advance MASK, prefetch ahead */
.macro bilinear_load_mask_8 numpix, mask
.if numpix == 4
ld1 {&mask&.s}[0], [MASK], #4
.elseif numpix == 2
ld1 {&mask&.h}[0], [MASK], #2
.elseif numpix == 1
ld1 {&mask&.b}[0], [MASK], #1
.else
.error bilinear_load_mask_8 numpix is unsupported
.endif
prfm PREFETCH_MODE, [MASK, #prefetch_offset]
.endm
/* dispatch on mask format ('x' or '8') */
.macro bilinear_load_mask mask_fmt, numpix, mask
bilinear_load_mask_&mask_fmt numpix, mask
.endm
/*
* Macros for loading destination pixels into register 'dst0' and 'dst1'.
* Interleave should be done somewhere else.
*/
/* SRC operator never reads the destination: no-op variants */
.macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01
.endm
.macro bilinear_load_dst_8888_src numpix, dst0, dst1, dst01
.endm
/*
 * Load numpix a8r8g8b8 destination pixels into dst0/dst1 (without
 * advancing OUT), mirror them into the 128-bit dst01 view, and
 * prefetch ahead in the destination.
 */
.macro bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.if numpix == 4
ld1 {&dst0&.2s, &dst1&.2s}, [OUT]
.elseif numpix == 2
ld1 {&dst0&.2s}, [OUT]
.elseif numpix == 1
ld1 {&dst0&.s}[0], [OUT]
.else
.error bilinear_load_dst_8888 numpix is unsupported
.endif
mov &dst01&.d[0], &dst0&.d[0]
mov &dst01&.d[1], &dst1&.d[0]
prfm PREFETCH_MODE, [OUT, #(prefetch_offset * 4)]
.endm
/* OVER and ADD both need the destination loaded */
.macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01
bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.endm
.macro bilinear_load_dst_8888_add numpix, dst0, dst1, dst01
bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.endm
/* dispatch on destination format and operator */
.macro bilinear_load_dst dst_fmt, op, numpix, dst0, dst1, dst01
bilinear_load_dst_&dst_fmt&_&op numpix, dst0, dst1, dst01
.endm
/*
* Macros for duplicating partially loaded mask to fill entire register.
* We will apply mask to interleaved source pixels, that is
* (r0, r1, r2, r3, g0, g1, g2, g3) x (m0, m1, m2, m3, m0, m1, m2, m3)
* (b0, b1, b2, b3, a0, a1, a2, a3) x (m0, m1, m2, m3, m0, m1, m2, m3)
* So, we need to duplicate loaded mask into whole register.
*
* For two pixel case
* (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
* (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
* We can do some optimizations for this including last pixel cases.
*/
/* 'x' (no-mask) variant: nothing to duplicate */
.macro bilinear_duplicate_mask_x numpix, mask
.endm
/* replicate the numpix loaded mask bytes across the whole register
 * (see the layout description in the comment above) */
.macro bilinear_duplicate_mask_8 numpix, mask
.if numpix == 4
dup &mask&.2s, &mask&.s[0]
.elseif numpix == 2
dup &mask&.4h, &mask&.h[0]
.elseif numpix == 1
dup &mask&.8b, &mask&.b[0]
.else
.error bilinear_duplicate_mask_8 is unsupported
.endif
.endm
/* dispatch on mask format */
.macro bilinear_duplicate_mask mask_fmt, numpix, mask
bilinear_duplicate_mask_&mask_fmt numpix, mask
.endm
/*
* Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form.
* Interleave should be done when the mask is enabled or the operator is 'over'.
*/
/*
 * Deinterleave packed pixels into planar rrrr/gggg/bbbb/aaaa form
 * (two vuzp passes over each src/dst half) and refresh the 128-bit
 * combined views src01/dst01.
 */
.macro bilinear_interleave src0, src1, src01, dst0, dst1, dst01
vuzp &src0&.8b, &src1&.8b
vuzp &dst0&.8b, &dst1&.8b
vuzp &src0&.8b, &src1&.8b
vuzp &dst0&.8b, &dst1&.8b
mov &src01&.d[1], &src1&.d[0]
mov &src01&.d[0], &src0&.d[0]
mov &dst01&.d[1], &dst1&.d[0]
mov &dst01&.d[0], &dst0&.d[0]
.endm
/* interleaving is only needed when a mask is applied or op is 'over'/'add':
 * no-mask SRC keeps pixels packed */
.macro bilinear_interleave_src_dst_x_src \
numpix, src0, src1, src01, dst0, dst1, dst01
.endm
.macro bilinear_interleave_src_dst_x_over \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, src01, dst0, dst1, dst01
.endm
.macro bilinear_interleave_src_dst_x_add \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, src01, dst0, dst1, dst01
.endm
.macro bilinear_interleave_src_dst_8_src \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, src01, dst0, dst1, dst01
.endm
.macro bilinear_interleave_src_dst_8_over \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, src01, dst0, dst1, dst01
.endm
.macro bilinear_interleave_src_dst_8_add \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, src01, dst0, dst1, dst01
.endm
/* dispatch on mask format and operator */
.macro bilinear_interleave_src_dst \
mask_fmt, op, numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave_src_dst_&mask_fmt&_&op \
numpix, src0, src1, src01, dst0, dst1, dst01
.endm
/*
* Macros for applying masks to src pixels. (see combine_mask_u() function)
* src, dst should be in interleaved form.
* mask register should be in form (m0, m1, m2, m3).
*/
/* 'x' (no-mask) variant: source passes through unchanged */
.macro bilinear_apply_mask_to_src_x \
numpix, src0, src1, src01, mask, \
tmp01, tmp23, tmp45, tmp67
.endm
/*
 * Multiply interleaved source planes by the a8 mask with correct
 * rounding: src = (src * mask + 127) / 255, computed via the
 * x/256 + rounding-add trick (urshr + raddhn).
 */
.macro bilinear_apply_mask_to_src_8 \
numpix, src0, src1, src01, mask, \
tmp01, tmp23, tmp45, tmp67
umull &tmp01&.8h, &src0&.8b, &mask&.8b
umull &tmp23&.8h, &src1&.8b, &mask&.8b
/* bubbles */
urshr &tmp45&.8h, &tmp01&.8h, #8
urshr &tmp67&.8h, &tmp23&.8h, #8
/* bubbles */
raddhn &src0&.8b, &tmp45&.8h, &tmp01&.8h
raddhn &src1&.8b, &tmp67&.8h, &tmp23&.8h
mov &src01&.d[0], &src0&.d[0]
mov &src01&.d[1], &src1&.d[0]
.endm
/* dispatch on mask format */
.macro bilinear_apply_mask_to_src \
mask_fmt, numpix, src0, src1, src01, mask, \
tmp01, tmp23, tmp45, tmp67
bilinear_apply_mask_to_src_&mask_fmt \
numpix, src0, src1, src01, mask, \
tmp01, tmp23, tmp45, tmp67
.endm
/*
* Macros for combining src and destination pixels.
* Interleave or not is depending on operator 'op'.
*/
/* SRC operator: source simply replaces destination — nothing to combine */
.macro bilinear_combine_src \
numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
.endm
/*
 * OVER operator on interleaved planes: result = src + dst * (255 - a).
 * src1 lane 1 holds the alpha plane (per the rrrr/gggg | bbbb/aaaa
 * layout documented above), so dup + mvn yields the inverse alpha.
 */
.macro bilinear_combine_over \
numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
dup &tmp8&.2s, &src1&.s[1]
/* bubbles */
mvn &tmp8&.8b, &tmp8&.8b /* tmp8 = 255 - alpha */
/* bubbles */
umull &tmp01&.8h, &dst0&.8b, &tmp8&.8b
/* bubbles */
umull &tmp23&.8h, &dst1&.8b, &tmp8&.8b
/* bubbles */
/* rounding division by 255 (x/256 + rounding-add trick) */
urshr &tmp45&.8h, &tmp01&.8h, #8
urshr &tmp67&.8h, &tmp23&.8h, #8
/* bubbles */
raddhn &dst0&.8b, &tmp45&.8h, &tmp01&.8h
raddhn &dst1&.8b, &tmp67&.8h, &tmp23&.8h
mov &dst01&.d[0], &dst0&.d[0]
mov &dst01&.d[1], &dst1&.d[0]
/* bubbles */
uqadd &src0&.8b, &dst0&.8b, &src0&.8b
uqadd &src1&.8b, &dst1&.8b, &src1&.8b
mov &src01&.d[0], &src0&.d[0]
mov &src01&.d[1], &src1&.d[0]
.endm
/* ADD operator: saturating add of source and destination */
.macro bilinear_combine_add \
numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
uqadd &src0&.8b, &dst0&.8b, &src0&.8b
uqadd &src1&.8b, &dst1&.8b, &src1&.8b
mov &src01&.d[0], &src0&.d[0]
mov &src01&.d[1], &src1&.d[0]
.endm
/* dispatch on operator */
.macro bilinear_combine \
op, numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
bilinear_combine_&op \
numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
.endm
/*
* Macros for final deinterleaving of destination pixels if needed.
*/
/*
 * Convert the final result from planar back to packed pixel form
 * (two vuzp passes invert the earlier interleave) and refresh dst01.
 */
.macro bilinear_deinterleave numpix, dst0, dst1, dst01
vuzp &dst0&.8b, &dst1&.8b
/* bubbles */
vuzp &dst0&.8b, &dst1&.8b
mov &dst01&.d[0], &dst0&.d[0]
mov &dst01&.d[1], &dst1&.d[0]
.endm
/* no-mask SRC never interleaved, so nothing to undo */
.macro bilinear_deinterleave_dst_x_src numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_x_over numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_x_add numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_src numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_over numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_add numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
/* dispatch on mask format and operator */
.macro bilinear_deinterleave_dst mask_fmt, op, numpix, dst0, dst1, dst01
bilinear_deinterleave_dst_&mask_fmt&_&op numpix, dst0, dst1, dst01
.endm
/*
 * Interpolate a single pixel: load source pair, mask and destination;
 * vertical lerp (v28/v29 weights) into v2, horizontal lerp (v15.h[0]
 * weight) into v0; then apply mask, combine with dst per 'op' and store.
 * Does not advance the horizontal coordinate vector v12.
 */
.macro bilinear_interpolate_last_pixel src_fmt, mask_fmt, dst_fmt, op
bilinear_load_&src_fmt v0, v1, v2
bilinear_load_mask mask_fmt, 1, v4
bilinear_load_dst dst_fmt, op, 1, v18, v19, v9
umull v2.8h, v0.8b, v28.8b
umlal v2.8h, v1.8b, v29.8b
/* 5 cycles bubble */
/* horizontal lerp: v0 = lo*(2^BITS - w) + hi*w */
ushll v0.4s, v2.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v2.4h, v15.h[0]
umlal2 v0.4s, v2.8h, v15.h[0]
/* 5 cycles bubble */
bilinear_duplicate_mask mask_fmt, 1, v4
/* drop the 2*BITS fractional bits */
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
/* 3 cycles bubble */
xtn v0.8b, v0.8h
/* 1 cycle bubble */
bilinear_interleave_src_dst \
mask_fmt, op, 1, v0, v1, v0, v18, v19, v9
bilinear_apply_mask_to_src \
mask_fmt, 1, v0, v1, v0, v4, \
v3, v8, v10, v11
bilinear_combine \
op, 1, v0, v1, v0, v18, v19, v9, \
v3, v8, v10, v11, v5
bilinear_deinterleave_dst mask_fmt, op, 1, v0, v1, v0
bilinear_store_&dst_fmt 1, v17, v18
.endm
/*
 * Interpolate two pixels: vertical interpolation via the
 * load_and_vertical_interpolate helper (results in v1/v11), then two
 * horizontal lerps with per-pixel weights v15.h[0]/v15.h[4]; advances
 * the coordinate vector v12 by v13 and refreshes the weights v15.
 */
.macro bilinear_interpolate_two_pixels src_fmt, mask_fmt, dst_fmt, op
bilinear_load_and_vertical_interpolate_two_&src_fmt \
v1, v11, v18, v19, v20, v21, v22, v23
bilinear_load_mask mask_fmt, 2, v4
bilinear_load_dst dst_fmt, op, 2, v18, v19, v9
ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v1.4h, v15.h[0]
umlal2 v0.4s, v1.8h, v15.h[0]
ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v10.4s, v11.4h, v15.h[4]
umlal2 v10.4s, v11.8h, v15.h[4]
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
bilinear_duplicate_mask mask_fmt, 2, v4
/* advance coordinates and recompute horizontal weights */
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
xtn v0.8b, v0.8h
bilinear_interleave_src_dst \
mask_fmt, op, 2, v0, v1, v0, v18, v19, v9
bilinear_apply_mask_to_src \
mask_fmt, 2, v0, v1, v0, v4, \
v3, v8, v10, v11
bilinear_combine \
op, 2, v0, v1, v0, v18, v19, v9, \
v3, v8, v10, v11, v5
bilinear_deinterleave_dst mask_fmt, op, 2, v0, v1, v0
bilinear_store_&dst_fmt 2, v16, v17
.endm
/*
 * Interpolate four pixels: vertical interpolation of two pixel groups
 * (v1/v11 and v3/v9), two rounds of horizontal lerps with weights from
 * v15 (recomputed from the coordinate vector v12 mid-way), then the
 * mask/combine/store pipeline.  Prefetches the source row at TMP1.
 */
.macro bilinear_interpolate_four_pixels src_fmt, mask_fmt, dst_fmt, op
bilinear_load_and_vertical_interpolate_four_&src_fmt \
v1, v11, v4, v5, v6, v7, v22, v23 \
v3, v9, v16, v17, v20, v21, v18, v19
prfm PREFETCH_MODE, [TMP1, PF_OFFS]
sub TMP1, TMP1, STRIDE
prfm PREFETCH_MODE, [TMP1, PF_OFFS]
ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v1.4h, v15.h[0]
umlal2 v0.4s, v1.8h, v15.h[0]
ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v10.4s, v11.4h, v15.h[4]
umlal2 v10.4s, v11.8h, v15.h[4]
/* weights for the second pixel pair */
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
ushll v2.4s, v3.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v2.4s, v3.4h, v15.h[0]
umlal2 v2.4s, v3.8h, v15.h[0]
ushll v8.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v8.4s, v9.4h, v15.h[4]
umlal2 v8.4s, v9.8h, v15.h[4]
add v12.8h, v12.8h, v13.8h
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v2.8h, v8.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
bilinear_load_mask mask_fmt, 4, v4
bilinear_duplicate_mask mask_fmt, 4, v4
/* advance coordinates and recompute weights for the next group */
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
xtn v0.8b, v0.8h
xtn v1.8b, v2.8h
add v12.8h, v12.8h, v13.8h
bilinear_load_dst dst_fmt, op, 4, v2, v3, v21
bilinear_interleave_src_dst \
mask_fmt, op, 4, v0, v1, v0, v2, v3, v11
bilinear_apply_mask_to_src \
mask_fmt, 4, v0, v1, v0, v4, \
v6, v8, v9, v10
bilinear_combine \
op, 4, v0, v1, v0, v2, v3, v1, \
v6, v8, v9, v10, v23
bilinear_deinterleave_dst mask_fmt, op, 4, v0, v1, v0
bilinear_store_&dst_fmt 4, v6, v7
.endm
/* Flag bits for the 'flags' argument of generate_bilinear_scanline_func */
.set BILINEAR_FLAG_USE_MASK, 1 /* function takes a mask argument (x1 = MASK) */
.set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* also uses callee-saved v8-v15 */
/*
* Main template macro for generating NEON optimized bilinear scanline functions.
*
* The bilinear scanline generator macro takes the following arguments:
* fname - name of the function to generate
* src_fmt - source color format (8888 or 0565)
* dst_fmt - destination color format (8888 or 0565)
* src/dst_bpp_shift - (1 << bpp_shift) is the size of src/dst pixel in bytes
* process_last_pixel - code block that interpolate one pixel and does not
* update horizontal weight
* process_two_pixels - code block that interpolate two pixels and update
* horizontal weight
* process_four_pixels - code block that interpolate four pixels and update
* horizontal weight
* process_pixblock_head - head part of middle loop
* process_pixblock_tail - tail part of middle loop
* process_pixblock_tail_head - tail_head of middle loop
* pixblock_size - number of pixels processed in a single middle loop
* prefetch_distance - prefetch in the source image by that many pixels ahead
*/
.macro generate_bilinear_scanline_func \
fname, \
src_fmt, dst_fmt, src_bpp_shift, dst_bpp_shift, \
bilinear_process_last_pixel, \
bilinear_process_two_pixels, \
bilinear_process_four_pixels, \
bilinear_process_pixblock_head, \
bilinear_process_pixblock_tail, \
bilinear_process_pixblock_tail_head, \
pixblock_size, \
prefetch_distance, \
flags
pixman_asm_function fname
/* Only pixblock sizes of 4 and 8 are supported by this template. */
.if pixblock_size == 8
.elseif pixblock_size == 4
.else
.error unsupported pixblock size
.endif
.if ((flags) & BILINEAR_FLAG_USE_MASK) == 0
/*
 * No-mask variant: the arguments are
 *   (out, top, bottom, wt, wb, x, ux, width)
 * and all of them arrive in registers x0-x7 (AAPCS64).
 */
OUT .req x0
TOP .req x1
BOTTOM .req x2
WT .req x3
WWT .req w3
WB .req x4
WWB .req w4
X .req w5
UX .req w6
WIDTH .req x7
TMP1 .req x10
WTMP1 .req w10
TMP2 .req x11
WTMP2 .req w11
PF_OFFS .req x12
TMP3 .req x13
WTMP3 .req w13
TMP4 .req x14
WTMP4 .req w14
STRIDE .req x15
DUMMY .req x30
/*
 * Prologue: save fp/lr, then spill the low 64 bits of v8-v15
 * (callee-saved under AAPCS64) and x10-x15 into the local frame.
 */
stp x29, x30, [sp, -16]!
mov x29, sp
sub sp, sp, 112
sub x29, x29, 64
st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
stp x10, x11, [x29, -80]
stp x12, x13, [x29, -96]
stp x14, x15, [x29, -112]
.else
/*
 * Mask variant: the arguments are
 *   (out, mask, top, bottom, wt, wb, x, ux, width)
 * so the first eight arrive in x0-x7 and 'width' is passed on the
 * caller's stack.
 */
OUT .req x0
MASK .req x1
TOP .req x2
BOTTOM .req x3
WT .req x4
WWT .req w4
WB .req x5
WWB .req w5
X .req w6
UX .req w7
WIDTH .req x8
TMP1 .req x10
WTMP1 .req w10
TMP2 .req x11
WTMP2 .req w11
PF_OFFS .req x12
TMP3 .req x13
WTMP3 .req w13
TMP4 .req x14
WTMP4 .req w14
STRIDE .req x15
DUMMY .req x30
.set prefetch_offset, prefetch_distance
stp x29, x30, [sp, -16]!
mov x29, sp
sub x29, x29, 64
st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
stp x10, x11, [x29, -80]
stp x12, x13, [x29, -96]
stp x14, x15, [x29, -112]
str x8, [x29, -120]
/* Reload WIDTH (w8): the 9th argument lives just above the saved fp/lr. */
ldr w8, [x29, 16]
sub sp, sp, 120
.endif
/* PF_OFFS = prefetch_distance * ux: source advance (16.16) per prefetch. */
mov WTMP1, #prefetch_distance
umull PF_OFFS, WTMP1, UX
/* Only the byte distance between the two source rows is needed from
 * here on, so BOTTOM can be released. */
sub STRIDE, BOTTOM, TOP
.unreq BOTTOM
cmp WIDTH, #0
ble 300f
/*
 * v12 = vector of fixed-point x coordinates, v13 = per-pixel x steps,
 * v28/v29 = top/bottom row weights.  The upper half of v12 is advanced
 * by one step so its lanes track the next pixel's coordinate.
 */
dup v12.8h, X
dup v13.8h, UX
dup v28.8b, WWT
dup v29.8b, WWB
mov v25.d[0], v12.d[1]
mov v26.d[0], v13.d[0]
add v25.4h, v25.4h, v26.4h
mov v12.d[1], v25.d[0]
/* ensure good destination alignment */
cmp WIDTH, #1
blt 100f
tst OUT, #(1 << dst_bpp_shift)
beq 100f
/* v15 = horizontal weights extracted from the coordinate accumulator. */
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
bilinear_process_last_pixel
sub WIDTH, WIDTH, #1
100:
/* From here on the x accumulator advances two pixels per step. */
add v13.8h, v13.8h, v13.8h
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
cmp WIDTH, #2
blt 100f
tst OUT, #(1 << (dst_bpp_shift + 1))
beq 100f
bilinear_process_two_pixels
sub WIDTH, WIDTH, #2
100:
.if pixblock_size == 8
/* Align the destination further, up to a four-pixel boundary. */
cmp WIDTH, #4
blt 100f
tst OUT, #(1 << (dst_bpp_shift + 2))
beq 100f
bilinear_process_four_pixels
sub WIDTH, WIDTH, #4
100:
.endif
/* Software-pipelined main loop: head, (tail_head)*, tail. */
subs WIDTH, WIDTH, #pixblock_size
blt 100f
/* Convert PF_OFFS from 16.16 pixels into a byte offset. */
asr PF_OFFS, PF_OFFS, #(16 - src_bpp_shift)
bilinear_process_pixblock_head
subs WIDTH, WIDTH, #pixblock_size
blt 500f
0:
bilinear_process_pixblock_tail_head
subs WIDTH, WIDTH, #pixblock_size
bge 0b
500:
bilinear_process_pixblock_tail
100:
.if pixblock_size == 8
tst WIDTH, #4
beq 200f
bilinear_process_four_pixels
200:
.endif
/* handle the remaining trailing pixels */
tst WIDTH, #2
beq 200f
bilinear_process_two_pixels
200:
tst WIDTH, #1
beq 300f
bilinear_process_last_pixel
300:
/* Epilogue: restore v8-v15 and the spilled general registers. */
.if ((flags) & BILINEAR_FLAG_USE_MASK) == 0
sub x29, x29, 64
ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
ldp x10, x11, [x29, -80]
ldp x12, x13, [x29, -96]
ldp x14, x15, [x29, -112]
mov sp, x29
ldp x29, x30, [sp], 16
.else
sub x29, x29, 64
ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
ldp x10, x11, [x29, -80]
ldp x12, x13, [x29, -96]
ldp x14, x15, [x29, -112]
ldr x8, [x29, -120]
mov sp, x29
ldp x29, x30, [sp], 16
.endif
ret
/* Release the register aliases so the macro can be expanded again. */
.unreq OUT
.unreq TOP
.unreq WT
.unreq WWT
.unreq WB
.unreq WWB
.unreq X
.unreq UX
.unreq WIDTH
.unreq TMP1
.unreq WTMP1
.unreq TMP2
.unreq PF_OFFS
.unreq TMP3
.unreq TMP4
.unreq STRIDE
.if ((flags) & BILINEAR_FLAG_USE_MASK) != 0
.unreq MASK
.endif
.endfunc
.endm
/* src_8888_8_8888 */
/*
 * Hooks for the 32bpp-source, 8-bit-mask, 32bpp-destination SRC operation.
 * All work is delegated to the generic bilinear_interpolate_* macros; the
 * pixblock "head" does everything and the "tail" is empty, so there is no
 * real software pipelining for this combination.
 */
.macro bilinear_src_8888_8_8888_process_last_pixel
bilinear_interpolate_last_pixel 8888, 8, 8888, src
.endm
.macro bilinear_src_8888_8_8888_process_two_pixels
bilinear_interpolate_two_pixels 8888, 8, 8888, src
.endm
.macro bilinear_src_8888_8_8888_process_four_pixels
bilinear_interpolate_four_pixels 8888, 8, 8888, src
.endm
.macro bilinear_src_8888_8_8888_process_pixblock_head
bilinear_src_8888_8_8888_process_four_pixels
.endm
.macro bilinear_src_8888_8_8888_process_pixblock_tail
.endm
.macro bilinear_src_8888_8_8888_process_pixblock_tail_head
bilinear_src_8888_8_8888_process_pixblock_tail
bilinear_src_8888_8_8888_process_pixblock_head
.endm
/* src_8888_8_0565 */
/*
 * Hooks for 32bpp source, 8-bit mask, r5g6b5 destination, SRC operation.
 * Pure delegation to the generic bilinear_interpolate_* macros (empty
 * tail, head == four-pixel step).
 */
.macro bilinear_src_8888_8_0565_process_last_pixel
bilinear_interpolate_last_pixel 8888, 8, 0565, src
.endm
.macro bilinear_src_8888_8_0565_process_two_pixels
bilinear_interpolate_two_pixels 8888, 8, 0565, src
.endm
.macro bilinear_src_8888_8_0565_process_four_pixels
bilinear_interpolate_four_pixels 8888, 8, 0565, src
.endm
.macro bilinear_src_8888_8_0565_process_pixblock_head
bilinear_src_8888_8_0565_process_four_pixels
.endm
.macro bilinear_src_8888_8_0565_process_pixblock_tail
.endm
.macro bilinear_src_8888_8_0565_process_pixblock_tail_head
bilinear_src_8888_8_0565_process_pixblock_tail
bilinear_src_8888_8_0565_process_pixblock_head
.endm
/* src_0565_8_x888 */
/*
 * Hooks for r5g6b5 source, 8-bit mask, 32bpp (x888) destination, SRC
 * operation.  Pure delegation to the generic bilinear_interpolate_*
 * macros (empty tail, head == four-pixel step).
 */
.macro bilinear_src_0565_8_x888_process_last_pixel
bilinear_interpolate_last_pixel 0565, 8, 8888, src
.endm
.macro bilinear_src_0565_8_x888_process_two_pixels
bilinear_interpolate_two_pixels 0565, 8, 8888, src
.endm
.macro bilinear_src_0565_8_x888_process_four_pixels
bilinear_interpolate_four_pixels 0565, 8, 8888, src
.endm
.macro bilinear_src_0565_8_x888_process_pixblock_head
bilinear_src_0565_8_x888_process_four_pixels
.endm
.macro bilinear_src_0565_8_x888_process_pixblock_tail
.endm
.macro bilinear_src_0565_8_x888_process_pixblock_tail_head
bilinear_src_0565_8_x888_process_pixblock_tail
bilinear_src_0565_8_x888_process_pixblock_head
.endm
/* src_0565_8_0565 */
/*
 * Hooks for r5g6b5 source, 8-bit mask, r5g6b5 destination, SRC operation.
 * Pure delegation to the generic bilinear_interpolate_* macros (empty
 * tail, head == four-pixel step).
 */
.macro bilinear_src_0565_8_0565_process_last_pixel
bilinear_interpolate_last_pixel 0565, 8, 0565, src
.endm
.macro bilinear_src_0565_8_0565_process_two_pixels
bilinear_interpolate_two_pixels 0565, 8, 0565, src
.endm
.macro bilinear_src_0565_8_0565_process_four_pixels
bilinear_interpolate_four_pixels 0565, 8, 0565, src
.endm
.macro bilinear_src_0565_8_0565_process_pixblock_head
bilinear_src_0565_8_0565_process_four_pixels
.endm
.macro bilinear_src_0565_8_0565_process_pixblock_tail
.endm
.macro bilinear_src_0565_8_0565_process_pixblock_tail_head
bilinear_src_0565_8_0565_process_pixblock_tail
bilinear_src_0565_8_0565_process_pixblock_head
.endm
/* over_8888_8888 */
/*
 * Edge-pixel hooks for the 32bpp OVER operation (no mask).  These three
 * delegate to the generic macros; the pipelined head/tail/tail_head for
 * the main loop are hand-written below.
 */
.macro bilinear_over_8888_8888_process_last_pixel
bilinear_interpolate_last_pixel 8888, x, 8888, over
.endm
.macro bilinear_over_8888_8888_process_two_pixels
bilinear_interpolate_two_pixels 8888, x, 8888, over
.endm
.macro bilinear_over_8888_8888_process_four_pixels
bilinear_interpolate_four_pixels 8888, x, 8888, over
.endm
/*
 * Pipeline head for over_8888_8888: compute the addresses of four
 * source pixel pairs from the 16.16 x coordinate, load each top/bottom
 * pair, do the vertical interpolation (umull/umlal with the v28/v29 row
 * weights into v8-v11), and start the horizontal interpolation of the
 * first two pixels (v0/v1) using the weights in v15.
 */
.macro bilinear_over_8888_8888_process_pixblock_head
asr WTMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #2
asr WTMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #2
/* v22 = top pair, v23 = bottom pair for pixel 0 (STRIDE = row distance). */
ld1 {v22.2s}, [TMP1], STRIDE
ld1 {v23.2s}, [TMP1]
asr WTMP3, X, #16
add X, X, UX
add TMP3, TOP, TMP3, lsl #2
umull v8.8h, v22.8b, v28.8b
umlal v8.8h, v23.8b, v29.8b
ld1 {v22.2s}, [TMP2], STRIDE
ld1 {v23.2s}, [TMP2]
asr WTMP4, X, #16
add X, X, UX
add TMP4, TOP, TMP4, lsl #2
umull v9.8h, v22.8b, v28.8b
umlal v9.8h, v23.8b, v29.8b
ld1 {v22.2s}, [TMP3], STRIDE
ld1 {v23.2s}, [TMP3]
umull v10.8h, v22.8b, v28.8b
umlal v10.8h, v23.8b, v29.8b
/* Horizontal interpolation of pixel 0: left*(1<<BITS - w) + right*w. */
ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v8.4h, v15.h[0]
umlal2 v0.4s, v8.8h, v15.h[0]
prfm PREFETCH_MODE, [TMP4, PF_OFFS]
ld1 {v16.2s}, [TMP4], STRIDE
ld1 {v17.2s}, [TMP4]
prfm PREFETCH_MODE, [TMP4, PF_OFFS]
umull v11.8h, v16.8b, v28.8b
umlal v11.8h, v17.8b, v29.8b
ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v1.4s, v9.4h, v15.h[4]
umlal2 v1.4s, v9.8h, v15.h[4]
/* Refresh horizontal weights and advance the coordinate accumulator. */
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
.endm
/*
 * Pipeline tail for over_8888_8888: finish the horizontal interpolation
 * of pixels 2 and 3 (v2/v3 from v10/v11), narrow the four interpolated
 * pixels into v6/v7, then composite OVER the destination:
 *   dst' = src + dst * (255 - src_alpha), with rounding via urshr/raddhn,
 * and store four a8r8g8b8 pixels.
 */
.macro bilinear_over_8888_8888_process_pixblock_tail
ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v2.4s, v10.4h, v15.h[0]
umlal2 v2.4s, v10.8h, v15.h[0]
ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v3.4s, v11.4h, v15.h[4]
umlal2 v3.4s, v11.8h, v15.h[4]
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
xtn v6.8b, v0.8h
xtn v7.8b, v2.8h
ld1 {v2.2s, v3.2s}, [OUT]
/* NOTE(review): prefetch_offset is only .set in the USE_MASK branch of
 * generate_bilinear_scanline_func; this no-mask function relies on a
 * masked variant having been generated earlier in the file. */
prfm PREFETCH_MODE, [OUT, #(prefetch_offset * 4)]
/* De-interleave src (v6/v7) and dst (v2/v3) into per-channel planes. */
vuzp v6.8b, v7.8b
vuzp v2.8b, v3.8b
vuzp v6.8b, v7.8b
vuzp v2.8b, v3.8b
/* v4 = ~src_alpha broadcast; dst * (255 - alpha) with rounding. */
dup v4.2s, v7.s[1]
mvn v4.8b, v4.8b
umull v11.8h, v2.8b, v4.8b
umull v2.8h, v3.8b, v4.8b
urshr v1.8h, v11.8h, #8
urshr v10.8h, v2.8h, #8
raddhn v3.8b, v10.8h, v2.8h
raddhn v2.8b, v1.8h, v11.8h
/* src + dst*(255-alpha), saturating, then re-interleave and store. */
uqadd v6.8b, v2.8b, v6.8b
uqadd v7.8b, v3.8b, v7.8b
vuzp v6.8b, v7.8b
vuzp v6.8b, v7.8b
add v12.8h, v12.8h, v13.8h
st1 {v6.2s, v7.2s}, [OUT], #16
.endm
/*
 * Pipeline tail_head for over_8888_8888: the instructions of the tail
 * (finish + composite + store of the previous block) and of the head
 * (address computation + loads + vertical interpolation of the next
 * block) are manually interleaved to hide load and multiply latencies.
 * The statement order is deliberate scheduling - do not reorder.
 */
.macro bilinear_over_8888_8888_process_pixblock_tail_head
ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS
asr WTMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #2
umlsl v2.4s, v10.4h, v15.h[0]
asr WTMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #2
umlal2 v2.4s, v10.8h, v15.h[0]
ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
ld1 {v20.2s}, [TMP1], STRIDE
umlsl v3.4s, v11.4h, v15.h[4]
umlal2 v3.4s, v11.8h, v15.h[4]
ld1 {v21.2s}, [TMP1]
umull v8.8h, v20.8b, v28.8b
umlal v8.8h, v21.8b, v29.8b
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
ld1 {v22.2s}, [TMP2], STRIDE
shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
xtn v6.8b, v0.8h
ld1 {v23.2s}, [TMP2]
umull v9.8h, v22.8b, v28.8b
asr WTMP3, X, #16
add X, X, UX
add TMP3, TOP, TMP3, lsl #2
asr WTMP4, X, #16
add X, X, UX
add TMP4, TOP, TMP4, lsl #2
umlal v9.8h, v23.8b, v29.8b
xtn v7.8b, v2.8h
ld1 {v2.2s, v3.2s}, [OUT]
prfm PREFETCH_MODE, [OUT, PF_OFFS]
ld1 {v22.2s}, [TMP3], STRIDE
/* De-interleave previous block's src/dst and composite OVER. */
vuzp v6.8b, v7.8b
vuzp v2.8b, v3.8b
vuzp v6.8b, v7.8b
vuzp v2.8b, v3.8b
dup v4.2s, v7.s[1]
ld1 {v23.2s}, [TMP3]
mvn v4.8b, v4.8b
umull v10.8h, v22.8b, v28.8b
umlal v10.8h, v23.8b, v29.8b
umull v11.8h, v2.8b, v4.8b
umull v2.8h, v3.8b, v4.8b
ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v8.4h, v15.h[0]
urshr v1.8h, v11.8h, #8
umlal2 v0.4s, v8.8h, v15.h[0]
urshr v8.8h, v2.8h, #8
raddhn v3.8b, v8.8h, v2.8h
raddhn v2.8b, v1.8h, v11.8h
prfm PREFETCH_MODE, [TMP4, PF_OFFS]
ld1 {v16.2s}, [TMP4], STRIDE
uqadd v6.8b, v2.8b, v6.8b
uqadd v7.8b, v3.8b, v7.8b
ld1 {v17.2s}, [TMP4]
prfm PREFETCH_MODE, [TMP4, PF_OFFS]
umull v11.8h, v16.8b, v28.8b
umlal v11.8h, v17.8b, v29.8b
vuzp v6.8b, v7.8b
ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
vuzp v6.8b, v7.8b
umlsl v1.4s, v9.4h, v15.h[4]
add v12.8h, v12.8h, v13.8h
umlal2 v1.4s, v9.8h, v15.h[4]
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
st1 {v6.2s, v7.2s}, [OUT], #16
.endm
/* over_8888_8_8888 */
/*
 * Hooks for 32bpp source, 8-bit mask, 32bpp destination, OVER operation.
 * Delegates to the generic macros; note the four-pixel step is built
 * from two two-pixel steps here, and there is no pipelining (empty tail).
 */
.macro bilinear_over_8888_8_8888_process_last_pixel
bilinear_interpolate_last_pixel 8888, 8, 8888, over
.endm
.macro bilinear_over_8888_8_8888_process_two_pixels
bilinear_interpolate_two_pixels 8888, 8, 8888, over
.endm
.macro bilinear_over_8888_8_8888_process_four_pixels
bilinear_interpolate_two_pixels 8888, 8, 8888, over
bilinear_interpolate_two_pixels 8888, 8, 8888, over
.endm
.macro bilinear_over_8888_8_8888_process_pixblock_head
bilinear_over_8888_8_8888_process_four_pixels
.endm
.macro bilinear_over_8888_8_8888_process_pixblock_tail
.endm
.macro bilinear_over_8888_8_8888_process_pixblock_tail_head
bilinear_over_8888_8_8888_process_pixblock_tail
bilinear_over_8888_8_8888_process_pixblock_head
.endm
/* add_8888_8888 */
/*
 * Hooks for 32bpp source and destination, no mask, ADD operation.
 * Delegates to the generic macros; the four-pixel step is two two-pixel
 * steps and there is no pipelining (empty tail).
 */
.macro bilinear_add_8888_8888_process_last_pixel
bilinear_interpolate_last_pixel 8888, x, 8888, add
.endm
.macro bilinear_add_8888_8888_process_two_pixels
bilinear_interpolate_two_pixels 8888, x, 8888, add
.endm
.macro bilinear_add_8888_8888_process_four_pixels
bilinear_interpolate_two_pixels 8888, x, 8888, add
bilinear_interpolate_two_pixels 8888, x, 8888, add
.endm
.macro bilinear_add_8888_8888_process_pixblock_head
bilinear_add_8888_8888_process_four_pixels
.endm
.macro bilinear_add_8888_8888_process_pixblock_tail
.endm
.macro bilinear_add_8888_8888_process_pixblock_tail_head
bilinear_add_8888_8888_process_pixblock_tail
bilinear_add_8888_8888_process_pixblock_head
.endm
/* add_8888_8_8888 */
/*
 * Hooks for 32bpp source, 8-bit mask, 32bpp destination, ADD operation.
 * Pure delegation to the generic bilinear_interpolate_* macros (empty
 * tail, head == four-pixel step).
 */
.macro bilinear_add_8888_8_8888_process_last_pixel
bilinear_interpolate_last_pixel 8888, 8, 8888, add
.endm
.macro bilinear_add_8888_8_8888_process_two_pixels
bilinear_interpolate_two_pixels 8888, 8, 8888, add
.endm
.macro bilinear_add_8888_8_8888_process_four_pixels
bilinear_interpolate_four_pixels 8888, 8, 8888, add
.endm
.macro bilinear_add_8888_8_8888_process_pixblock_head
bilinear_add_8888_8_8888_process_four_pixels
.endm
.macro bilinear_add_8888_8_8888_process_pixblock_tail
.endm
.macro bilinear_add_8888_8_8888_process_pixblock_tail_head
bilinear_add_8888_8_8888_process_pixblock_tail
bilinear_add_8888_8_8888_process_pixblock_head
.endm
/* Bilinear scanline functions */
/* 32bpp + 8-bit mask -> 32bpp, SRC */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon, \
8888, 8888, 2, 2, \
bilinear_src_8888_8_8888_process_last_pixel, \
bilinear_src_8888_8_8888_process_two_pixels, \
bilinear_src_8888_8_8888_process_four_pixels, \
bilinear_src_8888_8_8888_process_pixblock_head, \
bilinear_src_8888_8_8888_process_pixblock_tail, \
bilinear_src_8888_8_8888_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
/* 32bpp + 8-bit mask -> r5g6b5, SRC */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_neon, \
8888, 0565, 2, 1, \
bilinear_src_8888_8_0565_process_last_pixel, \
bilinear_src_8888_8_0565_process_two_pixels, \
bilinear_src_8888_8_0565_process_four_pixels, \
bilinear_src_8888_8_0565_process_pixblock_head, \
bilinear_src_8888_8_0565_process_pixblock_tail, \
bilinear_src_8888_8_0565_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
/* r5g6b5 + 8-bit mask -> 32bpp, SRC */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_neon, \
0565, 8888, 1, 2, \
bilinear_src_0565_8_x888_process_last_pixel, \
bilinear_src_0565_8_x888_process_two_pixels, \
bilinear_src_0565_8_x888_process_four_pixels, \
bilinear_src_0565_8_x888_process_pixblock_head, \
bilinear_src_0565_8_x888_process_pixblock_tail, \
bilinear_src_0565_8_x888_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
/* r5g6b5 + 8-bit mask -> r5g6b5, SRC */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_neon, \
0565, 0565, 1, 1, \
bilinear_src_0565_8_0565_process_last_pixel, \
bilinear_src_0565_8_0565_process_two_pixels, \
bilinear_src_0565_8_0565_process_four_pixels, \
bilinear_src_0565_8_0565_process_pixblock_head, \
bilinear_src_0565_8_0565_process_pixblock_tail, \
bilinear_src_0565_8_0565_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
/* 32bpp -> 32bpp, OVER (no mask; uses the hand-pipelined block macros) */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon, \
8888, 8888, 2, 2, \
bilinear_over_8888_8888_process_last_pixel, \
bilinear_over_8888_8888_process_two_pixels, \
bilinear_over_8888_8888_process_four_pixels, \
bilinear_over_8888_8888_process_pixblock_head, \
bilinear_over_8888_8888_process_pixblock_tail, \
bilinear_over_8888_8888_process_pixblock_tail_head, \
4, 28, 0
/* 32bpp + 8-bit mask -> 32bpp, OVER */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_neon, \
8888, 8888, 2, 2, \
bilinear_over_8888_8_8888_process_last_pixel, \
bilinear_over_8888_8_8888_process_two_pixels, \
bilinear_over_8888_8_8888_process_four_pixels, \
bilinear_over_8888_8_8888_process_pixblock_head, \
bilinear_over_8888_8_8888_process_pixblock_tail, \
bilinear_over_8888_8_8888_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
/* 32bpp -> 32bpp, ADD (no mask) */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_neon, \
8888, 8888, 2, 2, \
bilinear_add_8888_8888_process_last_pixel, \
bilinear_add_8888_8888_process_two_pixels, \
bilinear_add_8888_8888_process_four_pixels, \
bilinear_add_8888_8888_process_pixblock_head, \
bilinear_add_8888_8888_process_pixblock_tail, \
bilinear_add_8888_8888_process_pixblock_tail_head, \
4, 28, 0
/* 32bpp + 8-bit mask -> 32bpp, ADD */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_neon, \
8888, 8888, 2, 2, \
bilinear_add_8888_8_8888_process_last_pixel, \
bilinear_add_8888_8_8888_process_two_pixels, \
bilinear_add_8888_8_8888_process_four_pixels, \
bilinear_add_8888_8_8888_process_pixblock_head, \
bilinear_add_8888_8_8888_process_pixblock_tail, \
bilinear_add_8888_8_8888_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
|
iMAGRAY/Shelldone | 139,688 | deps/cairo/pixman/pixman/pixman-arma64-neon-asm.S | /*
* Copyright © 2009 Nokia Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
*/
/*
* This file contains implementations of NEON optimized pixel processing
* functions. There is no full and detailed tutorial, but some functions
* (those which are exposing some new or interesting features) are
* extensively commented and can be used as examples.
*
* You may want to have a look at the comments for following functions:
* - pixman_composite_over_8888_0565_asm_neon
* - pixman_composite_over_n_8_0565_asm_neon
*/
/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.arch armv8-a
.altmacro
.p2align 2
#include "pixman-private.h"
#include "pixman-arm-asm.h"
#include "pixman-arma64-neon-asm.h"
/* Global configuration options and preferences */
/*
* The code can optionally make use of unaligned memory accesses to improve
* performance of handling leading/trailing pixels for each scanline.
* Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for
* example in linux if unaligned memory accesses are not configured to
 * generate exceptions.
*/
.set RESPECT_STRICT_ALIGNMENT, 1
/*
* Set default prefetch type. There is a choice between the following options:
*
* PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work
* as NOP to workaround some HW bugs or for whatever other reason)
*
* PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
 * advanced prefetch introduces heavy overhead)
*
* PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
* which can run ARM and NEON instructions simultaneously so that extra ARM
* instructions do not add (many) extra cycles, but improve prefetch efficiency)
*
* Note: some types of function can't support advanced prefetch and fallback
* to simple one (those which handle 24bpp pixels)
*/
/* Prefetch strategy used by the composite templates (see options above). */
.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
/* Prefetch distance in pixels for simple prefetch */
.set PREFETCH_DISTANCE_SIMPLE, 64
/*
* Implementation of pixman_composite_over_8888_0565_asm_neon
*
* This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and
* performs OVER compositing operation. Function fast_composite_over_8888_0565
* from pixman-fast-path.c does the same in C and can be used as a reference.
*
* First we need to have some NEON assembly code which can do the actual
* operation on the pixels and provide it to the template macro.
*
* Template macro quite conveniently takes care of emitting all the necessary
* code for memory reading and writing (including quite tricky cases of
* handling unaligned leading/trailing pixels), so we only need to deal with
* the data in NEON registers.
*
 * NEON register allocation in general is recommended to be the following:
* v0, v1, v2, v3 - contain loaded source pixel data
* v4, v5, v6, v7 - contain loaded destination pixels (if they are needed)
* v24, v25, v26, v27 - contain loading mask pixel data (if mask is used)
* v28, v29, v30, v31 - place for storing the result (destination pixels)
*
* As can be seen above, four 64-bit NEON registers are used for keeping
* intermediate pixel data and up to 8 pixels can be processed in one step
* for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
*
* This particular function uses the following registers allocation:
* v0, v1, v2, v3 - contain loaded source pixel data
* v4, v5 - contain loaded destination pixels (they are needed)
* v28, v29 - place for storing the result (destination pixels)
*/
/*
* Step one. We need to have some code to do some arithmetics on pixel data.
* This is implemented as a pair of macros: '*_head' and '*_tail'. When used
* back-to-back, they take pixel data from {v0, v1, v2, v3} and {v4, v5},
* perform all the needed calculations and write the result to {v28, v29}.
* The rationale for having two macros and not just one will be explained
 * later. In practice, any single monolithic function which does the work can
* be split into two parts in any arbitrary way without affecting correctness.
*
* There is one special trick here too. Common template macro can optionally
* make our life a bit easier by doing R, G, B, A color components
* deinterleaving for 32bpp pixel formats (and this feature is used in
* 'pixman_composite_over_8888_0565_asm_neon' function). So it means that
* instead of having 8 packed pixels in {v0, v1, v2, v3} registers, we
* actually use v0 register for blue channel (a vector of eight 8-bit
* values), v1 register for green, v2 for red and v3 for alpha. This
* simple conversion can be also done with a few NEON instructions:
*
* Packed to planar conversion: // vuzp8 is a wrapper macro
* vuzp8 v0, v1
* vuzp8 v2, v3
* vuzp8 v1, v3
* vuzp8 v0, v2
*
* Planar to packed conversion: // vzip8 is a wrapper macro
* vzip8 v0, v2
* vzip8 v1, v3
* vzip8 v2, v3
* vzip8 v0, v1
*
* But pixel can be loaded directly in planar format using LD4 / b NEON
* instruction. It is 1 cycle slower than LD1 / s, so this is not always
* desirable, that's why deinterleaving is optional.
*
* But anyway, here is the code:
*/
.macro pixman_composite_over_8888_0565_process_pixblock_head
/* convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format
   and put data into v6 - red, v7 - green, v30 - blue */
mov v4.d[1], v5.d[0]
shrn v6.8b, v4.8h, #8
shrn v7.8b, v4.8h, #3
sli v4.8h, v4.8h, #5
/* Replicate the top bits into the low bits to expand 5/6 bits to 8. */
sri v6.8b, v6.8b, #5
mvn v3.8b, v3.8b /* invert source alpha */
sri v7.8b, v7.8b, #6
shrn v30.8b, v4.8h, #2
/* now do alpha blending: dst_channel * (255 - src_alpha) with rounding
   (urshr + raddhn), storing results in 8-bit planar format
   into v20 - red, v23 - green, v22 - blue */
umull v10.8h, v3.8b, v6.8b
umull v11.8h, v3.8b, v7.8b
umull v12.8h, v3.8b, v30.8b
urshr v17.8h, v10.8h, #8
urshr v18.8h, v11.8h, #8
urshr v19.8h, v12.8h, #8
raddhn v20.8b, v10.8h, v17.8h
raddhn v23.8b, v11.8h, v18.8h
raddhn v22.8b, v12.8h, v19.8h
.endm
.macro pixman_composite_over_8888_0565_process_pixblock_tail
/* ... continue alpha blending: result = src + dst*(255-alpha), saturating */
uqadd v17.8b, v2.8b, v20.8b
uqadd v18.8b, v0.8b, v22.8b
uqadd v19.8b, v1.8b, v23.8b
/* convert the result to r5g6b5 and store it into {v14}
   (ushll #7 + sli #1 doubles as a shift by 8, which ushll cannot encode) */
ushll v14.8h, v17.8b, #7
sli v14.8h, v14.8h, #1
ushll v8.8h, v19.8b, #7
sli v8.8h, v8.8h, #1
ushll v9.8h, v18.8b, #7
sli v9.8h, v9.8h, #1
sri v14.8h, v8.8h, #5
sri v14.8h, v9.8h, #11
/* Expose the packed result in v28/v29 for the store template. */
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
.endm
/*
* OK, now we got almost everything that we need. Using the above two
* macros, the work can be done right. But now we want to optimize
* it a bit. ARM Cortex-A8 is an in-order core, and benefits really
* a lot from good code scheduling and software pipelining.
*
* Let's construct some code, which will run in the core main loop.
* Some pseudo-code of the main loop will look like this:
* head
* while (...) {
* tail
* head
* }
* tail
*
* It may look a bit weird, but this setup allows to hide instruction
* latencies better and also utilize dual-issue capability more
* efficiently (make pairs of load-store and ALU instructions).
*
* So what we need now is a '*_tail_head' macro, which will be used
* in the core main loop. A trivial straightforward implementation
* of this macro would look like this:
*
* pixman_composite_over_8888_0565_process_pixblock_tail
* st1 {v28.4h, v29.4h}, [DST_W], #32
* ld1 {v4.4h, v5.4h}, [DST_R], #16
* ld4 {v0.2s, v1.2s, v2.2s, v3.2s}, [SRC], #32
* pixman_composite_over_8888_0565_process_pixblock_head
* cache_preload 8, 8
*
* Now it also got some VLD/VST instructions. We simply can't move from
* processing one block of pixels to the other one with just arithmetics.
* The previously processed data needs to be written to memory and new
* data needs to be fetched. Fortunately, this main loop does not deal
* with partial leading/trailing pixels and can load/store a full block
* of pixels in a bulk. Additionally, destination buffer is already
* 16 bytes aligned here (which is good for performance).
*
* New things here are DST_R, DST_W, SRC and MASK identifiers. These
* are the aliases for ARM registers which are used as pointers for
* accessing data. We maintain separate pointers for reading and writing
* destination buffer (DST_R and DST_W).
*
* Another new thing is 'cache_preload' macro. It is used for prefetching
* data into CPU L2 cache and improve performance when dealing with large
* images which are far larger than cache size. It uses one argument
* (actually two, but they need to be the same here) - number of pixels
* in a block. Looking into 'pixman-arm-neon-asm.h' can provide some
* details about this macro. Moreover, if good performance is needed
* the code from this macro needs to be copied into '*_tail_head' macro
* and mixed with the rest of code for optimal instructions scheduling.
* We are actually doing it below.
*
* Now after all the explanations, here is the optimized code.
 * Different instruction streams (originating from '*_head', '*_tail'
* and 'cache_preload' macro) use different indentation levels for
* better readability. Actually taking the code from one of these
* indentation levels and ignoring a few LD/ST instructions would
* result in exactly the code from '*_head', '*_tail' or 'cache_preload'
* macro!
*/
#if 1
/*
 * Software-pipelined tail+head for over_8888_0565: finishes and stores
 * the previous 8-pixel block while loading and starting the next one.
 * Lines prefixed with PF belong to the advanced-prefetch state machine
 * (PF_X = prefetch x position, PF_CTL = control, PF_SRC/PF_DST =
 * prefetch pointers); they are conditionally emitted by the PF macro.
 * Instruction order is deliberate scheduling - do not reorder.
 */
.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
uqadd v17.8b, v2.8b, v20.8b
ld1 {v4.4h, v5.4h}, [DST_R], #16
mov v4.d[1], v5.d[0]
uqadd v18.8b, v0.8b, v22.8b
uqadd v19.8b, v1.8b, v23.8b
shrn v6.8b, v4.8h, #8
fetch_src_pixblock
shrn v7.8b, v4.8h, #3
sli v4.8h, v4.8h, #5
ushll v14.8h, v17.8b, #7
sli v14.8h, v14.8h, #1
PF add PF_X, PF_X, #8
ushll v8.8h, v19.8b, #7
sli v8.8h, v8.8h, #1
PF tst PF_CTL, #0xF
sri v6.8b, v6.8b, #5
PF beq 10f
PF add PF_X, PF_X, #8
10:
mvn v3.8b, v3.8b
PF beq 10f
PF sub PF_CTL, PF_CTL, #1
10:
sri v7.8b, v7.8b, #6
shrn v30.8b, v4.8h, #2
umull v10.8h, v3.8b, v6.8b
PF lsl DUMMY, PF_X, #src_bpp_shift
PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
umull v11.8h, v3.8b, v7.8b
umull v12.8h, v3.8b, v30.8b
PF lsl DUMMY, PF_X, #dst_bpp_shift
PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
sri v14.8h, v8.8h, #5
PF cmp PF_X, ORIG_W
ushll v9.8h, v18.8b, #7
sli v9.8h, v9.8h, #1
urshr v17.8h, v10.8h, #8
PF ble 10f
PF sub PF_X, PF_X, ORIG_W
10:
urshr v19.8h, v11.8h, #8
urshr v18.8h, v12.8h, #8
PF ble 10f
PF subs PF_CTL, PF_CTL, #0x10
10:
sri v14.8h, v9.8h, #11
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
PF ble 10f
/* Touch the next source scanline so the TLB/cache walk starts early,
 * then advance the source prefetch pointer. */
PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
PF ldrsb DUMMY, [PF_SRC, DUMMY]
PF add PF_SRC, PF_SRC, #1
10:
raddhn v20.8b, v10.8h, v17.8h
raddhn v23.8b, v11.8h, v19.8h
PF ble 10f
/* Same for the destination scanline.
 * BUGFIX: this used to read 'add PF_DST, PF_SRC, #1', which clobbered
 * the destination prefetch pointer with the source pointer; it must
 * advance PF_DST itself, mirroring the PF_SRC sequence above. */
PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
PF ldrsb DUMMY, [PF_DST, DUMMY]
PF add PF_DST, PF_DST, #1
10:
raddhn v22.8b, v12.8h, v18.8h
st1 {v14.8h}, [DST_W], #16
.endm
#else
/* If we did not care much about the performance, we would just use this... */
.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
pixman_composite_over_8888_0565_process_pixblock_tail
st1 {v14.8h}, [DST_W], #16
ld1 {v4.4h, v4.5h}, [DST_R], #16
fetch_src_pixblock
pixman_composite_over_8888_0565_process_pixblock_head
cache_preload 8, 8
.endm
#endif
/*
* And now the final part. We are using 'generate_composite_function' macro
* to put all the stuff together. We are specifying the name of the function
* which we want to get, number of bits per pixel for the source, mask and
* destination (0 if unused, like mask in this case). Next come some bit
* flags:
* FLAG_DST_READWRITE - tells that the destination buffer is both read
* and written, for write-only buffer we would use
* FLAG_DST_WRITEONLY flag instead
* FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
* and separate color channels for 32bpp format.
* The next things are:
 * - the number of pixels processed per iteration (8 in this case, because
 *   that's the maximum that can fit into four 64-bit NEON registers).
 * - prefetch distance, measured in pixel blocks. In this case it is 5 blocks
 *   of 8 pixels each. That would be 40 pixels, or up to 160 bytes. The
 *   optimal prefetch distance can be selected by running some benchmarks.
*
* After that we specify some macros, these are 'default_init',
* 'default_cleanup' here which are empty (but it is possible to have custom
* init/cleanup macros to be able to save/restore some extra NEON registers
* like d8-d15 or do anything else) followed by
* 'pixman_composite_over_8888_0565_process_pixblock_head',
* 'pixman_composite_over_8888_0565_process_pixblock_tail' and
* 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
* which we got implemented above.
*
* The last part is the NEON registers allocation scheme.
*/
/* Instantiate the complete OVER a8r8g8b8 -> r5g6b5 routine from the
   pixblock macros defined above (see the long comment above for the
   meaning of each argument). */
generate_composite_function \
    pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_0565_process_pixblock_head, \
    pixman_composite_over_8888_0565_process_pixblock_tail, \
    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    24 /* mask_basereg */
/******************************************************************************/
/*
 * OVER compositing of a solid color onto a r5g6b5 destination.
 * The init macro below pre-splits the solid source into v0/v1/v2
 * (color channels) and leaves the inverted alpha in v3.
 */
.macro pixman_composite_over_n_0565_process_pixblock_head
    /* convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format
       and put data into v6 - red, v7 - green, v30 - blue */
    mov v4.d[1], v5.d[0]
    shrn v6.8b, v4.8h, #8
    shrn v7.8b, v4.8h, #3
    sli v4.8h, v4.8h, #5
    sri v6.8b, v6.8b, #5
    sri v7.8b, v7.8b, #6
    shrn v30.8b, v4.8h, #2
    /* now do alpha blending, storing results in 8-bit planar format
       into v20 - red, v23 - green, v22 - blue */
    umull v10.8h, v3.8b, v6.8b
    umull v11.8h, v3.8b, v7.8b
    umull v12.8h, v3.8b, v30.8b
    urshr v13.8h, v10.8h, #8
    urshr v14.8h, v11.8h, #8
    urshr v15.8h, v12.8h, #8
    raddhn v20.8b, v10.8h, v13.8h
    raddhn v23.8b, v11.8h, v14.8h
    raddhn v22.8b, v12.8h, v15.8h
.endm
.macro pixman_composite_over_n_0565_process_pixblock_tail
    /* ... continue alpha blending */
    uqadd v17.8b, v2.8b, v20.8b
    uqadd v18.8b, v0.8b, v22.8b
    uqadd v19.8b, v1.8b, v23.8b
    /* convert the result to r5g6b5 and store it into {v14} */
    ushll v14.8h, v17.8b, #7
    sli v14.8h, v14.8h, #1
    ushll v8.8h, v19.8b, #7
    sli v8.8h, v8.8h, #1
    ushll v9.8h, v18.8b, #7
    sli v9.8h, v9.8h, #1
    sri v14.8h, v8.8h, #5
    sri v14.8h, v9.8h, #11
    /* split the packed result into v28/v29 for the store path */
    mov v28.d[0], v14.d[0]
    mov v29.d[0], v14.d[1]
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_n_0565_process_pixblock_tail_head
    pixman_composite_over_n_0565_process_pixblock_tail
    ld1 {v4.4h, v5.4h}, [DST_R], #16
    st1 {v14.8h}, [DST_W], #16
    pixman_composite_over_n_0565_process_pixblock_head
    cache_preload 8, 8
.endm
/* Split the solid color (passed in w4) into per-channel byte vectors
   and invert the alpha once up front. */
.macro pixman_composite_over_n_0565_init
    mov v3.s[0], w4
    dup v0.8b, v3.b[0]
    dup v1.8b, v3.b[1]
    dup v2.8b, v3.b[2]
    dup v3.8b, v3.b[3]
    mvn v3.8b, v3.8b /* invert source alpha */
.endm
generate_composite_function \
    pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_0565_init, \
    default_cleanup, \
    pixman_composite_over_n_0565_process_pixblock_head, \
    pixman_composite_over_n_0565_process_pixblock_tail, \
    pixman_composite_over_n_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    24 /* mask_basereg */
/******************************************************************************/
/*
 * SRC conversion a8r8g8b8 -> r5g6b5 (alpha dropped).  Deinterleaved
 * source channels arrive in v0 (blue), v1 (green), v2 (red).
 */
.macro pixman_composite_src_8888_0565_process_pixblock_head
    ushll v8.8h, v1.8b, #7
    sli v8.8h, v8.8h, #1
    ushll v14.8h, v2.8b, #7
    sli v14.8h, v14.8h, #1
    ushll v9.8h, v0.8b, #7
    sli v9.8h, v9.8h, #1
.endm
.macro pixman_composite_src_8888_0565_process_pixblock_tail
    /* merge green and blue fields into the red-seeded v14 */
    sri v14.8h, v8.8h, #5
    sri v14.8h, v9.8h, #11
    mov v28.d[0], v14.d[0]
    mov v29.d[0], v14.d[1]
.endm
/* Tail+head with interleaved prefetch bookkeeping. */
.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
    sri v14.8h, v8.8h, #5
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    fetch_src_pixblock
    PF beq 10f
    PF add PF_X, PF_X, #8
    PF sub PF_CTL, PF_CTL, #1
10:
    sri v14.8h, v9.8h, #11
    mov v28.d[0], v14.d[0]
    mov v29.d[0], v14.d[1]
    PF cmp PF_X, ORIG_W
    PF lsl DUMMY, PF_X, #src_bpp_shift
    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
    ushll v8.8h, v1.8b, #7
    sli v8.8h, v8.8h, #1
    st1 {v14.8h}, [DST_W], #16
    PF ble 10f
    PF sub PF_X, PF_X, ORIG_W
    PF subs PF_CTL, PF_CTL, #0x10
10:
    ushll v14.8h, v2.8b, #7
    sli v14.8h, v14.8h, #1
    PF ble 10f
    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
    PF ldrsb DUMMY, [PF_SRC, DUMMY]
    PF add PF_SRC, PF_SRC, #1
10:
    ushll v9.8h, v0.8b, #7
    sli v9.8h, v9.8h, #1
.endm
generate_composite_function \
    pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_8888_0565_process_pixblock_head, \
    pixman_composite_src_8888_0565_process_pixblock_tail, \
    pixman_composite_src_8888_0565_process_pixblock_tail_head
/******************************************************************************/
/*
 * SRC conversion r5g6b5 -> a8r8g8b8 (alpha forced to 255 in v31).
 * Output channels go to v28 (blue), v29 (green), v30 (red).
 */
.macro pixman_composite_src_0565_8888_process_pixblock_head
    mov v0.d[1], v1.d[0]
    shrn v30.8b, v0.8h, #8
    shrn v29.8b, v0.8h, #3
    sli v0.8h, v0.8h, #5
    movi v31.8b, #255
    sri v30.8b, v30.8b, #5
    sri v29.8b, v29.8b, #6
    shrn v28.8b, v0.8h, #2
.endm
.macro pixman_composite_src_0565_8888_process_pixblock_tail
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_src_0565_8888_process_pixblock_tail_head
    pixman_composite_src_0565_8888_process_pixblock_tail
    st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    fetch_src_pixblock
    pixman_composite_src_0565_8888_process_pixblock_head
    cache_preload 8, 8
.endm
generate_composite_function \
    pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0565_8888_process_pixblock_head, \
    pixman_composite_src_0565_8888_process_pixblock_tail, \
    pixman_composite_src_0565_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * ADD of two a8 surfaces: saturating byte-wise addition of source
 * (v0-v3) and destination (v4-v7), 32 pixels per block.
 */
.macro pixman_composite_add_8_8_process_pixblock_head
    uqadd v28.8b, v0.8b, v4.8b
    uqadd v29.8b, v1.8b, v5.8b
    uqadd v30.8b, v2.8b, v6.8b
    uqadd v31.8b, v3.8b, v7.8b
.endm
.macro pixman_composite_add_8_8_process_pixblock_tail
.endm
/* Tail+head with interleaved prefetch bookkeeping. */
.macro pixman_composite_add_8_8_process_pixblock_tail_head
    fetch_src_pixblock
    PF add PF_X, PF_X, #32
    PF tst PF_CTL, #0xF
    ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    PF beq 10f
    PF add PF_X, PF_X, #32
    PF sub PF_CTL, PF_CTL, #1
10:
    st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    PF cmp PF_X, ORIG_W
    PF lsl DUMMY, PF_X, #src_bpp_shift
    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
    PF lsl DUMMY, PF_X, #dst_bpp_shift
    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
    PF ble 10f
    PF sub PF_X, PF_X, ORIG_W
    PF subs PF_CTL, PF_CTL, #0x10
10:
    uqadd v28.8b, v0.8b, v4.8b
    PF ble 10f
    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
    PF ldrsb DUMMY, [PF_SRC, DUMMY]
    PF add PF_SRC, PF_SRC, #1
    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
    PF ldrsb DUMMY, [PF_DST, DUMMY]
    PF add PF_DST, PF_DST, #1
10:
    uqadd v29.8b, v1.8b, v5.8b
    uqadd v30.8b, v2.8b, v6.8b
    uqadd v31.8b, v3.8b, v7.8b
.endm
generate_composite_function \
    pixman_composite_add_8_8_asm_neon, 8, 0, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * ADD of two 8888 surfaces.  Reuses the add_8_8 head/tail macros
 * (byte-wise saturating add works identically); this tail_head only
 * differs in the per-block PF_X step (8 pixels of 4 bytes).
 */
.macro pixman_composite_add_8888_8888_process_pixblock_tail_head
    fetch_src_pixblock
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    PF beq 10f
    PF add PF_X, PF_X, #8
    PF sub PF_CTL, PF_CTL, #1
10:
    st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    PF cmp PF_X, ORIG_W
    PF lsl DUMMY, PF_X, #src_bpp_shift
    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
    PF lsl DUMMY, PF_X, #dst_bpp_shift
    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
    PF ble 10f
    PF sub PF_X, PF_X, ORIG_W
    PF subs PF_CTL, PF_CTL, #0x10
10:
    uqadd v28.8b, v0.8b, v4.8b
    PF ble 10f
    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
    PF ldrsb DUMMY, [PF_SRC, DUMMY]
    PF add PF_SRC, PF_SRC, #1
    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
    PF ldrsb DUMMY, [PF_DST, DUMMY]
    PF add PF_DST, PF_DST, #1
10:
    uqadd v29.8b, v1.8b, v5.8b
    uqadd v30.8b, v2.8b, v6.8b
    uqadd v31.8b, v3.8b, v7.8b
.endm
generate_composite_function \
    pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8888_8888_process_pixblock_tail_head
/* Single-scanline variant (no prefetch distance argument). */
generate_composite_function_single_scanline \
    pixman_composite_scanline_add_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8888_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * OUT_REVERSE for two 8888 surfaces: dst = dst * (1 - src.alpha).
 * Source channels in v0-v3 (alpha in v3), destination in v4-v7.
 */
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_head
    mvn v24.8b, v3.8b /* get inverted alpha */
    /* do alpha blending */
    umull v8.8h, v24.8b, v4.8b
    umull v9.8h, v24.8b, v5.8b
    umull v10.8h, v24.8b, v6.8b
    umull v11.8h, v24.8b, v7.8b
.endm
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail
    /* rounding division by 256: (x + ((x + 128) >> 8) + 128) >> 8 pattern */
    urshr v14.8h, v8.8h, #8
    urshr v15.8h, v9.8h, #8
    urshr v16.8h, v10.8h, #8
    urshr v17.8h, v11.8h, #8
    raddhn v28.8b, v14.8h, v8.8h
    raddhn v29.8b, v15.8h, v9.8h
    raddhn v30.8b, v16.8h, v10.8h
    raddhn v31.8b, v17.8h, v11.8h
.endm
/* Tail+head with interleaved prefetch bookkeeping. */
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head
    ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    urshr v14.8h, v8.8h, #8
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    urshr v15.8h, v9.8h, #8
    urshr v16.8h, v10.8h, #8
    urshr v17.8h, v11.8h, #8
    PF beq 10f
    PF add PF_X, PF_X, #8
    PF sub PF_CTL, PF_CTL, #1
10:
    raddhn v28.8b, v14.8h, v8.8h
    raddhn v29.8b, v15.8h, v9.8h
    PF cmp PF_X, ORIG_W
    raddhn v30.8b, v16.8h, v10.8h
    raddhn v31.8b, v17.8h, v11.8h
    fetch_src_pixblock
    PF lsl DUMMY, PF_X, #src_bpp_shift
    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
    mvn v22.8b, v3.8b
    PF lsl DUMMY, PF_X, #dst_bpp_shift
    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
    st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    PF ble 10f
    PF sub PF_X, PF_X, ORIG_W
10:
    umull v8.8h, v22.8b, v4.8b
    PF ble 10f
    PF subs PF_CTL, PF_CTL, #0x10
10:
    umull v9.8h, v22.8b, v5.8b
    PF ble 10f
    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
    PF ldrsb DUMMY, [PF_SRC, DUMMY]
    PF add PF_SRC, PF_SRC, #1
10:
    umull v10.8h, v22.8b, v6.8b
    PF ble 10f
    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
    PF ldrsb DUMMY, [PF_DST, DUMMY]
    PF add PF_DST, PF_DST, #1
10:
    umull v11.8h, v22.8b, v7.8b
.endm
generate_composite_function_single_scanline \
    pixman_composite_scanline_out_reverse_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_head, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * OVER for two 8888 surfaces: dst = src + dst * (1 - src.alpha).
 * Head/tail reuse the out_reverse macros; tail then adds the source.
 */
.macro pixman_composite_over_8888_8888_process_pixblock_head
    pixman_composite_out_reverse_8888_8888_process_pixblock_head
.endm
.macro pixman_composite_over_8888_8888_process_pixblock_tail
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail
    uqadd v28.8b, v0.8b, v28.8b
    uqadd v29.8b, v1.8b, v29.8b
    uqadd v30.8b, v2.8b, v30.8b
    uqadd v31.8b, v3.8b, v31.8b
.endm
/* Tail+head with interleaved prefetch bookkeeping. */
.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
    ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    urshr v14.8h, v8.8h, #8
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    urshr v15.8h, v9.8h, #8
    urshr v16.8h, v10.8h, #8
    urshr v17.8h, v11.8h, #8
    PF beq 10f
    PF add PF_X, PF_X, #8
    PF sub PF_CTL, PF_CTL, #1
10:
    raddhn v28.8b, v14.8h, v8.8h
    raddhn v29.8b, v15.8h, v9.8h
    PF cmp PF_X, ORIG_W
    raddhn v30.8b, v16.8h, v10.8h
    raddhn v31.8b, v17.8h, v11.8h
    uqadd v28.8b, v0.8b, v28.8b
    uqadd v29.8b, v1.8b, v29.8b
    uqadd v30.8b, v2.8b, v30.8b
    uqadd v31.8b, v3.8b, v31.8b
    fetch_src_pixblock
    PF lsl DUMMY, PF_X, #src_bpp_shift
    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
    mvn v22.8b, v3.8b
    PF lsl DUMMY, PF_X, #dst_bpp_shift
    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
    st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    PF ble 10f
    PF sub PF_X, PF_X, ORIG_W
10:
    umull v8.8h, v22.8b, v4.8b
    PF ble 10f
    PF subs PF_CTL, PF_CTL, #0x10
10:
    umull v9.8h, v22.8b, v5.8b
    PF ble 10f
    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
    PF ldrsb DUMMY, [PF_SRC, DUMMY]
    PF add PF_SRC, PF_SRC, #1
10:
    umull v10.8h, v22.8b, v6.8b
    PF ble 10f
    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
    PF ldrsb DUMMY, [PF_DST, DUMMY]
    PF add PF_DST, PF_DST, #1
10:
    umull v11.8h, v22.8b, v7.8b
.endm
generate_composite_function \
    pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_process_pixblock_tail_head
/* Single-scanline variant (no prefetch distance argument). */
generate_composite_function_single_scanline \
    pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * OVER with a solid source onto 8888 destination.  The head/tail used
 * by the generated function are the shared over_8888_8888 ones; only
 * this tail_head and the init macro are specific to the solid case.
 */
.macro pixman_composite_over_n_8888_process_pixblock_head
    /* deinterleaved source pixels in {v0, v1, v2, v3} */
    /* inverted alpha in {v24} */
    /* destination pixels in {v4, v5, v6, v7} */
    umull v8.8h, v24.8b, v4.8b
    umull v9.8h, v24.8b, v5.8b
    umull v10.8h, v24.8b, v6.8b
    umull v11.8h, v24.8b, v7.8b
.endm
.macro pixman_composite_over_n_8888_process_pixblock_tail
    urshr v14.8h, v8.8h, #8
    urshr v15.8h, v9.8h, #8
    urshr v16.8h, v10.8h, #8
    urshr v17.8h, v11.8h, #8
    raddhn v28.8b, v14.8h, v8.8h
    raddhn v29.8b, v15.8h, v9.8h
    raddhn v30.8b, v16.8h, v10.8h
    raddhn v31.8b, v17.8h, v11.8h
    uqadd v28.8b, v0.8b, v28.8b
    uqadd v29.8b, v1.8b, v29.8b
    uqadd v30.8b, v2.8b, v30.8b
    uqadd v31.8b, v3.8b, v31.8b
.endm
/* Tail+head with interleaved prefetch; only the destination is
   prefetched (the source is solid). */
.macro pixman_composite_over_n_8888_process_pixblock_tail_head
    urshr v14.8h, v8.8h, #8
    urshr v15.8h, v9.8h, #8
    urshr v16.8h, v10.8h, #8
    urshr v17.8h, v11.8h, #8
    raddhn v28.8b, v14.8h, v8.8h
    raddhn v29.8b, v15.8h, v9.8h
    raddhn v30.8b, v16.8h, v10.8h
    raddhn v31.8b, v17.8h, v11.8h
    ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    uqadd v28.8b, v0.8b, v28.8b
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0x0F
    PF beq 10f
    PF add PF_X, PF_X, #8
    PF sub PF_CTL, PF_CTL, #1
10:
    uqadd v29.8b, v1.8b, v29.8b
    uqadd v30.8b, v2.8b, v30.8b
    uqadd v31.8b, v3.8b, v31.8b
    PF cmp PF_X, ORIG_W
    umull v8.8h, v24.8b, v4.8b
    PF lsl DUMMY, PF_X, #dst_bpp_shift
    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
    umull v9.8h, v24.8b, v5.8b
    PF ble 10f
    PF sub PF_X, PF_X, ORIG_W
10:
    umull v10.8h, v24.8b, v6.8b
    PF subs PF_CTL, PF_CTL, #0x10
    umull v11.8h, v24.8b, v7.8b
    PF ble 10f
    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
    PF ldrsb DUMMY, [PF_DST, DUMMY]
    PF add PF_DST, PF_DST, #1
10:
    st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm
/* Split the solid color (w4) into channel vectors v0-v3 and keep the
   inverted alpha in v24. */
.macro pixman_composite_over_n_8888_init
    mov v3.s[0], w4
    dup v0.8b, v3.b[0]
    dup v1.8b, v3.b[1]
    dup v2.8b, v3.b[2]
    dup v3.8b, v3.b[3]
    mvn v24.8b, v3.8b /* get inverted alpha */
.endm
generate_composite_function \
    pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8888_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_n_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * OVER_REVERSE with a solid source: dst OVER solid.  Note the swapped
 * register roles: the solid "source" lives in v4-v7 (src_basereg = 4
 * below) and the destination is loaded into v0-v3 (dst_r_basereg = 0),
 * so the shared over_8888_8888 head/tail compute dst-over-solid.
 */
.macro pixman_composite_over_reverse_n_8888_process_pixblock_tail_head
    urshr v14.8h, v8.8h, #8
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    urshr v15.8h, v9.8h, #8
    urshr v12.8h, v10.8h, #8
    urshr v13.8h, v11.8h, #8
    PF beq 10f
    PF add PF_X, PF_X, #8
    PF sub PF_CTL, PF_CTL, #1
10:
    raddhn v28.8b, v14.8h, v8.8h
    raddhn v29.8b, v15.8h, v9.8h
    PF cmp PF_X, ORIG_W
    raddhn v30.8b, v12.8h, v10.8h
    raddhn v31.8b, v13.8h, v11.8h
    uqadd v28.8b, v0.8b, v28.8b
    uqadd v29.8b, v1.8b, v29.8b
    uqadd v30.8b, v2.8b, v30.8b
    uqadd v31.8b, v3.8b, v31.8b
    ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_R], #32
    mvn v22.8b, v3.8b
    PF lsl DUMMY, PF_X, #dst_bpp_shift
    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
    st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    PF blt 10f
    PF sub PF_X, PF_X, ORIG_W
10:
    umull v8.8h, v22.8b, v4.8b
    PF blt 10f
    PF subs PF_CTL, PF_CTL, #0x10
10:
    umull v9.8h, v22.8b, v5.8b
    umull v10.8h, v22.8b, v6.8b
    PF blt 10f
    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
    PF ldrsb DUMMY, [PF_DST, DUMMY]
    PF add PF_DST, PF_DST, #1
10:
    umull v11.8h, v22.8b, v7.8b
.endm
/* Replicate the solid color (w4) into v4-v7 (the "source" slots). */
.macro pixman_composite_over_reverse_n_8888_init
    mov v7.s[0], w4
    dup v4.8b, v7.b[0]
    dup v5.8b, v7.b[1]
    dup v6.8b, v7.b[2]
    dup v7.8b, v7.b[3]
.endm
generate_composite_function \
    pixman_composite_over_reverse_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_reverse_n_8888_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_reverse_n_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    4, /* src_basereg */ \
    24 /* mask_basereg */
/******************************************************************************/
/*
 * OVER of a8r8g8b8 source, through an a8 mask, onto r5g6b5 destination.
 * Source channels are in v8-v11 (src_basereg = 8), the mask in v24,
 * destination r5g6b5 data in v4/v5.
 */
.macro pixman_composite_over_8888_8_0565_process_pixblock_head
    umull v0.8h, v24.8b, v8.8b /* IN for SRC pixels (part1) */
    umull v1.8h, v24.8b, v9.8b
    umull v2.8h, v24.8b, v10.8b
    umull v3.8h, v24.8b, v11.8b
    mov v4.d[1], v5.d[0]
    shrn v25.8b, v4.8h, #8 /* convert DST_R data to 32-bpp (part1) */
    shrn v26.8b, v4.8h, #3
    sli v4.8h, v4.8h, #5
    urshr v17.8h, v0.8h, #8 /* IN for SRC pixels (part2) */
    urshr v18.8h, v1.8h, #8
    urshr v19.8h, v2.8h, #8
    urshr v20.8h, v3.8h, #8
    raddhn v0.8b, v0.8h, v17.8h
    raddhn v1.8b, v1.8h, v18.8h
    raddhn v2.8b, v2.8h, v19.8h
    raddhn v3.8b, v3.8h, v20.8h
    sri v25.8b, v25.8b, #5 /* convert DST_R data to 32-bpp (part2) */
    sri v26.8b, v26.8b, #6
    mvn v3.8b, v3.8b
    shrn v30.8b, v4.8h, #2
    umull v18.8h, v3.8b, v25.8b /* now do alpha blending */
    umull v19.8h, v3.8b, v26.8b
    umull v20.8h, v3.8b, v30.8b
.endm
.macro pixman_composite_over_8888_8_0565_process_pixblock_tail
    /* 3 cycle bubble (after vmull.u8) */
    urshr v5.8h, v18.8h, #8
    urshr v6.8h, v19.8h, #8
    urshr v7.8h, v20.8h, #8
    raddhn v17.8b, v18.8h, v5.8h
    raddhn v19.8b, v19.8h, v6.8h
    raddhn v18.8b, v20.8h, v7.8h
    uqadd v5.8b, v2.8b, v17.8b
    /* 1 cycle bubble */
    uqadd v6.8b, v0.8b, v18.8b
    uqadd v7.8b, v1.8b, v19.8b
    ushll v14.8h, v5.8b, #7 /* convert to 16bpp */
    sli v14.8h, v14.8h, #1
    ushll v18.8h, v7.8b, #7
    sli v18.8h, v18.8h, #1
    ushll v19.8h, v6.8b, #7
    sli v19.8h, v19.8h, #1
    sri v14.8h, v18.8h, #5
    /* 1 cycle bubble */
    sri v14.8h, v19.8h, #11
    mov v28.d[0], v14.d[0]
    mov v29.d[0], v14.d[1]
.endm
/* Tail+head: the '#if 0' branch is a hand-scheduled draft that is
   currently disabled; the '#else' branch (compiled) simply chains the
   tail/head macros. */
.macro pixman_composite_over_8888_8_0565_process_pixblock_tail_head
#if 0
    ld1 {v4.8h}, [DST_R], #16
    shrn v25.8b, v4.8h, #8
    fetch_mask_pixblock
    shrn v26.8b, v4.8h, #3
    fetch_src_pixblock
    umull v22.8h, v24.8b, v10.8b
    urshr v13.8h, v18.8h, #8
    urshr v11.8h, v19.8h, #8
    urshr v15.8h, v20.8h, #8
    raddhn v17.8b, v18.8h, v13.8h
    raddhn v19.8b, v19.8h, v11.8h
    raddhn v18.8b, v20.8h, v15.8h
    uqadd v17.8b, v2.8b, v17.8b
    umull v21.8h, v24.8b, v9.8b
    uqadd v18.8b, v0.8b, v18.8b
    uqadd v19.8b, v1.8b, v19.8b
    ushll v14.8h, v17.8b, #7
    sli v14.8h, v14.8h, #1
    umull v20.8h, v24.8b, v8.8b
    ushll v18.8h, v18.8b, #7
    sli v18.8h, v18.8h, #1
    ushll v19.8h, v19.8b, #7
    sli v19.8h, v19.8h, #1
    sri v14.8h, v18.8h, #5
    umull v23.8h, v24.8b, v11.8b
    sri v14.8h, v19.8h, #11
    mov v28.d[0], v14.d[0]
    mov v29.d[0], v14.d[1]
    cache_preload 8, 8
    sli v4.8h, v4.8h, #5
    urshr v16.8h, v20.8h, #8
    urshr v17.8h, v21.8h, #8
    urshr v18.8h, v22.8h, #8
    urshr v19.8h, v23.8h, #8
    raddhn v0.8b, v20.8h, v16.8h
    raddhn v1.8b, v21.8h, v17.8h
    raddhn v2.8b, v22.8h, v18.8h
    raddhn v3.8b, v23.8h, v19.8h
    sri v25.8b, v25.8b, #5
    sri v26.8b, v26.8b, #6
    mvn v3.8b, v3.8b
    shrn v30.8b, v4.8h, #2
    st1 {v14.8h}, [DST_W], #16
    umull v18.8h, v3.8b, v25.8b
    umull v19.8h, v3.8b, v26.8b
    umull v20.8h, v3.8b, v30.8b
#else
    pixman_composite_over_8888_8_0565_process_pixblock_tail
    st1 {v28.4h, v29.4h}, [DST_W], #16
    ld1 {v4.4h, v5.4h}, [DST_R], #16
    fetch_mask_pixblock
    fetch_src_pixblock
    pixman_composite_over_8888_8_0565_process_pixblock_head
#endif
.endm
generate_composite_function \
    pixman_composite_over_8888_8_0565_asm_neon, 32, 8, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    8, /* src_basereg */ \
    24 /* mask_basereg */
/******************************************************************************/
/*
* This function needs a special initialization of solid mask.
 * Solid source pixel data is taken from the color argument (w4 in the
 * init macro below), split into color components and replicated in
 * v8-v11 registers. Additionally, this function needs all the NEON
 * registers, so it has to save v8-v15 registers which are callee saved
 * according to ABI. These registers are restored from 'cleanup' macro. All the
* other NEON registers are caller saved, so can be clobbered freely
* without introducing any problems.
*/
/* Replicate the solid color (w4) into v8-v11, the slots where the
   shared over_8888_8_0565 pixblock macros expect the source. */
.macro pixman_composite_over_n_8_0565_init
    mov v11.s[0], w4
    dup v8.8b, v11.b[0]
    dup v9.8b, v11.b[1]
    dup v10.8b, v11.b[2]
    dup v11.8b, v11.b[3]
.endm
/* Nothing to undo: register save/restore is handled elsewhere. */
.macro pixman_composite_over_n_8_0565_cleanup
.endm
generate_composite_function \
    pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8_0565_init, \
    pixman_composite_over_n_8_0565_cleanup, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    8, /* src_basereg */ \
    24 /* mask_basereg */
/******************************************************************************/
/* Solid-mask variant: broadcast the mask alpha byte (from w6) into
   v24, where the shared over_8888_8_0565 macros read the mask. */
.macro pixman_composite_over_8888_n_0565_init
    mov v24.s[0], w6
    dup v24.8b, v24.b[3]
.endm
/* No per-call state to tear down. */
.macro pixman_composite_over_8888_n_0565_cleanup
.endm
generate_composite_function \
    pixman_composite_over_8888_n_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_8888_n_0565_init, \
    pixman_composite_over_8888_n_0565_cleanup, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    8, /* src_basereg */ \
    24 /* mask_basereg */
/******************************************************************************/
/* Plain r5g6b5 -> r5g6b5 copy: no per-pixel processing, just
   load/store of 16-pixel blocks. */
.macro pixman_composite_src_0565_0565_process_pixblock_head
.endm
.macro pixman_composite_src_0565_0565_process_pixblock_tail
.endm
.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
    st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [DST_W], #32
    fetch_src_pixblock
    cache_preload 16, 16
.endm
generate_composite_function \
    pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
    FLAG_DST_WRITEONLY, \
    16, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0565_0565_process_pixblock_head, \
    pixman_composite_src_0565_0565_process_pixblock_tail, \
    pixman_composite_src_0565_0565_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */
/******************************************************************************/
/* Solid fill of an a8 surface: no per-pixel processing needed. */
.macro pixman_composite_src_n_8_process_pixblock_head
.endm
.macro pixman_composite_src_n_8_process_pixblock_tail
.endm
/* Store one block of 32 solid a8 pixels; v0-v3 hold the replicated
   color byte set up by the init macro below.  (Added the '#' on the
   post-index immediate for consistency with every other store in this
   file, e.g. the 0565 and 8888 variants.) */
.macro pixman_composite_src_n_8_process_pixblock_tail_head
    st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_W], #32
.endm
/* Broadcast the low byte of the color argument (w4) into v0-v3. */
.macro pixman_composite_src_n_8_init
    mov v0.s[0], w4
    dup v3.8b, v0.b[0]
    dup v2.8b, v0.b[0]
    dup v1.8b, v0.b[0]
    dup v0.8b, v0.b[0]
.endm
/* Nothing to tear down. */
.macro pixman_composite_src_n_8_cleanup
.endm
generate_composite_function \
    pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
    FLAG_DST_WRITEONLY, \
    32, /* number of pixels, processed in a single block */ \
    0, /* prefetch distance */ \
    pixman_composite_src_n_8_init, \
    pixman_composite_src_n_8_cleanup, \
    pixman_composite_src_n_8_process_pixblock_head, \
    pixman_composite_src_n_8_process_pixblock_tail, \
    pixman_composite_src_n_8_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */
/******************************************************************************/
/* Solid fill of a r5g6b5 surface: store pre-replicated halfwords. */
.macro pixman_composite_src_n_0565_process_pixblock_head
.endm
.macro pixman_composite_src_n_0565_process_pixblock_tail
.endm
.macro pixman_composite_src_n_0565_process_pixblock_tail_head
    st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [DST_W], #32
.endm
/* Broadcast the low halfword of the color argument (w4) into v0-v3. */
.macro pixman_composite_src_n_0565_init
    mov v0.s[0], w4
    dup v3.4h, v0.h[0]
    dup v2.4h, v0.h[0]
    dup v1.4h, v0.h[0]
    dup v0.4h, v0.h[0]
.endm
/* Nothing to tear down. */
.macro pixman_composite_src_n_0565_cleanup
.endm
generate_composite_function \
    pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
    FLAG_DST_WRITEONLY, \
    16, /* number of pixels, processed in a single block */ \
    0, /* prefetch distance */ \
    pixman_composite_src_n_0565_init, \
    pixman_composite_src_n_0565_cleanup, \
    pixman_composite_src_n_0565_process_pixblock_head, \
    pixman_composite_src_n_0565_process_pixblock_tail, \
    pixman_composite_src_n_0565_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */
/******************************************************************************/
/* Solid fill of an 8888 surface: store pre-replicated words. */
.macro pixman_composite_src_n_8888_process_pixblock_head
.endm
.macro pixman_composite_src_n_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_n_8888_process_pixblock_tail_head
    st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32
.endm
/* Broadcast the 32-bit color argument (w4) into v0-v3. */
.macro pixman_composite_src_n_8888_init
    mov v0.s[0], w4
    dup v3.2s, v0.s[0]
    dup v2.2s, v0.s[0]
    dup v1.2s, v0.s[0]
    dup v0.2s, v0.s[0]
.endm
/* Nothing to tear down. */
.macro pixman_composite_src_n_8888_cleanup
.endm
generate_composite_function \
    pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    0, /* prefetch distance */ \
    pixman_composite_src_n_8888_init, \
    pixman_composite_src_n_8888_cleanup, \
    pixman_composite_src_n_8888_process_pixblock_head, \
    pixman_composite_src_n_8888_process_pixblock_tail, \
    pixman_composite_src_n_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */
/******************************************************************************/
/* Plain 8888 -> 8888 copy: no per-pixel processing. */
.macro pixman_composite_src_8888_8888_process_pixblock_head
.endm
.macro pixman_composite_src_8888_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
    st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32
    fetch_src_pixblock
    cache_preload 8, 8
.endm
generate_composite_function \
    pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_8888_8888_process_pixblock_head, \
    pixman_composite_src_8888_8888_process_pixblock_tail, \
    pixman_composite_src_8888_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */
/******************************************************************************/
/* x8r8g8b8 -> a8r8g8b8: force the alpha byte to 0xFF by OR-ing in the
   0xFF000000 constant that the init macro places in v4. */
.macro pixman_composite_src_x888_8888_process_pixblock_head
    orr v0.8b, v0.8b, v4.8b
    orr v1.8b, v1.8b, v4.8b
    orr v2.8b, v2.8b, v4.8b
    orr v3.8b, v3.8b, v4.8b
.endm
.macro pixman_composite_src_x888_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_x888_8888_process_pixblock_tail_head
    st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32
    fetch_src_pixblock
    orr v0.8b, v0.8b, v4.8b
    orr v1.8b, v1.8b, v4.8b
    orr v2.8b, v2.8b, v4.8b
    orr v3.8b, v3.8b, v4.8b
    cache_preload 8, 8
.endm
/* v4 = 0xFF000000 per 32-bit lane (alpha mask). */
.macro pixman_composite_src_x888_8888_init
    movi v4.2s, #0xff, lsl 24
.endm
generate_composite_function \
    pixman_composite_src_x888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    pixman_composite_src_x888_8888_init, \
    default_cleanup, \
    pixman_composite_src_x888_8888_process_pixblock_head, \
    pixman_composite_src_x888_8888_process_pixblock_tail, \
    pixman_composite_src_x888_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */
/******************************************************************************/
/*
 * SRC of a solid color through an a8 mask onto 8888: each output
 * channel is solid_channel * mask / 255, computed with the
 * umull + ursra + rshrn rounding sequence.
 */
.macro pixman_composite_src_n_8_8888_process_pixblock_head
    /* expecting solid source in {v0, v1, v2, v3} */
    /* mask is in v24 (v25, v26, v27 are unused) */
    /* in */
    umull v8.8h, v24.8b, v0.8b
    umull v9.8h, v24.8b, v1.8b
    umull v10.8h, v24.8b, v2.8b
    umull v11.8h, v24.8b, v3.8b
    ursra v8.8h, v8.8h, #8
    ursra v9.8h, v9.8h, #8
    ursra v10.8h, v10.8h, #8
    ursra v11.8h, v11.8h, #8
.endm
.macro pixman_composite_src_n_8_8888_process_pixblock_tail
    rshrn v28.8b, v8.8h, #8
    rshrn v29.8b, v9.8h, #8
    rshrn v30.8b, v10.8h, #8
    rshrn v31.8b, v11.8h, #8
.endm
/* Tail+head with interleaved mask prefetch bookkeeping. */
.macro pixman_composite_src_n_8_8888_process_pixblock_tail_head
    fetch_mask_pixblock
    PF add PF_X, PF_X, #8
    rshrn v28.8b, v8.8h, #8
    PF tst PF_CTL, #0x0F
    rshrn v29.8b, v9.8h, #8
    PF beq 10f
    PF add PF_X, PF_X, #8
10:
    rshrn v30.8b, v10.8h, #8
    PF beq 10f
    PF sub PF_CTL, PF_CTL, #1
10:
    rshrn v31.8b, v11.8h, #8
    PF cmp PF_X, ORIG_W
    umull v8.8h, v24.8b, v0.8b
    PF lsl DUMMY, PF_X, #mask_bpp_shift
    PF prfm PREFETCH_MODE, [PF_MASK, DUMMY]
    umull v9.8h, v24.8b, v1.8b
    PF ble 10f
    PF sub PF_X, PF_X, ORIG_W
10:
    umull v10.8h, v24.8b, v2.8b
    PF ble 10f
    PF subs PF_CTL, PF_CTL, #0x10
10:
    umull v11.8h, v24.8b, v3.8b
    PF ble 10f
    PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift
    PF ldrsb DUMMY, [PF_MASK, DUMMY]
    PF add PF_MASK, PF_MASK, #1
10:
    st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    ursra v8.8h, v8.8h, #8
    ursra v9.8h, v9.8h, #8
    ursra v10.8h, v10.8h, #8
    ursra v11.8h, v11.8h, #8
.endm
/* Split the solid color (w4) into channel vectors v0-v3. */
.macro pixman_composite_src_n_8_8888_init
    mov v3.s[0], w4
    dup v0.8b, v3.b[0]
    dup v1.8b, v3.b[1]
    dup v2.8b, v3.b[2]
    dup v3.8b, v3.b[3]
.endm
/* Nothing to tear down. */
.macro pixman_composite_src_n_8_8888_cleanup
.endm
/*
 * Fix: the invocation previously ended with a dangling ", \" which
 * spliced the following separator-comment line into the macro call.
 * Terminate it after the tail_head argument; the dst_w/dst_r/src/mask
 * base registers fall back to the generate_composite_function defaults,
 * matching the register usage of the macros above.
 */
generate_composite_function \
pixman_composite_src_n_8_8888_asm_neon, 0, 8, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_src_n_8_8888_init, \
pixman_composite_src_n_8_8888_cleanup, \
pixman_composite_src_n_8_8888_process_pixblock_head, \
pixman_composite_src_n_8_8888_process_pixblock_tail, \
pixman_composite_src_n_8_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * src_n_8_8: a8 mask (v24-v27) multiplied by the solid alpha in v16,
 * producing an a8 destination.
 */
.macro pixman_composite_src_n_8_8_process_pixblock_head
umull v0.8h, v24.8b, v16.8b
umull v1.8h, v25.8b, v16.8b
umull v2.8h, v26.8b, v16.8b
umull v3.8h, v27.8b, v16.8b
/* rounded correction step of x*y/255: acc += (acc + 128) >> 8 */
ursra v0.8h, v0.8h, #8
ursra v1.8h, v1.8h, #8
ursra v2.8h, v2.8h, #8
ursra v3.8h, v3.8h, #8
.endm
.macro pixman_composite_src_n_8_8_process_pixblock_tail
/* narrow the rounded 16-bit products to 8-bit results */
rshrn v28.8b, v0.8h, #8
rshrn v29.8b, v1.8h, #8
rshrn v30.8b, v2.8h, #8
rshrn v31.8b, v3.8h, #8
.endm
/*
 * Pipelined tail+head for src_n_8_8 with the PF prefetch state machine
 * interleaved between arithmetic instructions.
 *
 * Fix: the first mask-prefetch shift was written "lsl DUMMY, PF_X,
 * mask_bpp_shift" without the '#' immediate marker, inconsistent with
 * every other bpp-shift use in this file; restored the '#'.
 */
.macro pixman_composite_src_n_8_8_process_pixblock_tail_head
fetch_mask_pixblock
PF add PF_X, PF_X, #8
rshrn v28.8b, v0.8h, #8
PF tst PF_CTL, #0x0F
rshrn v29.8b, v1.8h, #8
PF beq 10f
PF add PF_X, PF_X, #8
10:
rshrn v30.8b, v2.8h, #8
PF beq 10f
PF sub PF_CTL, PF_CTL, #1
10:
rshrn v31.8b, v3.8h, #8
PF cmp PF_X, ORIG_W
umull v0.8h, v24.8b, v16.8b
PF lsl DUMMY, PF_X, #mask_bpp_shift
PF prfm PREFETCH_MODE, [PF_MASK, DUMMY]
umull v1.8h, v25.8b, v16.8b
PF ble 10f
PF sub PF_X, PF_X, ORIG_W
10:
umull v2.8h, v26.8b, v16.8b
PF ble 10f
PF subs PF_CTL, PF_CTL, #0x10
10:
umull v3.8h, v27.8b, v16.8b
PF ble 10f
PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift
PF ldrsb DUMMY, [PF_MASK, DUMMY]
PF add PF_MASK, PF_MASK, #1
10:
st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
ursra v0.8h, v0.8h, #8
ursra v1.8h, v1.8h, #8
ursra v2.8h, v2.8h, #8
ursra v3.8h, v3.8h, #8
.endm
/* Broadcast the alpha byte of the solid colour (w4) into all lanes of v16. */
.macro pixman_composite_src_n_8_8_init
mov v16.s[0], w4
dup v16.8b, v16.b[3]
.endm
.macro pixman_composite_src_n_8_8_cleanup
.endm
generate_composite_function \
pixman_composite_src_n_8_8_asm_neon, 0, 8, 8, \
FLAG_DST_WRITEONLY, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_src_n_8_8_init, \
pixman_composite_src_n_8_8_cleanup, \
pixman_composite_src_n_8_8_process_pixblock_head, \
pixman_composite_src_n_8_8_process_pixblock_tail, \
pixman_composite_src_n_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * over_n_8_8888: OVER of a solid colour through an a8 mask onto an
 * a8r8g8b8 destination. src' = n*mask/255, then dst = src' + dst*(1-a')/255.
 */
.macro pixman_composite_over_n_8_8888_process_pixblock_head
/* expecting deinterleaved source data in {v8, v9, v10, v11} */
/* v8 - blue, v9 - green, v10 - red, v11 - alpha */
/* and destination data in {v4, v5, v6, v7} */
/* mask is in v24 (v25, v26, v27 are unused) */
/* in */
umull v12.8h, v24.8b, v8.8b
umull v13.8h, v24.8b, v9.8b
umull v14.8h, v24.8b, v10.8b
umull v15.8h, v24.8b, v11.8b
/* urshr+raddhn pair computes the rounded x*y/255 per channel */
urshr v16.8h, v12.8h, #8
urshr v17.8h, v13.8h, #8
urshr v18.8h, v14.8h, #8
urshr v19.8h, v15.8h, #8
raddhn v0.8b, v12.8h, v16.8h
raddhn v1.8b, v13.8h, v17.8h
raddhn v2.8b, v14.8h, v18.8h
raddhn v3.8b, v15.8h, v19.8h
mvn v25.8b, v3.8b /* get inverted alpha */
/* source: v0 - blue, v1 - green, v2 - red, v3 - alpha */
/* destination: v4 - blue, v5 - green, v6 - red, v7 - alpha */
/* now do alpha blending */
umull v12.8h, v25.8b, v4.8b
umull v13.8h, v25.8b, v5.8b
umull v14.8h, v25.8b, v6.8b
umull v15.8h, v25.8b, v7.8b
.endm
.macro pixman_composite_over_n_8_8888_process_pixblock_tail
/* finish dst*(255-a)/255 and saturating-add the masked source */
urshr v16.8h, v12.8h, #8
urshr v17.8h, v13.8h, #8
urshr v18.8h, v14.8h, #8
urshr v19.8h, v15.8h, #8
raddhn v28.8b, v16.8h, v12.8h
raddhn v29.8b, v17.8h, v13.8h
raddhn v30.8b, v18.8h, v14.8h
raddhn v31.8b, v19.8h, v15.8h
uqadd v28.8b, v0.8b, v28.8b
uqadd v29.8b, v1.8b, v29.8b
uqadd v30.8b, v2.8b, v30.8b
uqadd v31.8b, v3.8b, v31.8b
.endm
/*
 * Pipelined tail+head; dst read, mask fetch, both prefetch streams
 * (dst and mask) and the arithmetic are interleaved for throughput.
 */
.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
urshr v16.8h, v12.8h, #8
ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
urshr v17.8h, v13.8h, #8
fetch_mask_pixblock
urshr v18.8h, v14.8h, #8
PF add PF_X, PF_X, #8
urshr v19.8h, v15.8h, #8
PF tst PF_CTL, #0x0F
raddhn v28.8b, v16.8h, v12.8h
PF beq 10f
PF add PF_X, PF_X, #8
10:
raddhn v29.8b, v17.8h, v13.8h
PF beq 10f
PF sub PF_CTL, PF_CTL, #1
10:
raddhn v30.8b, v18.8h, v14.8h
PF cmp PF_X, ORIG_W
raddhn v31.8b, v19.8h, v15.8h
PF lsl DUMMY, PF_X, #dst_bpp_shift
PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
umull v16.8h, v24.8b, v8.8b
PF lsl DUMMY, PF_X, #mask_bpp_shift
PF prfm PREFETCH_MODE, [PF_MASK, DUMMY]
umull v17.8h, v24.8b, v9.8b
PF ble 10f
PF sub PF_X, PF_X, ORIG_W
10:
umull v18.8h, v24.8b, v10.8b
PF ble 10f
PF subs PF_CTL, PF_CTL, #0x10
10:
umull v19.8h, v24.8b, v11.8b
PF ble 10f
PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
PF ldrsb DUMMY, [PF_DST, DUMMY]
PF add PF_DST, PF_DST, #1
10:
uqadd v28.8b, v0.8b, v28.8b
PF ble 10f
PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift
PF ldrsb DUMMY, [PF_MASK, DUMMY]
PF add PF_MASK, PF_MASK, #1
10:
uqadd v29.8b, v1.8b, v29.8b
uqadd v30.8b, v2.8b, v30.8b
uqadd v31.8b, v3.8b, v31.8b
urshr v12.8h, v16.8h, #8
urshr v13.8h, v17.8h, #8
urshr v14.8h, v18.8h, #8
urshr v15.8h, v19.8h, #8
raddhn v0.8b, v16.8h, v12.8h
raddhn v1.8b, v17.8h, v13.8h
raddhn v2.8b, v18.8h, v14.8h
raddhn v3.8b, v19.8h, v15.8h
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
mvn v25.8b, v3.8b
umull v12.8h, v25.8b, v4.8b
umull v13.8h, v25.8b, v5.8b
umull v14.8h, v25.8b, v6.8b
umull v15.8h, v25.8b, v7.8b
.endm
/* Broadcast the solid colour (w4) into per-channel vectors v8-v11. */
.macro pixman_composite_over_n_8_8888_init
mov v11.s[0], w4
dup v8.8b, v11.b[0]
dup v9.8b, v11.b[1]
dup v10.8b, v11.b[2]
dup v11.8b, v11.b[3]
.endm
.macro pixman_composite_over_n_8_8888_cleanup
.endm
generate_composite_function \
pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8_8888_init, \
pixman_composite_over_n_8_8888_cleanup, \
pixman_composite_over_n_8_8888_process_pixblock_head, \
pixman_composite_over_n_8_8888_process_pixblock_tail, \
pixman_composite_over_n_8_8888_process_pixblock_tail_head
/******************************************************************************/
/*
 * over_n_8_8: OVER of a solid alpha (v8, broadcast by init) through an
 * a8 mask (v24-v27) onto an a8 destination (v4-v7).
 */
.macro pixman_composite_over_n_8_8_process_pixblock_head
umull v0.8h, v24.8b, v8.8b
umull v1.8h, v25.8b, v8.8b
umull v2.8h, v26.8b, v8.8b
umull v3.8h, v27.8b, v8.8b
/* rounded x*y/255 via urshr+raddhn */
urshr v10.8h, v0.8h, #8
urshr v11.8h, v1.8h, #8
urshr v12.8h, v2.8h, #8
urshr v13.8h, v3.8h, #8
raddhn v0.8b, v0.8h, v10.8h
raddhn v1.8b, v1.8h, v11.8h
raddhn v2.8b, v2.8h, v12.8h
raddhn v3.8b, v3.8h, v13.8h
/* invert the masked alpha and start dst*(255-a) */
mvn v24.8b, v0.8b
mvn v25.8b, v1.8b
mvn v26.8b, v2.8b
mvn v27.8b, v3.8b
umull v10.8h, v24.8b, v4.8b
umull v11.8h, v25.8b, v5.8b
umull v12.8h, v26.8b, v6.8b
umull v13.8h, v27.8b, v7.8b
.endm
.macro pixman_composite_over_n_8_8_process_pixblock_tail
urshr v14.8h, v10.8h, #8
urshr v15.8h, v11.8h, #8
urshr v16.8h, v12.8h, #8
urshr v17.8h, v13.8h, #8
raddhn v28.8b, v14.8h, v10.8h
raddhn v29.8b, v15.8h, v11.8h
raddhn v30.8b, v16.8h, v12.8h
raddhn v31.8b, v17.8h, v13.8h
/* saturating add of masked source over attenuated destination */
uqadd v28.8b, v0.8b, v28.8b
uqadd v29.8b, v1.8b, v29.8b
uqadd v30.8b, v2.8b, v30.8b
uqadd v31.8b, v3.8b, v31.8b
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_n_8_8_process_pixblock_tail_head
ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
pixman_composite_over_n_8_8_process_pixblock_tail
fetch_mask_pixblock
cache_preload 32, 32
st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
pixman_composite_over_n_8_8_process_pixblock_head
.endm
/* Broadcast the alpha byte of the solid colour (w4) into v8. */
.macro pixman_composite_over_n_8_8_init
mov v8.s[0], w4
dup v8.8b, v8.b[3]
.endm
.macro pixman_composite_over_n_8_8_cleanup
.endm
generate_composite_function \
pixman_composite_over_n_8_8_asm_neon, 0, 8, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8_8_init, \
pixman_composite_over_n_8_8_cleanup, \
pixman_composite_over_n_8_8_process_pixblock_head, \
pixman_composite_over_n_8_8_process_pixblock_tail, \
pixman_composite_over_n_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * over_n_8888_8888_ca: component-alpha OVER of a solid colour through
 * an a8r8g8b8 mask onto an a8r8g8b8 destination.
 */
.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_head
/*
 * 'combine_mask_ca' replacement
 *
 * input: solid src (n) in {v8, v9, v10, v11}
 * dest in {v4, v5, v6, v7 }
 * mask in {v24, v25, v26, v27}
 * output: updated src in {v0, v1, v2, v3 }
 * updated mask in {v24, v25, v26, v3 }
 */
umull v0.8h, v24.8b, v8.8b
umull v1.8h, v25.8b, v9.8b
umull v2.8h, v26.8b, v10.8b
umull v3.8h, v27.8b, v11.8b
/* per-component mask * src alpha (v12=a*g, v13=a*b, v14=a*r) */
umull v12.8h, v11.8b, v25.8b
umull v13.8h, v11.8b, v24.8b
umull v14.8h, v11.8b, v26.8b
urshr v15.8h, v0.8h, #8
urshr v16.8h, v1.8h, #8
urshr v17.8h, v2.8h, #8
raddhn v0.8b, v0.8h, v15.8h
raddhn v1.8b, v1.8h, v16.8h
raddhn v2.8b, v2.8h, v17.8h
urshr v15.8h, v13.8h, #8
urshr v16.8h, v12.8h, #8
urshr v17.8h, v14.8h, #8
urshr v18.8h, v3.8h, #8
raddhn v24.8b, v13.8h, v15.8h
raddhn v25.8b, v12.8h, v16.8h
raddhn v26.8b, v14.8h, v17.8h
raddhn v3.8b, v3.8h, v18.8h
/*
 * 'combine_over_ca' replacement
 *
 * output: updated dest in {v28, v29, v30, v31}
 */
mvn v24.8b, v24.8b
mvn v25.8b, v25.8b
mvn v26.8b, v26.8b
mvn v27.8b, v3.8b
umull v12.8h, v24.8b, v4.8b
umull v13.8h, v25.8b, v5.8b
umull v14.8h, v26.8b, v6.8b
umull v15.8h, v27.8b, v7.8b
.endm
.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail
/* ... continue 'combine_over_ca' replacement */
urshr v16.8h, v12.8h, #8
urshr v17.8h, v13.8h, #8
urshr v18.8h, v14.8h, #8
urshr v19.8h, v15.8h, #8
raddhn v28.8b, v16.8h, v12.8h
raddhn v29.8b, v17.8h, v13.8h
raddhn v30.8b, v18.8h, v14.8h
raddhn v31.8b, v19.8h, v15.8h
/* saturating add of masked source over attenuated destination */
uqadd v28.8b, v0.8b, v28.8b
uqadd v29.8b, v1.8b, v29.8b
uqadd v30.8b, v2.8b, v30.8b
uqadd v31.8b, v3.8b, v31.8b
.endm
/* Pipelined tail+head: dst read, mask fetch and preload interleaved. */
.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head
urshr v16.8h, v12.8h, #8
urshr v17.8h, v13.8h, #8
ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
urshr v18.8h, v14.8h, #8
urshr v19.8h, v15.8h, #8
raddhn v28.8b, v16.8h, v12.8h
raddhn v29.8b, v17.8h, v13.8h
raddhn v30.8b, v18.8h, v14.8h
raddhn v31.8b, v19.8h, v15.8h
fetch_mask_pixblock
uqadd v28.8b, v0.8b, v28.8b
uqadd v29.8b, v1.8b, v29.8b
uqadd v30.8b, v2.8b, v30.8b
uqadd v31.8b, v3.8b, v31.8b
cache_preload 8, 8
pixman_composite_over_n_8888_8888_ca_process_pixblock_head
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm
/* Broadcast the solid colour (w4) into per-channel vectors v8-v11. */
.macro pixman_composite_over_n_8888_8888_ca_init
mov v13.s[0], w4
dup v8.8b, v13.b[0]
dup v9.8b, v13.b[1]
dup v10.8b, v13.b[2]
dup v11.8b, v13.b[3]
.endm
.macro pixman_composite_over_n_8888_8888_ca_cleanup
.endm
generate_composite_function \
pixman_composite_over_n_8888_8888_ca_asm_neon, 0, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8888_8888_ca_init, \
pixman_composite_over_n_8888_8888_ca_cleanup, \
pixman_composite_over_n_8888_8888_ca_process_pixblock_head, \
pixman_composite_over_n_8888_8888_ca_process_pixblock_tail, \
pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head
/******************************************************************************/
/*
 * over_n_8888_0565_ca head: component-alpha OVER of a solid colour onto
 * an r5g6b5 destination; includes in-place 0565 -> planar-8888 unpack.
 */
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head
/*
 * 'combine_mask_ca' replacement
 *
 * input: solid src (n) in {v8, v9, v10, v11} [B, G, R, A]
 * mask in {v24, v25, v26} [B, G, R]
 * output: updated src in {v0, v1, v2 } [B, G, R]
 * updated mask in {v24, v25, v26} [B, G, R]
 */
umull v0.8h, v24.8b, v8.8b
umull v1.8h, v25.8b, v9.8b
umull v2.8h, v26.8b, v10.8b
umull v12.8h, v11.8b, v24.8b
umull v13.8h, v11.8b, v25.8b
umull v14.8h, v11.8b, v26.8b
urshr v15.8h, v0.8h, #8
urshr v16.8h, v1.8h, #8
urshr v17.8h, v2.8h, #8
raddhn v0.8b, v0.8h, v15.8h
raddhn v1.8b, v1.8h, v16.8h
raddhn v2.8b, v2.8h, v17.8h
urshr v19.8h, v12.8h, #8
urshr v20.8h, v13.8h, #8
urshr v21.8h, v14.8h, #8
raddhn v24.8b, v12.8h, v19.8h
raddhn v25.8b, v13.8h, v20.8h
/*
 * convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format
 * and put data into v16 - blue, v17 - green, v18 - red
 */
mov v4.d[1], v5.d[0]
shrn v17.8b, v4.8h, #3
shrn v18.8b, v4.8h, #8
raddhn v26.8b, v14.8h, v21.8h
sli v4.8h, v4.8h, #5
sri v18.8b, v18.8b, #5
sri v17.8b, v17.8b, #6
/*
 * 'combine_over_ca' replacement
 *
 * output: updated dest in v16 - blue, v17 - green, v18 - red
 */
mvn v24.8b, v24.8b
mvn v25.8b, v25.8b
shrn v16.8b, v4.8h, #2
mvn v26.8b, v26.8b
umull v5.8h, v16.8b, v24.8b
umull v6.8h, v17.8b, v25.8b
umull v7.8h, v18.8b, v26.8b
.endm
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail
/* ... continue 'combine_over_ca' replacement */
urshr v13.8h, v5.8h, #8
urshr v14.8h, v6.8h, #8
urshr v15.8h, v7.8h, #8
raddhn v16.8b, v13.8h, v5.8h
raddhn v17.8b, v14.8h, v6.8h
raddhn v18.8b, v15.8h, v7.8h
/* saturating add of masked source over attenuated destination */
uqadd v16.8b, v0.8b, v16.8b
uqadd v17.8b, v1.8b, v17.8b
uqadd v18.8b, v2.8b, v18.8b
/*
 * convert the results in v16, v17, v18 to r5g6b5 and store
 * them into {v14}
 */
ushll v14.8h, v18.8b, #7
sli v14.8h, v14.8h, #1
ushll v12.8h, v17.8b, #7
sli v12.8h, v12.8h, #1
ushll v13.8h, v16.8b, #7
sli v13.8h, v13.8h, #1
sri v14.8h, v12.8h, #5
sri v14.8h, v13.8h, #11
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
.endm
/*
 * Pipelined tail+head: the previous block's 0565 pack/store overlaps
 * the next block's mask fetch, dst load and combine arithmetic.
 */
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
fetch_mask_pixblock
urshr v13.8h, v5.8h, #8
urshr v14.8h, v6.8h, #8
ld1 {v4.8h}, [DST_R], #16
urshr v15.8h, v7.8h, #8
raddhn v16.8b, v13.8h, v5.8h
raddhn v17.8b, v14.8h, v6.8h
raddhn v18.8b, v15.8h, v7.8h
mov v5.d[0], v4.d[1]
/* process_pixblock_head */
/*
 * 'combine_mask_ca' replacement
 *
 * input: solid src (n) in {v8, v9, v10, v11} [B, G, R, A]
 * mask in {v24, v25, v26} [B, G, R]
 * output: updated src in {v0, v1, v2 } [B, G, R]
 * updated mask in {v24, v25, v26} [B, G, R]
 */
uqadd v16.8b, v0.8b, v16.8b
uqadd v17.8b, v1.8b, v17.8b
uqadd v18.8b, v2.8b, v18.8b
umull v0.8h, v24.8b, v8.8b
umull v1.8h, v25.8b, v9.8b
umull v2.8h, v26.8b, v10.8b
/*
 * convert the result in v16, v17, v18 to r5g6b5 and store
 * it into {v14}
 */
ushll v14.8h, v18.8b, #7
sli v14.8h, v14.8h, #1
ushll v18.8h, v16.8b, #7
sli v18.8h, v18.8h, #1
ushll v19.8h, v17.8b, #7
sli v19.8h, v19.8h, #1
umull v12.8h, v11.8b, v24.8b
sri v14.8h, v19.8h, #5
umull v13.8h, v11.8b, v25.8b
umull v15.8h, v11.8b, v26.8b
sri v14.8h, v18.8h, #11
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
cache_preload 8, 8
urshr v16.8h, v0.8h, #8
urshr v17.8h, v1.8h, #8
urshr v18.8h, v2.8h, #8
raddhn v0.8b, v0.8h, v16.8h
raddhn v1.8b, v1.8h, v17.8h
raddhn v2.8b, v2.8h, v18.8h
urshr v19.8h, v12.8h, #8
urshr v20.8h, v13.8h, #8
urshr v21.8h, v15.8h, #8
raddhn v24.8b, v12.8h, v19.8h
raddhn v25.8b, v13.8h, v20.8h
/*
 * convert 8 r5g6b5 pixel data from {v4, v5} to planar
 * 8-bit format and put data into v16 - blue, v17 - green,
 * v18 - red
 */
mov v4.d[1], v5.d[0]
shrn v17.8b, v4.8h, #3
shrn v18.8b, v4.8h, #8
raddhn v26.8b, v15.8h, v21.8h
sli v4.8h, v4.8h, #5
sri v17.8b, v17.8b, #6
sri v18.8b, v18.8b, #5
/*
 * 'combine_over_ca' replacement
 *
 * output: updated dest in v16 - blue, v17 - green, v18 - red
 */
mvn v24.8b, v24.8b
mvn v25.8b, v25.8b
shrn v16.8b, v4.8h, #2
mvn v26.8b, v26.8b
umull v5.8h, v16.8b, v24.8b
umull v6.8h, v17.8b, v25.8b
umull v7.8h, v18.8b, v26.8b
st1 {v14.8h}, [DST_W], #16
.endm
/* Broadcast the solid colour (w4) into per-channel vectors v8-v11. */
.macro pixman_composite_over_n_8888_0565_ca_init
mov v13.s[0], w4
dup v8.8b, v13.b[0]
dup v9.8b, v13.b[1]
dup v10.8b, v13.b[2]
dup v11.8b, v13.b[3]
.endm
.macro pixman_composite_over_n_8888_0565_ca_cleanup
.endm
generate_composite_function \
pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8888_0565_ca_init, \
pixman_composite_over_n_8888_0565_ca_cleanup, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
/******************************************************************************/
/*
 * in_n_8: multiply the a8 destination (v4-v7) by the solid alpha in v3
 * (broadcast by init), i.e. dst = dst * a / 255.
 */
.macro pixman_composite_in_n_8_process_pixblock_head
/* expecting source data in {v0, v1, v2, v3} */
/* and destination data in {v4, v5, v6, v7} */
umull v8.8h, v4.8b, v3.8b
umull v9.8h, v5.8b, v3.8b
umull v10.8h, v6.8b, v3.8b
umull v11.8h, v7.8b, v3.8b
.endm
.macro pixman_composite_in_n_8_process_pixblock_tail
/* rounded x*y/255 via urshr+raddhn */
urshr v14.8h, v8.8h, #8
urshr v15.8h, v9.8h, #8
urshr v12.8h, v10.8h, #8
urshr v13.8h, v11.8h, #8
raddhn v28.8b, v8.8h, v14.8h
raddhn v29.8b, v9.8h, v15.8h
raddhn v30.8b, v10.8h, v12.8h
raddhn v31.8b, v11.8h, v13.8h
.endm
.macro pixman_composite_in_n_8_process_pixblock_tail_head
pixman_composite_in_n_8_process_pixblock_tail
ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
cache_preload 32, 32
pixman_composite_in_n_8_process_pixblock_head
st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm
/* Broadcast the alpha byte of the solid colour (w4) into v3. */
.macro pixman_composite_in_n_8_init
mov v3.s[0], w4
dup v3.8b, v3.b[3]
.endm
.macro pixman_composite_in_n_8_cleanup
.endm
generate_composite_function \
pixman_composite_in_n_8_asm_neon, 0, 0, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_in_n_8_init, \
pixman_composite_in_n_8_cleanup, \
pixman_composite_in_n_8_process_pixblock_head, \
pixman_composite_in_n_8_process_pixblock_tail, \
pixman_composite_in_n_8_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
24 /* mask_basereg */
/*
 * add_n_8_8: dst = dst + mask * n.alpha / 255 (saturating), a8 dest.
 */
.macro pixman_composite_add_n_8_8_process_pixblock_head
/* solid source alpha is in v11 (broadcast by init; v8-v10 unused here) */
/* and destination data in {v4, v5, v6, v7} */
/* mask is in v24, v25, v26, v27 */
umull v0.8h, v24.8b, v11.8b
umull v1.8h, v25.8b, v11.8b
umull v2.8h, v26.8b, v11.8b
umull v3.8h, v27.8b, v11.8b
/* rounded x*y/255 via urshr+raddhn */
urshr v12.8h, v0.8h, #8
urshr v13.8h, v1.8h, #8
urshr v14.8h, v2.8h, #8
urshr v15.8h, v3.8h, #8
raddhn v0.8b, v0.8h, v12.8h
raddhn v1.8b, v1.8h, v13.8h
raddhn v2.8b, v2.8h, v14.8h
raddhn v3.8b, v3.8h, v15.8h
/* saturating add onto the destination */
uqadd v28.8b, v0.8b, v4.8b
uqadd v29.8b, v1.8b, v5.8b
uqadd v30.8b, v2.8b, v6.8b
uqadd v31.8b, v3.8b, v7.8b
.endm
.macro pixman_composite_add_n_8_8_process_pixblock_tail
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
pixman_composite_add_n_8_8_process_pixblock_tail
st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
fetch_mask_pixblock
cache_preload 32, 32
pixman_composite_add_n_8_8_process_pixblock_head
.endm
/* Broadcast the alpha byte of the solid colour (w4) into v11. */
.macro pixman_composite_add_n_8_8_init
mov v11.s[0], w4
dup v11.8b, v11.b[3]
.endm
.macro pixman_composite_add_n_8_8_cleanup
.endm
generate_composite_function \
pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_add_n_8_8_init, \
pixman_composite_add_n_8_8_cleanup, \
pixman_composite_add_n_8_8_process_pixblock_head, \
pixman_composite_add_n_8_8_process_pixblock_tail, \
pixman_composite_add_n_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * add_8_8_8: dst = dst + src * mask / 255 (saturating), all a8.
 */
.macro pixman_composite_add_8_8_8_process_pixblock_head
/* expecting source data in {v0, v1, v2, v3} */
/* destination data in {v4, v5, v6, v7} */
/* mask in {v24, v25, v26, v27} */
umull v8.8h, v24.8b, v0.8b
umull v9.8h, v25.8b, v1.8b
umull v10.8h, v26.8b, v2.8b
umull v11.8h, v27.8b, v3.8b
/* rounded x*y/255 via urshr+raddhn (v0/v1 reused as temporaries) */
urshr v0.8h, v8.8h, #8
urshr v1.8h, v9.8h, #8
urshr v12.8h, v10.8h, #8
urshr v13.8h, v11.8h, #8
raddhn v0.8b, v0.8h, v8.8h
raddhn v1.8b, v1.8h, v9.8h
raddhn v2.8b, v12.8h, v10.8h
raddhn v3.8b, v13.8h, v11.8h
/* saturating add onto the destination */
uqadd v28.8b, v0.8b, v4.8b
uqadd v29.8b, v1.8b, v5.8b
uqadd v30.8b, v2.8b, v6.8b
uqadd v31.8b, v3.8b, v7.8b
.endm
.macro pixman_composite_add_8_8_8_process_pixblock_tail
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
pixman_composite_add_8_8_8_process_pixblock_tail
st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
fetch_mask_pixblock
fetch_src_pixblock
cache_preload 32, 32
pixman_composite_add_8_8_8_process_pixblock_head
.endm
.macro pixman_composite_add_8_8_8_init
.endm
.macro pixman_composite_add_8_8_8_cleanup
.endm
generate_composite_function \
pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
FLAG_DST_READWRITE, \
32, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_add_8_8_8_init, \
pixman_composite_add_8_8_8_cleanup, \
pixman_composite_add_8_8_8_process_pixblock_head, \
pixman_composite_add_8_8_8_process_pixblock_tail, \
pixman_composite_add_8_8_8_process_pixblock_tail_head
/******************************************************************************/
/*
 * add_8888_8888_8888: dst = dst + src * mask.alpha / 255 (saturating),
 * per channel; the mask alpha lives in v27.
 */
.macro pixman_composite_add_8888_8888_8888_process_pixblock_head
/* expecting source data in {v0, v1, v2, v3} */
/* destination data in {v4, v5, v6, v7} */
/* mask in {v24, v25, v26, v27} */
umull v8.8h, v27.8b, v0.8b
umull v9.8h, v27.8b, v1.8b
umull v10.8h, v27.8b, v2.8b
umull v11.8h, v27.8b, v3.8b
/* 1 cycle bubble */
/* rounded correction step of x*y/255: acc += (acc + 128) >> 8 */
ursra v8.8h, v8.8h, #8
ursra v9.8h, v9.8h, #8
ursra v10.8h, v10.8h, #8
ursra v11.8h, v11.8h, #8
.endm
.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail
/* 2 cycle bubble */
rshrn v28.8b, v8.8h, #8
rshrn v29.8b, v9.8h, #8
rshrn v30.8b, v10.8h, #8
rshrn v31.8b, v11.8h, #8
/* saturating add onto the destination */
uqadd v28.8b, v4.8b, v28.8b
uqadd v29.8b, v5.8b, v29.8b
uqadd v30.8b, v6.8b, v30.8b
uqadd v31.8b, v7.8b, v31.8b
.endm
/* Pipelined tail+head: fetches, stores and arithmetic interleaved. */
.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
fetch_src_pixblock
rshrn v28.8b, v8.8h, #8
fetch_mask_pixblock
rshrn v29.8b, v9.8h, #8
umull v8.8h, v27.8b, v0.8b
rshrn v30.8b, v10.8h, #8
umull v9.8h, v27.8b, v1.8b
rshrn v31.8b, v11.8h, #8
umull v10.8h, v27.8b, v2.8b
umull v11.8h, v27.8b, v3.8b
uqadd v28.8b, v4.8b, v28.8b
uqadd v29.8b, v5.8b, v29.8b
uqadd v30.8b, v6.8b, v30.8b
uqadd v31.8b, v7.8b, v31.8b
ursra v8.8h, v8.8h, #8
ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
ursra v9.8h, v9.8h, #8
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
ursra v10.8h, v10.8h, #8
cache_preload 8, 8
ursra v11.8h, v11.8h, #8
.endm
generate_composite_function \
pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
24 /* mask_basereg */
/* Single-scanline variant reusing the add_8888_8888_8888 macros. */
generate_composite_function_single_scanline \
pixman_composite_scanline_add_mask_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
24 /* mask_basereg */
/******************************************************************************/
/* a8 mask variant: mask_basereg 27 puts the fetched a8 mask in v27,
 * where the shared add_8888_8888_8888 macros expect the alpha. */
generate_composite_function \
pixman_composite_add_8888_8_8888_asm_neon, 32, 8, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
27 /* mask_basereg */
/******************************************************************************/
/* Broadcast the solid colour (w4) into per-channel vectors v0-v3
 * (v3 is duplicated last so its lanes stay valid while read). */
.macro pixman_composite_add_n_8_8888_init
mov v3.s[0], w4
dup v0.8b, v3.b[0]
dup v1.8b, v3.b[1]
dup v2.8b, v3.b[2]
dup v3.8b, v3.b[3]
.endm
.macro pixman_composite_add_n_8_8888_cleanup
.endm
/* Solid source + a8 mask, reusing the add_8888_8888_8888 macros
 * (src_bpp 0 keeps the solid colour in v0-v3; mask fetched into v27). */
generate_composite_function \
pixman_composite_add_n_8_8888_asm_neon, 0, 8, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_add_n_8_8888_init, \
pixman_composite_add_n_8_8888_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
27 /* mask_basereg */
/******************************************************************************/
/* Broadcast the alpha byte of the solid mask (w6) into v27, where the
 * shared add_8888_8888_8888 macros expect the mask alpha. */
.macro pixman_composite_add_8888_n_8888_init
mov v27.s[0], w6
dup v27.8b, v27.b[3]
.endm
.macro pixman_composite_add_8888_n_8888_cleanup
.endm
generate_composite_function \
pixman_composite_add_8888_n_8888_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_add_8888_n_8888_init, \
pixman_composite_add_8888_n_8888_cleanup, \
pixman_composite_add_8888_8888_8888_process_pixblock_head, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
27 /* mask_basereg */
/******************************************************************************/
/*
 * out_reverse_8888_n_8888: src' = src * mask.alpha / 255,
 * dst = dst * (255 - src'.alpha) / 255.
 */
.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
/* expecting source data in {v0, v1, v2, v3} */
/* destination data in {v4, v5, v6, v7} */
/* solid mask is in v15 */
/* 'in' */
umull v11.8h, v15.8b, v3.8b
umull v10.8h, v15.8b, v2.8b
umull v9.8h, v15.8b, v1.8b
umull v8.8h, v15.8b, v0.8b
/* rounded x*y/255 via urshr+raddhn */
urshr v16.8h, v11.8h, #8
urshr v14.8h, v10.8h, #8
urshr v13.8h, v9.8h, #8
urshr v12.8h, v8.8h, #8
raddhn v3.8b, v11.8h, v16.8h
raddhn v2.8b, v10.8h, v14.8h
raddhn v1.8b, v9.8h, v13.8h
raddhn v0.8b, v8.8h, v12.8h
mvn v24.8b, v3.8b /* get inverted alpha */
/* now do alpha blending */
umull v8.8h, v24.8b, v4.8b
umull v9.8h, v24.8b, v5.8b
umull v10.8h, v24.8b, v6.8b
umull v11.8h, v24.8b, v7.8b
.endm
.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
urshr v16.8h, v8.8h, #8
urshr v17.8h, v9.8h, #8
urshr v18.8h, v10.8h, #8
urshr v19.8h, v11.8h, #8
raddhn v28.8b, v16.8h, v8.8h
raddhn v29.8b, v17.8h, v9.8h
raddhn v30.8b, v18.8h, v10.8h
raddhn v31.8b, v19.8h, v11.8h
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head
ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
fetch_src_pixblock
cache_preload 8, 8
fetch_mask_pixblock
pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm
/*
 * Fix: the tail_head argument lacked its separating comma before the
 * following base-register arguments (every complete sibling invocation
 * in this file uses ", \" between arguments); restored it.
 * mask_basereg 12 fetches the mask block starting at v12, placing the
 * alpha in v15 where the macros above expect the solid mask.
 */
generate_composite_function_single_scanline \
pixman_composite_scanline_out_reverse_mask_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_out_reverse_8888_n_8888_process_pixblock_head, \
pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail, \
pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
12 /* mask_basereg */
/******************************************************************************/
/*
 * over_8888_n_8888: OVER of an a8r8g8b8 source through a solid mask
 * (alpha in v15) — reuses the out_reverse macros and adds the source.
 */
.macro pixman_composite_over_8888_n_8888_process_pixblock_head
pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
.endm
.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
/* saturating add of masked source over attenuated destination */
uqadd v28.8b, v0.8b, v28.8b
uqadd v29.8b, v1.8b, v29.8b
uqadd v30.8b, v2.8b, v30.8b
uqadd v31.8b, v3.8b, v31.8b
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
pixman_composite_over_8888_n_8888_process_pixblock_tail
fetch_src_pixblock
cache_preload 8, 8
pixman_composite_over_8888_n_8888_process_pixblock_head
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm
/* Broadcast the alpha byte of the solid mask (w6) into v15. */
.macro pixman_composite_over_8888_n_8888_init
mov v15.s[0], w6
dup v15.8b, v15.b[3]
.endm
.macro pixman_composite_over_8888_n_8888_cleanup
.endm
generate_composite_function \
pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_8888_n_8888_init, \
pixman_composite_over_8888_n_8888_cleanup, \
pixman_composite_over_8888_n_8888_process_pixblock_head, \
pixman_composite_over_8888_n_8888_process_pixblock_tail, \
pixman_composite_over_8888_n_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
12 /* mask_basereg */
/******************************************************************************/
/* TODO: expand macros and do better instructions scheduling */
/* Same as over_8888_n_8888 tail+head, plus a per-pixel mask fetch. */
.macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head
ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
pixman_composite_over_8888_n_8888_process_pixblock_tail
fetch_src_pixblock
cache_preload 8, 8
fetch_mask_pixblock
pixman_composite_over_8888_n_8888_process_pixblock_head
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm
/*
 * Fix: restored the separating comma after the tail_head argument
 * (every complete sibling invocation in this file uses ", \").
 * mask_basereg 12 places the fetched mask alpha in v15.
 */
generate_composite_function \
pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_n_8888_process_pixblock_head, \
pixman_composite_over_8888_n_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
12 /* mask_basereg */
/*
 * Single-scanline variant of over_8888_8888_8888.
 * Fix: restored the separating comma after the tail_head argument
 * (every complete sibling invocation in this file uses ", \").
 */
generate_composite_function_single_scanline \
pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_n_8888_process_pixblock_head, \
pixman_composite_over_8888_n_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
12 /* mask_basereg */
/******************************************************************************/
/* TODO: expand macros and do better instructions scheduling */
/* a8-mask variant; the mask block is fetched and its alpha consumed
 * via v15 (see the invocation's mask_basereg). */
.macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head
ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
pixman_composite_over_8888_n_8888_process_pixblock_tail
fetch_src_pixblock
cache_preload 8, 8
fetch_mask_pixblock
pixman_composite_over_8888_n_8888_process_pixblock_head
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm
/*
 * Fix: restored the separating comma after the tail_head argument
 * (every complete sibling invocation in this file uses ", \").
 * mask_basereg 15 loads the a8 mask directly into v15.
 */
generate_composite_function \
pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_n_8888_process_pixblock_head, \
pixman_composite_over_8888_n_8888_process_pixblock_tail, \
pixman_composite_over_8888_8_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/* Plain 24bpp -> 24bpp copy: no per-pixel processing is needed, so the
 * head and tail macros are intentionally empty. */
.macro pixman_composite_src_0888_0888_process_pixblock_head
.endm
.macro pixman_composite_src_0888_0888_process_pixblock_tail
.endm
.macro pixman_composite_src_0888_0888_process_pixblock_tail_head
/* Store the previously fetched block, then fetch the next one. */
st3 {v0.8b, v1.8b, v2.8b}, [DST_W], #24
fetch_src_pixblock
cache_preload 8, 8
.endm
generate_composite_function \
pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_0888_0888_process_pixblock_head, \
pixman_composite_src_0888_0888_process_pixblock_tail, \
pixman_composite_src_0888_0888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* 24bpp -> 32bpp conversion with R/B channel swap: exchange v0 and v2
 * (via v31 as scratch); alpha plane v3 is zeroed once in init. */
.macro pixman_composite_src_0888_8888_rev_process_pixblock_head
mov v31.8b, v2.8b
mov v2.8b, v0.8b
mov v0.8b, v31.8b
.endm
.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail
.endm
.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head
/* Store previous block, fetch the next and swap its R/B channels. */
st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_W], #32
fetch_src_pixblock
mov v31.8b, v2.8b
mov v2.8b, v0.8b
mov v0.8b, v31.8b
cache_preload 8, 8
.endm
.macro pixman_composite_src_0888_8888_rev_init
/* Zero the alpha plane once; it is re-stored with every block. */
eor v3.8b, v3.8b, v3.8b
.endm
generate_composite_function \
pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
pixman_composite_src_0888_8888_rev_init, \
default_cleanup, \
pixman_composite_src_0888_8888_rev_process_pixblock_head, \
pixman_composite_src_0888_8888_rev_process_pixblock_tail, \
pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* 24bpp -> 16bpp (r5g6b5) conversion with channel reversal.
 * ushll #7 + sli #1 together widen a byte into the top 8 bits of a
 * halfword (equivalent to a left shift by 8, which ushll alone cannot
 * encode on AArch64 since its max shift for 8-bit elements is 7). */
.macro pixman_composite_src_0888_0565_rev_process_pixblock_head
ushll v8.8h, v1.8b, #7
sli v8.8h, v8.8h, #1
ushll v9.8h, v2.8b, #7
sli v9.8h, v9.8h, #1
.endm
.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail
ushll v14.8h, v0.8b, #7
sli v14.8h, v14.8h, #1
/* Insert green (5 bits down) and blue (11 bits down) into the red
 * halfwords to form packed r5g6b5. */
sri v14.8h, v8.8h, #5
sri v14.8h, v9.8h, #11
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
.endm
.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head
ushll v14.8h, v0.8b, #7
sli v14.8h, v14.8h, #1
fetch_src_pixblock
sri v14.8h, v8.8h, #5
sri v14.8h, v9.8h, #11
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
ushll v8.8h, v1.8b, #7
sli v8.8h, v8.8h, #1
/* Store the just-completed 8 pixels of r5g6b5. */
st1 {v14.8h}, [DST_W], #16
ushll v9.8h, v2.8b, #7
sli v9.8h, v9.8h, #1
.endm
generate_composite_function \
pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_0888_0565_rev_process_pixblock_head, \
pixman_composite_src_0888_0565_rev_process_pixblock_tail, \
pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* pixbuf -> a8r8g8b8: multiply each color channel by alpha (v3) to
 * premultiply; rounding divide by 255 happens in the tail via the
 * urshr #8 + raddhn pair. */
.macro pixman_composite_src_pixbuf_8888_process_pixblock_head
umull v8.8h, v3.8b, v0.8b
umull v9.8h, v3.8b, v1.8b
umull v10.8h, v3.8b, v2.8b
.endm
.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail
urshr v11.8h, v8.8h, #8
/* Swap v3 (alpha of the next block) with v31 using v30 as scratch so
 * alpha ends up in the output register v31. */
mov v30.8b, v31.8b
mov v31.8b, v3.8b
mov v3.8b, v30.8b
urshr v12.8h, v9.8h, #8
urshr v13.8h, v10.8h, #8
/* (x*a + ((x*a) >> 8 rounded)) >> 8 == x*a/255 rounded. */
raddhn v30.8b, v11.8h, v8.8h
raddhn v29.8b, v12.8h, v9.8h
raddhn v28.8b, v13.8h, v10.8h
.endm
/* Pipelined tail+head for src_pixbuf_8888: finishes the previous block,
 * fetches the next one and starts its alpha-premultiply.
 *
 * FIX: the third mov of the alpha/v31 swap read v31, which had just been
 * overwritten with v3 on the previous instruction, so v3 was never
 * updated and a stale alpha was multiplied into the next block. It must
 * read the scratch copy in v30, exactly as the standalone tail macro and
 * the rpixbuf variant do. */
.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head
urshr v11.8h, v8.8h, #8
/* Swap v3 <-> v31 via v30 scratch (v31 becomes output alpha). */
mov v30.8b, v31.8b
mov v31.8b, v3.8b
mov v3.8b, v30.8b
urshr v12.8h, v9.8h, #8
urshr v13.8h, v10.8h, #8
fetch_src_pixblock
raddhn v30.8b, v11.8h, v8.8h
/* Prefetch bookkeeping (only emitted when prefetch is enabled). */
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
PF beq 10f
PF add PF_X, PF_X, #8
PF sub PF_CTL, PF_CTL, #1
10:
raddhn v29.8b, v12.8h, v9.8h
raddhn v28.8b, v13.8h, v10.8h
/* Head of next block: premultiply channels by alpha. */
umull v8.8h, v3.8b, v0.8b
umull v9.8h, v3.8b, v1.8b
umull v10.8h, v3.8b, v2.8b
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
PF cmp PF_X, ORIG_W
PF lsl DUMMY, PF_X, #src_bpp_shift
PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
PF ble 10f
PF sub PF_X, PF_X, ORIG_W
PF subs PF_CTL, PF_CTL, #0x10
PF ble 10f
PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
PF ldrsb DUMMY, [PF_SRC, DUMMY]
PF add PF_SRC, PF_SRC, #1
10:
.endm
/* Instantiate the pixbuf -> a8r8g8b8 premultiplying copy. */
generate_composite_function \
pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_pixbuf_8888_process_pixblock_head, \
pixman_composite_src_pixbuf_8888_process_pixblock_tail, \
pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* rpixbuf -> a8b8g8r8: same premultiply as pixbuf_8888 but the result
 * channels land in v28/v29/v30 in the opposite order (no R/B swap). */
.macro pixman_composite_src_rpixbuf_8888_process_pixblock_head
umull v8.8h, v3.8b, v0.8b
umull v9.8h, v3.8b, v1.8b
umull v10.8h, v3.8b, v2.8b
.endm
.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail
urshr v11.8h, v8.8h, #8
/* Swap v3 <-> v31 via v30 scratch (v31 becomes output alpha). */
mov v30.8b, v31.8b
mov v31.8b, v3.8b
mov v3.8b, v30.8b
urshr v12.8h, v9.8h, #8
urshr v13.8h, v10.8h, #8
/* Rounding division by 255 of the widened products. */
raddhn v28.8b, v11.8h, v8.8h
raddhn v29.8b, v12.8h, v9.8h
raddhn v30.8b, v13.8h, v10.8h
.endm
.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head
urshr v11.8h, v8.8h, #8
mov v30.8b, v31.8b
mov v31.8b, v3.8b
mov v3.8b, v30.8b
urshr v12.8h, v9.8h, #8
urshr v13.8h, v10.8h, #8
fetch_src_pixblock
raddhn v28.8b, v11.8h, v8.8h
/* Prefetch bookkeeping (only emitted when prefetch is enabled). */
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
PF beq 10f
PF add PF_X, PF_X, #8
PF sub PF_CTL, PF_CTL, #1
10:
raddhn v29.8b, v12.8h, v9.8h
raddhn v30.8b, v13.8h, v10.8h
/* Head of the next block. */
umull v8.8h, v3.8b, v0.8b
umull v9.8h, v3.8b, v1.8b
umull v10.8h, v3.8b, v2.8b
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
PF cmp PF_X, ORIG_W
PF lsl DUMMY, PF_X, src_bpp_shift
PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
PF ble 10f
PF sub PF_X, PF_X, ORIG_W
PF subs PF_CTL, PF_CTL, #0x10
PF ble 10f
PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
PF ldrsb DUMMY, [PF_SRC, DUMMY]
PF add PF_SRC, PF_SRC, #1
10:
.endm
generate_composite_function \
pixman_composite_src_rpixbuf_8888_asm_neon, 32, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_rpixbuf_8888_process_pixblock_head, \
pixman_composite_src_rpixbuf_8888_process_pixblock_tail, \
pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* OVER with r5g6b5 source, a8 mask and r5g6b5 destination. Both source
 * (v8-v11) and destination (v10/v11 reloaded per block) are unpacked to
 * planar x888, blended with the mask, and repacked to r5g6b5. */
.macro pixman_composite_over_0565_8_0565_process_pixblock_head
/* mask is in v15 */
/* Gather the four source halfword registers into v4/v13 (128-bit). */
mov v4.d[0], v8.d[0]
mov v4.d[1], v9.d[0]
mov v13.d[0], v10.d[0]
mov v13.d[1], v11.d[0]
convert_0565_to_x888 v4, v2, v1, v0
convert_0565_to_x888 v13, v6, v5, v4
/* source pixel data is in {v0, v1, v2, XX} */
/* destination pixel data is in {v4, v5, v6, XX} */
mvn v7.8b, v15.8b
/* src * mask and dst * (1 - mask), widened to 16 bit. */
umull v10.8h, v15.8b, v2.8b
umull v9.8h, v15.8b, v1.8b
umull v8.8h, v15.8b, v0.8b
umull v11.8h, v7.8b, v4.8b
umull v12.8h, v7.8b, v5.8b
umull v13.8h, v7.8b, v6.8b
urshr v19.8h, v10.8h, #8
urshr v18.8h, v9.8h, #8
urshr v17.8h, v8.8h, #8
/* Rounding division by 255: (x + (x >> 8 rounded)) >> 8. */
raddhn v2.8b, v10.8h, v19.8h
raddhn v1.8b, v9.8h, v18.8h
raddhn v0.8b, v8.8h, v17.8h
.endm
.macro pixman_composite_over_0565_8_0565_process_pixblock_tail
urshr v17.8h, v11.8h, #8
urshr v18.8h, v12.8h, #8
urshr v19.8h, v13.8h, #8
raddhn v28.8b, v17.8h, v11.8h
raddhn v29.8b, v18.8h, v12.8h
raddhn v30.8b, v19.8h, v13.8h
/* Saturating add of the two blend halves. */
uqadd v0.8b, v0.8b, v28.8b
uqadd v1.8b, v1.8b, v29.8b
uqadd v2.8b, v2.8b, v30.8b
/* 32bpp result is in {v0, v1, v2, XX} */
convert_8888_to_0565 v2, v1, v0, v14, v30, v13
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_0565_8_0565_process_pixblock_tail_head
fetch_mask_pixblock
pixman_composite_over_0565_8_0565_process_pixblock_tail
fetch_src_pixblock
ld1 {v10.4h, v11.4h}, [DST_R], #16
cache_preload 8, 8
pixman_composite_over_0565_8_0565_process_pixblock_head
st1 {v14.8h}, [DST_W], #16
.endm
generate_composite_function \
pixman_composite_over_0565_8_0565_asm_neon, 16, 8, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_0565_8_0565_process_pixblock_head, \
pixman_composite_over_0565_8_0565_process_pixblock_tail, \
pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
8, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/* OVER 0565 with a solid (constant) mask: broadcast the mask's alpha
 * byte from w6 into all lanes of v15, then reuse the 0565_8_0565
 * pixblock macros unchanged (they read the mask from v15). */
.macro pixman_composite_over_0565_n_0565_init
mov v15.s[0], w6
/* b[3] is the alpha byte of the a8r8g8b8 constant loaded above —
 * TODO confirm against the caller's argument layout. */
dup v15.8b, v15.b[3]
.endm
.macro pixman_composite_over_0565_n_0565_cleanup
.endm
generate_composite_function \
pixman_composite_over_0565_n_0565_asm_neon, 16, 0, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_0565_n_0565_init, \
pixman_composite_over_0565_n_0565_cleanup, \
pixman_composite_over_0565_8_0565_process_pixblock_head, \
pixman_composite_over_0565_8_0565_process_pixblock_tail, \
pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
8, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/* ADD with r5g6b5 source, a8 mask, r5g6b5 destination: unpack both to
 * x888, multiply source by mask, saturating-add destination, repack. */
.macro pixman_composite_add_0565_8_0565_process_pixblock_head
/* mask is in v15 */
mov v4.d[0], v8.d[0]
mov v4.d[1], v9.d[0]
mov v13.d[0], v10.d[0]
mov v13.d[1], v11.d[0]
convert_0565_to_x888 v4, v2, v1, v0
convert_0565_to_x888 v13, v6, v5, v4
/* source pixel data is in {v0, v1, v2, XX} */
/* destination pixel data is in {v4, v5, v6, XX} */
umull v9.8h, v15.8b, v2.8b
umull v8.8h, v15.8b, v1.8b
umull v7.8h, v15.8b, v0.8b
urshr v12.8h, v9.8h, #8
urshr v11.8h, v8.8h, #8
urshr v10.8h, v7.8h, #8
/* Rounding division by 255 of src * mask. */
raddhn v2.8b, v9.8h, v12.8h
raddhn v1.8b, v8.8h, v11.8h
raddhn v0.8b, v7.8h, v10.8h
.endm
.macro pixman_composite_add_0565_8_0565_process_pixblock_tail
/* Saturating add of masked source and destination channels. */
uqadd v0.8b, v0.8b, v4.8b
uqadd v1.8b, v1.8b, v5.8b
uqadd v2.8b, v2.8b, v6.8b
/* 32bpp result is in {v0, v1, v2, XX} */
convert_8888_to_0565 v2, v1, v0, v14, v30, v13
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_0565_8_0565_process_pixblock_tail_head
fetch_mask_pixblock
pixman_composite_add_0565_8_0565_process_pixblock_tail
fetch_src_pixblock
ld1 {v10.4h, v11.4h}, [DST_R], #16
cache_preload 8, 8
pixman_composite_add_0565_8_0565_process_pixblock_head
st1 {v14.8h}, [DST_W], #16
.endm
generate_composite_function \
pixman_composite_add_0565_8_0565_asm_neon, 16, 8, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_add_0565_8_0565_process_pixblock_head, \
pixman_composite_add_0565_8_0565_process_pixblock_tail, \
pixman_composite_add_0565_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
8, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/* OUT_REVERSE with a8 source and r5g6b5 destination:
 * dst = dst * (1 - src_alpha), repacked to r5g6b5. */
.macro pixman_composite_out_reverse_8_0565_process_pixblock_head
/* mask is in v15 */
mov v12.d[0], v10.d[0]
mov v12.d[1], v11.d[0]
convert_0565_to_x888 v12, v6, v5, v4
/* destination pixel data is in {v4, v5, v6, xx} */
mvn v24.8b, v15.8b /* get inverted alpha */
/* now do alpha blending */
umull v8.8h, v24.8b, v4.8b
umull v9.8h, v24.8b, v5.8b
umull v10.8h, v24.8b, v6.8b
.endm
.macro pixman_composite_out_reverse_8_0565_process_pixblock_tail
urshr v11.8h, v8.8h, #8
urshr v12.8h, v9.8h, #8
urshr v13.8h, v10.8h, #8
/* Rounding division by 255. */
raddhn v0.8b, v11.8h, v8.8h
raddhn v1.8b, v12.8h, v9.8h
raddhn v2.8b, v13.8h, v10.8h
/* 32bpp result is in {v0, v1, v2, XX} */
convert_8888_to_0565 v2, v1, v0, v14, v12, v3
mov v28.d[0], v14.d[0]
mov v29.d[0], v14.d[1]
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_out_reverse_8_0565_process_pixblock_tail_head
fetch_src_pixblock
pixman_composite_out_reverse_8_0565_process_pixblock_tail
ld1 {v10.4h, v11.4h}, [DST_R], #16
cache_preload 8, 8
pixman_composite_out_reverse_8_0565_process_pixblock_head
st1 {v14.8h}, [DST_W], #16
.endm
generate_composite_function \
pixman_composite_out_reverse_8_0565_asm_neon, 8, 0, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_out_reverse_8_0565_process_pixblock_head, \
pixman_composite_out_reverse_8_0565_process_pixblock_tail, \
pixman_composite_out_reverse_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
15, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* OUT_REVERSE with a8 source and a8r8g8b8 destination:
 * dst = dst * (1 - src_alpha) on all four channels. */
.macro pixman_composite_out_reverse_8_8888_process_pixblock_head
/* src is in v0 */
/* destination pixel data is in {v4, v5, v6, v7} */
mvn v1.8b, v0.8b /* get inverted alpha */
/* now do alpha blending */
umull v8.8h, v1.8b, v4.8b
umull v9.8h, v1.8b, v5.8b
umull v10.8h, v1.8b, v6.8b
umull v11.8h, v1.8b, v7.8b
.endm
.macro pixman_composite_out_reverse_8_8888_process_pixblock_tail
urshr v14.8h, v8.8h, #8
urshr v15.8h, v9.8h, #8
urshr v12.8h, v10.8h, #8
urshr v13.8h, v11.8h, #8
/* Rounding division by 255 of each widened product. */
raddhn v28.8b, v14.8h, v8.8h
raddhn v29.8b, v15.8h, v9.8h
raddhn v30.8b, v12.8h, v10.8h
raddhn v31.8b, v13.8h, v11.8h
/* 32bpp result is in {v28, v29, v30, v31} */
.endm
/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_out_reverse_8_8888_process_pixblock_tail_head
fetch_src_pixblock
pixman_composite_out_reverse_8_8888_process_pixblock_tail
ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
cache_preload 8, 8
pixman_composite_out_reverse_8_8888_process_pixblock_head
st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm
generate_composite_function \
pixman_composite_out_reverse_8_8888_asm_neon, 8, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_out_reverse_8_8888_process_pixblock_head, \
pixman_composite_out_reverse_8_8888_process_pixblock_tail, \
pixman_composite_out_reverse_8_8888_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* Nearest-neighbour scaled scanline functions: reuse the plain
 * compositing pixblock macros with a per-pixel nearest fetch. */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_8888_8888_OVER_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_process_pixblock_tail_head
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_8888_0565_OVER_asm_neon, 32, 0, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_0565_process_pixblock_head, \
pixman_composite_over_8888_0565_process_pixblock_tail, \
pixman_composite_over_8888_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
0, /* src_basereg */ \
24 /* mask_basereg */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_8888_0565_SRC_asm_neon, 32, 0, 16, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_src_8888_0565_process_pixblock_head, \
pixman_composite_src_8888_0565_process_pixblock_tail, \
pixman_composite_src_8888_0565_process_pixblock_tail_head
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_0565_8888_SRC_asm_neon, 16, 0, 32, \
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_src_0565_8888_process_pixblock_head, \
pixman_composite_src_0565_8888_process_pixblock_tail, \
pixman_composite_src_0565_8888_process_pixblock_tail_head
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_8888_8_0565_process_pixblock_head, \
pixman_composite_over_8888_8_0565_process_pixblock_tail, \
pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
4, /* dst_r_basereg */ \
8, /* src_basereg */ \
24 /* mask_basereg */
generate_composite_function_nearest_scanline \
pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_neon, 16, 8, 16, \
FLAG_DST_READWRITE, \
8, /* number of pixels, processed in a single block */ \
default_init_need_all_regs, \
default_cleanup_need_all_regs, \
pixman_composite_over_0565_8_0565_process_pixblock_head, \
pixman_composite_over_0565_8_0565_process_pixblock_tail, \
pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
28, /* dst_w_basereg */ \
10, /* dst_r_basereg */ \
8, /* src_basereg */ \
15 /* mask_basereg */
/******************************************************************************/
/*
* Bilinear scaling support code which tries to provide pixel fetching, color
* format conversion, and interpolation as separate macros which can be used
* as the basic building blocks for constructing bilinear scanline functions.
*/
/* Load the two vertically adjacent a8r8g8b8 pixels for one bilinear
 * sample position: integer part of X selects the column, STRIDE moves
 * from the top row to the bottom row, X advances by UX.
 *
 * FIX: the "&reg" token of the &reg1&/&reg2& macro-argument
 * concatenation had been mangled into the "registered" sign (an HTML
 * entity decode artifact); restored, matching the &acc2& usage in the
 * neighbouring macros. */
.macro bilinear_load_8888 reg1, reg2, tmp
asr TMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #2
ld1 {&reg1&.2s}, [TMP1], STRIDE
ld1 {&reg2&.2s}, [TMP1]
.endm
/* Load the two vertically adjacent r5g6b5 pixel pairs for one bilinear
 * sample and unpack them to x888.
 *
 * FIX: restored the &reg2& argument concatenations that had been
 * mangled into the "registered" sign by an HTML entity decode. */
.macro bilinear_load_0565 reg1, reg2, tmp
asr TMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #1
ld1 {&reg2&.s}[0], [TMP1], STRIDE
ld1 {&reg2&.s}[1], [TMP1]
convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
.endm
/* Load and vertically interpolate two 8888 sample positions:
 * acc = top * wt (v28) + bottom * wb (v29), widened to 16 bit.
 *
 * FIX: restored the &regN& argument concatenations that had been
 * mangled into the "registered" sign by an HTML entity decode. */
.macro bilinear_load_and_vertical_interpolate_two_8888 \
acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2
bilinear_load_8888 reg1, reg2, tmp1
umull &acc1&.8h, &reg1&.8b, v28.8b
umlal &acc1&.8h, &reg2&.8b, v29.8b
bilinear_load_8888 reg3, reg4, tmp2
umull &acc2&.8h, &reg3&.8b, v28.8b
umlal &acc2&.8h, &reg4&.8b, v29.8b
.endm
/* Four 8888 sample positions = two invocations of the two-pixel
 * variant; the xacc2lo/xacc2hi style extra args are unused for 8888
 * (kept so the signature matches the 0565 variant). */
.macro bilinear_load_and_vertical_interpolate_four_8888 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
bilinear_load_and_vertical_interpolate_two_8888 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi
bilinear_load_and_vertical_interpolate_two_8888 \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
.endm
/* Two-register zip/unzip helpers emulating the ARM32 vzip/vuzp
 * in-place semantics. v31 is used as scratch; its low 64 bits are
 * saved to TMP4 and restored so callers may keep data in v31.
 * Arguments include the element suffix (e.g. "v20.8b"). */
.macro vzip reg1, reg2
umov TMP4, v31.d[0]
zip1 v31.8b, reg1, reg2
zip2 reg2, reg1, reg2
mov reg1, v31.8b
mov v31.d[0], TMP4
.endm
.macro vuzp reg1, reg2
umov TMP4, v31.d[0]
uzp1 v31.8b, reg1, reg2
uzp2 reg2, reg1, reg2
mov reg1, v31.8b
mov v31.d[0], TMP4
.endm
/* Load and vertically interpolate two 0565 sample positions: fetch the
 * four 16-bit pixel pairs into acc2's lanes, unpack to planar x888,
 * transpose with vzip so top/bottom pixels line up, then interpolate
 * with the vertical weights in v28/v29.
 *
 * FIX: restored the &regN& argument concatenations that had been
 * mangled into the "registered" sign by an HTML entity decode (the
 * intact &acc2& lines above them show the intended spelling). */
.macro bilinear_load_and_vertical_interpolate_two_0565 \
acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
asr TMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #1
asr TMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #1
ld1 {&acc2&.s}[0], [TMP1], STRIDE
ld1 {&acc2&.s}[2], [TMP2], STRIDE
ld1 {&acc2&.s}[1], [TMP1]
ld1 {&acc2&.s}[3], [TMP2]
convert_0565_to_x888 acc2, reg3, reg2, reg1
vzip &reg1&.8b, &reg3&.8b
vzip &reg2&.8b, &reg4&.8b
vzip &reg3&.8b, &reg4&.8b
vzip &reg1&.8b, &reg2&.8b
umull &acc1&.8h, &reg1&.8b, v28.8b
umlal &acc1&.8h, &reg2&.8b, v29.8b
umull &acc2&.8h, &reg3&.8b, v28.8b
umlal &acc2&.8h, &reg4&.8b, v29.8b
.endm
/* Four 0565 sample positions: same as the two-pixel variant, done
 * twice, with the loads of the second pair interleaved between the
 * vzip/umull work of the first pair to hide load latency. */
.macro bilinear_load_and_vertical_interpolate_four_0565 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
asr TMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #1
asr TMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #1
ld1 {&xacc2&.s}[0], [TMP1], STRIDE
ld1 {&xacc2&.s}[2], [TMP2], STRIDE
ld1 {&xacc2&.s}[1], [TMP1]
ld1 {&xacc2&.s}[3], [TMP2]
convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
asr TMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #1
asr TMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #1
ld1 {&yacc2&.s}[0], [TMP1], STRIDE
vzip &xreg1&.8b, &xreg3&.8b
ld1 {&yacc2&.s}[2], [TMP2], STRIDE
vzip &xreg2&.8b, &xreg4&.8b
ld1 {&yacc2&.s}[1], [TMP1]
vzip &xreg3&.8b, &xreg4&.8b
ld1 {&yacc2&.s}[3], [TMP2]
vzip &xreg1&.8b, &xreg2&.8b
convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
umull &xacc1&.8h, &xreg1&.8b, v28.8b
vzip &yreg1&.8b, &yreg3&.8b
umlal &xacc1&.8h, &xreg2&.8b, v29.8b
vzip &yreg2&.8b, &yreg4&.8b
umull &xacc2&.8h, &xreg3&.8b, v28.8b
vzip &yreg3&.8b, &yreg4&.8b
umlal &xacc2&.8h, &xreg4&.8b, v29.8b
vzip &yreg1&.8b, &yreg2&.8b
umull &yacc1&.8h, &yreg1&.8b, v28.8b
umlal &yacc1&.8h, &yreg2&.8b, v29.8b
umull &yacc2&.8h, &yreg3&.8b, v28.8b
umlal &yacc2&.8h, &yreg4&.8b, v29.8b
.endm
/* Store 1/2/4 interpolated a8r8g8b8 pixels from v0/v1 and advance OUT.
 * numpix must be a compile-time 1, 2 or 4; anything else is rejected
 * at assembly time. */
.macro bilinear_store_8888 numpix, tmp1, tmp2
.if numpix == 4
st1 {v0.2s, v1.2s}, [OUT], #16
.elseif numpix == 2
st1 {v0.2s}, [OUT], #8
.elseif numpix == 1
st1 {v0.s}[0], [OUT], #4
.else
.error bilinear_store_8888 numpix is unsupported
.endif
.endm
/* Store 1/2/4 interpolated pixels as r5g6b5: de-interleave the packed
 * channels with vuzp, pack to 0565 into v1, then store. */
.macro bilinear_store_0565 numpix, tmp1, tmp2
vuzp v0.8b, v1.8b
vuzp v2.8b, v3.8b
vuzp v1.8b, v3.8b
vuzp v0.8b, v2.8b
convert_8888_to_0565 v2, v1, v0, v1, tmp1, tmp2
.if numpix == 4
st1 {v1.4h}, [OUT], #8
.elseif numpix == 2
st1 {v1.s}[0], [OUT], #4
.elseif numpix == 1
st1 {v1.h}[0], [OUT], #2
.else
.error bilinear_store_0565 numpix is unsupported
.endif
.endm
/* Interpolate a single pixel: vertical lerp via umull/umlal with the
 * weights in v28/v29, horizontal lerp with the per-pixel weight in
 * v15.h[0], then narrow and store. */
.macro bilinear_interpolate_last_pixel src_fmt, dst_fmt
bilinear_load_&src_fmt v0, v1, v2
umull v2.8h, v0.8b, v28.8b
umlal v2.8h, v1.8b, v29.8b
/* 5 cycles bubble */
ushll v0.4s, v2.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v2.4h, v15.h[0]
umlal2 v0.4s, v2.8h, v15.h[0]
/* 5 cycles bubble */
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
/* 3 cycles bubble */
xtn v0.8b, v0.8h
/* 1 cycle bubble */
bilinear_store_&dst_fmt 1, v3, v4
.endm
/* Interpolate two pixels; horizontal weights come from v15.h[0] and
 * v15.h[4], and v15/v12 are stepped for the next iteration here. */
.macro bilinear_interpolate_two_pixels src_fmt, dst_fmt
bilinear_load_and_vertical_interpolate_two_&src_fmt \
v1, v11, v2, v3, v20, v21, v22, v23
ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v1.4h, v15.h[0]
umlal2 v0.4s, v1.8h, v15.h[0]
ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v10.4s, v11.4h, v15.h[4]
umlal2 v10.4s, v11.8h, v15.h[4]
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
/* Advance the horizontal weight accumulator for the next pixels. */
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
xtn v0.8b, v0.8h
bilinear_store_&dst_fmt 2, v3, v4
.endm
/* Generic (non-specialized) four-pixel bilinear step: two vertical
 * two-pixel interpolations, two horizontal lerps, weight-accumulator
 * update, narrow and store. TMP1/TMP2 still point at the last loaded
 * rows, so they are reused for prefetching. */
.macro bilinear_interpolate_four_pixels src_fmt, dst_fmt
bilinear_load_and_vertical_interpolate_four_&src_fmt \
v1, v11, v14, v20, v16, v17, v22, v23 \
v3, v9, v24, v25, v26, v27, v18, v19
prfm PREFETCH_MODE, [TMP1, PF_OFFS]
sub TMP1, TMP1, STRIDE
ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v1.4h, v15.h[0]
umlal2 v0.4s, v1.8h, v15.h[0]
ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v10.4s, v11.4h, v15.h[4]
umlal2 v10.4s, v11.8h, v15.h[4]
/* Load the weights for the second pixel pair. */
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
ushll v2.4s, v3.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v2.4s, v3.4h, v15.h[0]
umlal2 v2.4s, v3.8h, v15.h[0]
ushll v8.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
prfm PREFETCH_MODE, [TMP2, PF_OFFS]
umlsl v8.4s, v9.4h, v15.h[4]
umlal2 v8.4s, v9.8h, v15.h[4]
add v12.8h, v12.8h, v13.8h
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v2.8h, v8.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
xtn v0.8b, v0.8h
xtn v1.8b, v2.8h
add v12.8h, v12.8h, v13.8h
bilinear_store_&dst_fmt 4, v3, v4
.endm
/* Dispatch wrappers: if a hand-scheduled fast path exists for this
 * src/dst format pair (signalled by the corresponding
 * have_bilinear_interpolate_* symbol being .set), use its split
 * head/tail/tail_head macros; otherwise fall back to the generic
 * four-pixel macro above. */
.macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_head
.else
bilinear_interpolate_four_pixels src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail
.endif
.endm
.macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail_head
.else
bilinear_interpolate_four_pixels src_fmt, dst_fmt
.endif
.endm
/* Eight-pixel variants fall back to two four-pixel steps when no
 * specialized eight-pixel fast path exists. */
.macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_head
.else
bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail
.else
bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail_head
.else
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.endif
.endm
/* Flags for generate_bilinear_scanline_func: main-loop unroll factor
 * and whether the function may clobber all NEON registers. */
.set BILINEAR_FLAG_UNROLL_4, 0
.set BILINEAR_FLAG_UNROLL_8, 1
.set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2
/*
* Main template macro for generating NEON optimized bilinear scanline
* functions.
*
* Bilinear scanline scaler macro template uses the following arguments:
* fname - name of the function to generate
* src_fmt - source color format (8888 or 0565)
* dst_fmt - destination color format (8888 or 0565)
* bpp_shift - (1 << bpp_shift) is the size of source pixel in bytes
* prefetch_distance - prefetch in the source image by that many
* pixels ahead
*/
.macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \
src_bpp_shift, dst_bpp_shift, \
prefetch_distance, flags
pixman_asm_function fname
/* Argument registers (AAPCS64: x0-x7 carry the eight arguments). */
OUT .req x0
TOP .req x1
BOTTOM .req x2
WT .req x3
WB .req x4
X .req x5
UX .req x6
WIDTH .req x7
TMP1 .req x8
TMP2 .req x9
PF_OFFS .req x10
TMP3 .req x11
TMP4 .req x12
STRIDE .req x13
/* Sign-extend the 32-bit weight/position arguments to 64 bit. */
sxtw x3, w3
sxtw x4, w4
sxtw x5, w5
sxtw x6, w6
sxtw x7, w7
/* Prologue: frame record, then save v8-v15 (low halves are
 * callee-saved per AAPCS64) and x8-x13 below the frame. */
stp x29, x30, [sp, -16]!
mov x29, sp
sub sp, sp, 112 /* push all registers */
sub x29, x29, 64
st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32
st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32
stp x8, x9, [x29, -80]
stp x10, x11, [x29, -96]
stp x12, x13, [x29, -112]
mov PF_OFFS, #prefetch_distance
mul PF_OFFS, PF_OFFS, UX
/* STRIDE = byte distance between the two source rows. */
subs STRIDE, BOTTOM, TOP
.unreq BOTTOM
cmp WIDTH, #0
ble 300f
/* v12/v13: horizontal weight accumulator and step; v28/v29:
 * vertical weights replicated across lanes. */
dup v12.8h, w5
dup v13.8h, w6
dup v28.8b, w3
dup v29.8b, w4
mov v25.d[0], v12.d[1]
mov v26.d[0], v13.d[0]
add v25.4h, v25.4h, v26.4h
mov v12.d[1], v25.d[0]
/* ensure good destination alignment */
cmp WIDTH, #1
blt 100f
tst OUT, #(1 << dst_bpp_shift)
beq 100f
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
bilinear_interpolate_last_pixel src_fmt, dst_fmt
sub WIDTH, WIDTH, #1
100:
add v13.8h, v13.8h, v13.8h
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
cmp WIDTH, #2
blt 100f
tst OUT, #(1 << (dst_bpp_shift + 1))
beq 100f
bilinear_interpolate_two_pixels src_fmt, dst_fmt
sub WIDTH, WIDTH, #2
100:
.if ((flags) & BILINEAR_FLAG_UNROLL_8) != 0
/*********** 8 pixels per iteration *****************/
cmp WIDTH, #4
blt 100f
tst OUT, #(1 << (dst_bpp_shift + 2))
beq 100f
bilinear_interpolate_four_pixels src_fmt, dst_fmt
sub WIDTH, WIDTH, #4
100:
subs WIDTH, WIDTH, #8
blt 100f
asr PF_OFFS, PF_OFFS, #(16 - src_bpp_shift)
bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #8
blt 500f
1000:
bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #8
bge 1000b
500:
bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
100:
tst WIDTH, #4
beq 200f
bilinear_interpolate_four_pixels src_fmt, dst_fmt
200:
.else
/*********** 4 pixels per iteration *****************/
subs WIDTH, WIDTH, #4
blt 100f
asr PF_OFFS, PF_OFFS, #(16 - src_bpp_shift)
bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #4
blt 500f
1000:
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #4
bge 1000b
500:
bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
100:
/****************************************************/
.endif
/* handle the remaining trailing pixels */
tst WIDTH, #2
beq 200f
bilinear_interpolate_two_pixels src_fmt, dst_fmt
200:
tst WIDTH, #1
beq 300f
bilinear_interpolate_last_pixel src_fmt, dst_fmt
300:
/* Epilogue: restore saved NEON and integer registers.
 * FIX: x12/x13 were restored from [x29, -104], but the prologue
 * saved them at [x29, -112]; the mismatched offset reloaded the
 * saved x13/x10 values into x12/x13 instead. */
sub x29, x29, 64
ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32
ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32
ldp x8, x9, [x29, -80]
ldp x10, x11, [x29, -96]
ldp x12, x13, [x29, -112]
mov sp, x29
ldp x29, x30, [sp], 16
ret
.unreq OUT
.unreq TOP
.unreq WT
.unreq WB
.unreq X
.unreq UX
.unreq WIDTH
.unreq TMP1
.unreq TMP2
.unreq PF_OFFS
.unreq TMP3
.unreq TMP4
.unreq STRIDE
.endfunc
.endm
/*****************************************************************************/
/* Hand-scheduled fast path for 8888 -> 8888 four-pixel bilinear
 * interpolation; registering the symbol below makes the dispatch
 * wrappers pick these head/tail/tail_head macros. */
.set have_bilinear_interpolate_four_pixels_8888_8888, 1
.macro bilinear_interpolate_four_pixels_8888_8888_head
asr TMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #2
asr TMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #2
ld1 {v22.2s}, [TMP1], STRIDE
ld1 {v23.2s}, [TMP1]
asr TMP3, X, #16
add X, X, UX
add TMP3, TOP, TMP3, lsl #2
/* Vertical interpolation of pixel 0 (weights in v28/v29). */
umull v8.8h, v22.8b, v28.8b
umlal v8.8h, v23.8b, v29.8b
ld1 {v22.2s}, [TMP2], STRIDE
ld1 {v23.2s}, [TMP2]
asr TMP4, X, #16
add X, X, UX
add TMP4, TOP, TMP4, lsl #2
umull v9.8h, v22.8b, v28.8b
umlal v9.8h, v23.8b, v29.8b
ld1 {v22.2s}, [TMP3], STRIDE
ld1 {v23.2s}, [TMP3]
umull v10.8h, v22.8b, v28.8b
umlal v10.8h, v23.8b, v29.8b
ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v0.4s, v8.4h, v15.h[0]
umlal2 v0.4s, v8.8h, v15.h[0]
prfm PREFETCH_MODE, [TMP4, PF_OFFS]
ld1 {v16.2s}, [TMP4], STRIDE
ld1 {v17.2s}, [TMP4]
prfm PREFETCH_MODE, [TMP4, PF_OFFS]
umull v11.8h, v16.8b, v28.8b
umlal v11.8h, v17.8b, v29.8b
ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v1.4s, v9.4h, v15.h[4]
.endm
.macro bilinear_interpolate_four_pixels_8888_8888_tail
/* Finish the horizontal lerps started in the head, update the
 * weight accumulator, narrow and store four pixels. */
umlal2 v1.4s, v9.8h, v15.h[4]
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v2.4s, v10.4h, v15.h[0]
umlal2 v2.4s, v10.8h, v15.h[0]
ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v3.4s, v11.4h, v15.h[4]
umlal2 v3.4s, v11.8h, v15.h[4]
add v12.8h, v12.8h, v13.8h
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
xtn v6.8b, v0.8h
xtn v7.8b, v2.8h
add v12.8h, v12.8h, v13.8h
st1 {v6.2s, v7.2s}, [OUT], #16
.endm
.macro bilinear_interpolate_four_pixels_8888_8888_tail_head
/* Pipelined: finish the previous four pixels while loading and
 * vertically interpolating the next four. */
asr TMP1, X, #16
add X, X, UX
add TMP1, TOP, TMP1, lsl #2
asr TMP2, X, #16
add X, X, UX
add TMP2, TOP, TMP2, lsl #2
umlal2 v1.4s, v9.8h, v15.h[4]
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v2.4s, v10.4h, v15.h[0]
umlal2 v2.4s, v10.8h, v15.h[0]
ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
ld1 {v20.2s}, [TMP1], STRIDE
umlsl v3.4s, v11.4h, v15.h[4]
umlal2 v3.4s, v11.8h, v15.h[4]
ld1 {v21.2s}, [TMP1]
umull v8.8h, v20.8b, v28.8b
umlal v8.8h, v21.8b, v29.8b
shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
shrn v4.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
ld1 {v22.2s}, [TMP2], STRIDE
shrn2 v4.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
add v12.8h, v12.8h, v13.8h
ld1 {v23.2s}, [TMP2]
umull v9.8h, v22.8b, v28.8b
asr TMP3, X, #16
add X, X, UX
add TMP3, TOP, TMP3, lsl #2
asr TMP4, X, #16
add X, X, UX
add TMP4, TOP, TMP4, lsl #2
umlal v9.8h, v23.8b, v29.8b
ld1 {v22.2s}, [TMP3], STRIDE
ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
ld1 {v23.2s}, [TMP3]
umull v10.8h, v22.8b, v28.8b
umlal v10.8h, v23.8b, v29.8b
xtn v6.8b, v0.8h
ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS
xtn v7.8b, v4.8h
umlsl v0.4s, v8.4h, v15.h[0]
umlal2 v0.4s, v8.8h, v15.h[0]
prfm PREFETCH_MODE, [TMP4, PF_OFFS]
ld1 {v16.2s}, [TMP4], STRIDE
add v12.8h, v12.8h, v13.8h
ld1 {v17.2s}, [TMP4]
prfm PREFETCH_MODE, [TMP4, PF_OFFS]
umull v11.8h, v16.8b, v28.8b
umlal v11.8h, v17.8b, v29.8b
st1 {v6.2s, v7.2s}, [OUT], #16
ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
umlsl v1.4s, v9.4h, v15.h[4]
.endm
/*****************************************************************************/
/*
 * Instantiate the bilinear SRC scanline functions.  Per the
 * generate_bilinear_scanline_func template defined earlier in this file
 * (not visible in this chunk) the arguments appear to be: function name,
 * source format, destination format, source bpp shift, destination bpp
 * shift, prefetch distance, flags -- TODO confirm against the template.
 */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \
2, 2, 28, BILINEAR_FLAG_UNROLL_4
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \
2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \
1, 2, 28, BILINEAR_FLAG_UNROLL_4
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \
1, 1, 28, BILINEAR_FLAG_UNROLL_4
/*
iMAGRAY/Shelldone | 45,185 | deps/cairo/pixman/pixman/pixman-arm-neon-asm-bilinear.S | /*
* Copyright © 2011 SCore Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
* Author: Taekyun Kim (tkq.kim@samsung.com)
*/
/*
* This file contains scaled bilinear scanline functions implemented
* using older siarhei's bilinear macro template.
*
* << General scanline function procedures >>
* 1. bilinear interpolate source pixels
* 2. load mask pixels
* 3. load destination pixels
* 4. duplicate mask to fill whole register
* 5. interleave source & destination pixels
* 6. apply mask to source pixels
* 7. combine source & destination pixels
 * 8. Deinterleave final result
* 9. store destination pixels
*
* All registers with single number (i.e. src0, tmp0) are 64-bits registers.
* Registers with double numbers(src01, dst01) are 128-bits registers.
* All temp registers can be used freely outside the code block.
* Assume that symbol(register .req) OUT and MASK are defined at caller of these macro blocks.
*
* Remarks
* There can be lots of pipeline stalls inside code block and between code blocks.
* Further optimizations will be done by new macro templates using head/tail_head/tail scheme.
*/
/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined (__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
/* 32-bit ARM / NEON source; run through cpp, hence the #includes below. */
.text
.fpu neon
.arch armv7a
/* .object_arch overrides the architecture recorded in the object file */
.object_arch armv4
.eabi_attribute 10, 0
.eabi_attribute 12, 0
.arm
/* .altmacro enables the name&suffix concatenation used by dispatch macros */
.altmacro
.p2align 2
#include "pixman-private.h"
#include "pixman-arm-asm.h"
#include "pixman-arm-neon-asm.h"
/*
* Bilinear macros from pixman-arm-neon-asm.S
*/
/*
* Bilinear scaling support code which tries to provide pixel fetching, color
* format conversion, and interpolation as separate macros which can be used
* as the basic building blocks for constructing bilinear scanline functions.
*/
/*
 * Fetch one a8r8g8b8 texel pair for bilinear filtering: reg1 = the two
 * adjacent texels from the top scanline, reg2 = the same two from the
 * bottom scanline (STRIDE bytes below).  Consumes one step of the 16.16
 * coordinate X (advanced by UX).  'tmp' is unused; kept for signature
 * parity with bilinear_load_0565.
 */
.macro bilinear_load_8888 reg1, reg2, tmp
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
vld1.32 {reg1}, [TMP1], STRIDE
vld1.32 {reg2}, [TMP1]
.endm
/*
 * Fetch one r5g6b5 texel pair (top + bottom line, 2 bytes per pixel) and
 * widen the four 0565 values to packed x888 in reg1/reg2 using
 * convert_four_0565_to_x888_packed (defined in an included header).
 */
.macro bilinear_load_0565 reg1, reg2, tmp
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #1
vld1.32 {reg2[0]}, [TMP1], STRIDE
vld1.32 {reg2[1]}, [TMP1]
convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
.endm
/*
 * Fetch two 8888 source pixels and vertically blend each with the
 * duplicated top/bottom weights (d28 = WT, d29 = WB, set up by the
 * scanline template): acc = top * wt + bottom * wb, widened u8 -> u16.
 */
.macro bilinear_load_and_vertical_interpolate_two_8888 \
    acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2
    bilinear_load_8888 reg1, reg2, tmp1
    vmull.u8 acc1, reg1, d28
    vmlal.u8 acc1, reg2, d29
    bilinear_load_8888 reg3, reg4, tmp2
    vmull.u8 acc2, reg3, d28
    vmlal.u8 acc2, reg4, d29
.endm
/*
 * Fetch four 8888 source pixels: simply two back-to-back invocations of
 * the two-pixel loader (x = first pair, y = second pair).  The xacc2lo/hi
 * and yacc2lo/hi operands are unused for 8888 and exist only so the
 * signature matches the 0565 variant below.
 */
.macro bilinear_load_and_vertical_interpolate_four_8888 \
    xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
    yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
    bilinear_load_and_vertical_interpolate_two_8888 \
        xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi
    bilinear_load_and_vertical_interpolate_two_8888 \
        yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
.endm
/*
 * Fetch two r5g6b5 source pixels (each a top/bottom texel pair) into the
 * lanes of acc2, expand 0565 -> x888 planes with convert_0565_to_x888
 * (from an included header), reshuffle the planes into per-texel order
 * with the vzip sequence, then vertically blend with d28/d29 (top/bottom
 * weights) exactly as the 8888 variant does.
 */
.macro bilinear_load_and_vertical_interpolate_two_0565 \
    acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
    mov TMP1, X, asr #16
    add X, X, UX
    add TMP1, TOP, TMP1, asl #1
    mov TMP2, X, asr #16
    add X, X, UX
    add TMP2, TOP, TMP2, asl #1
    vld1.32 {acc2lo[0]}, [TMP1], STRIDE
    vld1.32 {acc2hi[0]}, [TMP2], STRIDE
    vld1.32 {acc2lo[1]}, [TMP1]
    vld1.32 {acc2hi[1]}, [TMP2]
    convert_0565_to_x888 acc2, reg3, reg2, reg1
    vzip.u8 reg1, reg3
    vzip.u8 reg2, reg4
    vzip.u8 reg3, reg4
    vzip.u8 reg1, reg2
    vmull.u8 acc1, reg1, d28
    vmlal.u8 acc1, reg2, d29
    vmull.u8 acc2, reg3, d28
    vmlal.u8 acc2, reg4, d29
.endm
/*
 * Fetch four r5g6b5 source pixels (two texel pairs per half).  Same job
 * as running the two-pixel 0565 loader twice, but with the loads, format
 * conversion, vzip shuffles and the widening multiplies of the two halves
 * manually interleaved to hide latency -- keep the instruction order.
 */
.macro bilinear_load_and_vertical_interpolate_four_0565 \
    xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
    yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
    mov TMP1, X, asr #16
    add X, X, UX
    add TMP1, TOP, TMP1, asl #1
    mov TMP2, X, asr #16
    add X, X, UX
    add TMP2, TOP, TMP2, asl #1
    vld1.32 {xacc2lo[0]}, [TMP1], STRIDE
    vld1.32 {xacc2hi[0]}, [TMP2], STRIDE
    vld1.32 {xacc2lo[1]}, [TMP1]
    vld1.32 {xacc2hi[1]}, [TMP2]
    convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
    mov TMP1, X, asr #16
    add X, X, UX
    add TMP1, TOP, TMP1, asl #1
    mov TMP2, X, asr #16
    add X, X, UX
    add TMP2, TOP, TMP2, asl #1
    vld1.32 {yacc2lo[0]}, [TMP1], STRIDE
    vzip.u8 xreg1, xreg3
    vld1.32 {yacc2hi[0]}, [TMP2], STRIDE
    vzip.u8 xreg2, xreg4
    vld1.32 {yacc2lo[1]}, [TMP1]
    vzip.u8 xreg3, xreg4
    vld1.32 {yacc2hi[1]}, [TMP2]
    vzip.u8 xreg1, xreg2
    convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
    vmull.u8 xacc1, xreg1, d28
    vzip.u8 yreg1, yreg3
    vmlal.u8 xacc1, xreg2, d29
    vzip.u8 yreg2, yreg4
    vmull.u8 xacc2, xreg3, d28
    vzip.u8 yreg3, yreg4
    vmlal.u8 xacc2, xreg4, d29
    vzip.u8 yreg1, yreg2
    vmull.u8 yacc1, yreg1, d28
    vmlal.u8 yacc1, yreg2, d29
    vmull.u8 yacc2, yreg3, d28
    vmlal.u8 yacc2, yreg4, d29
.endm
/*
 * Store 'numpix' (4, 2 or 1) finished a8r8g8b8 pixels from d0/d1,
 * post-incrementing OUT.  tmp1/tmp2 are unused; kept for signature
 * parity with bilinear_store_0565.
 */
.macro bilinear_store_8888 numpix, tmp1, tmp2
.if numpix == 4
    vst1.32 {d0, d1}, [OUT]!
.elseif numpix == 2
    vst1.32 {d0}, [OUT]!
.elseif numpix == 1
    vst1.32 {d0[0]}, [OUT, :32]!
.else
    .error bilinear_store_8888 numpix is unsupported
.endif
.endm
/*
 * Store 'numpix' (4, 2 or 1) result pixels as r5g6b5.  The vuzp sequence
 * rearranges the interleaved d0-d3 data into the planar layout expected by
 * convert_8888_to_0565 (from an included header) before packing and
 * storing through OUT.
 */
.macro bilinear_store_0565 numpix, tmp1, tmp2
    vuzp.u8 d0, d1
    vuzp.u8 d2, d3
    vuzp.u8 d1, d3
    vuzp.u8 d0, d2
    convert_8888_to_0565 d2, d1, d0, q1, tmp1, tmp2
.if numpix == 4
    vst1.16 {d2}, [OUT]!
.elseif numpix == 2
    vst1.32 {d2[0]}, [OUT]!
.elseif numpix == 1
    vst1.16 {d2[0]}, [OUT]!
.else
    .error bilinear_store_0565 numpix is unsupported
.endif
.endm
/*
* Macros for loading mask pixels into register 'mask'.
* vdup must be done in somewhere else.
*/
/* Mask format 'x' means "no mask": nothing to load. */
.macro bilinear_load_mask_x numpix, mask
.endm
/*
 * Load 'numpix' a8 mask bytes into the low lanes of 'mask' (duplication
 * across the register happens later in bilinear_duplicate_mask_8) and
 * prefetch ahead in the mask scanline.
 */
.macro bilinear_load_mask_8 numpix, mask
.if numpix == 4
    vld1.32 {mask[0]}, [MASK]!
.elseif numpix == 2
    vld1.16 {mask[0]}, [MASK]!
.elseif numpix == 1
    vld1.8 {mask[0]}, [MASK]!
.else
    .error bilinear_load_mask_8 numpix is unsupported
.endif
    pld [MASK, #prefetch_offset]
.endm
/* Dispatch on mask_fmt ('x' or '8') via .altmacro '&' concatenation. */
.macro bilinear_load_mask mask_fmt, numpix, mask
    bilinear_load_mask_&mask_fmt numpix, mask
.endm
/*
* Macros for loading destination pixels into register 'dst0' and 'dst1'.
* Interleave should be done somewhere else.
*/
/* SRC operator never reads the destination: both loaders are empty. */
.macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01
.endm
.macro bilinear_load_dst_8888_src numpix, dst0, dst1, dst01
.endm
/*
 * Load 'numpix' (4, 2 or 1) a8r8g8b8 destination pixels into dst0/dst1
 * without advancing OUT (the matching store does that), and prefetch the
 * destination scanline.
 */
.macro bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.if numpix == 4
    vld1.32 {dst0, dst1}, [OUT]
.elseif numpix == 2
    vld1.32 {dst0}, [OUT]
.elseif numpix == 1
    vld1.32 {dst0[0]}, [OUT]
.else
    .error bilinear_load_dst_8888 numpix is unsupported
.endif
    pld [OUT, #(prefetch_offset * 4)]
.endm
/* OVER and ADD both need the current destination contents. */
.macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01
    bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.endm
.macro bilinear_load_dst_8888_add numpix, dst0, dst1, dst01
    bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.endm
/* Dispatch on "<dst_fmt>_<op>" via .altmacro '&' concatenation. */
.macro bilinear_load_dst dst_fmt, op, numpix, dst0, dst1, dst01
    bilinear_load_dst_&dst_fmt&_&op numpix, dst0, dst1, dst01
.endm
/*
* Macros for duplicating partially loaded mask to fill entire register.
* We will apply mask to interleaved source pixels, that is
* (r0, r1, r2, r3, g0, g1, g2, g3) x (m0, m1, m2, m3, m0, m1, m2, m3)
* (b0, b1, b2, b3, a0, a1, a2, a3) x (m0, m1, m2, m3, m0, m1, m2, m3)
* So, we need to duplicate loaded mask into whole register.
*
* For two pixel case
* (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
* (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
* We can do some optimizations for this including last pixel cases.
*/
/* No mask ('x' format): nothing to duplicate. */
.macro bilinear_duplicate_mask_x numpix, mask
.endm
/*
 * Broadcast the numpix mask bytes loaded by bilinear_load_mask_8 across
 * the whole register, matching the interleaved source layout described in
 * the comment block above.
 */
.macro bilinear_duplicate_mask_8 numpix, mask
.if numpix == 4
    vdup.32 mask, mask[0]
.elseif numpix == 2
    vdup.16 mask, mask[0]
.elseif numpix == 1
    vdup.8 mask, mask[0]
.else
    .error bilinear_duplicate_mask_8 is unsupported
.endif
.endm
/* Dispatch on mask_fmt via .altmacro '&' concatenation. */
.macro bilinear_duplicate_mask mask_fmt, numpix, mask
    bilinear_duplicate_mask_&mask_fmt numpix, mask
.endm
/*
* Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form.
 * Interleave should be done when mask is enabled or operator is 'over'.
*/
/*
 * Deinterleave src and dst pixel channels (two vuzp passes turn the
 * per-pixel ARGB layout into per-channel rrrr/gggg/bbbb/aaaa order) so the
 * mask/combine steps can multiply whole channels at once.
 */
.macro bilinear_interleave src0, src1, dst0, dst1
    vuzp.8 src0, src1
    vuzp.8 dst0, dst1
    vuzp.8 src0, src1
    vuzp.8 dst0, dst1
.endm
/* No mask + SRC: neither mask application nor blending needs interleave. */
.macro bilinear_interleave_src_dst_x_src \
    numpix, src0, src1, src01, dst0, dst1, dst01
.endm
/* No mask + OVER: blending needs the channel-planar layout. */
.macro bilinear_interleave_src_dst_x_over \
    numpix, src0, src1, src01, dst0, dst1, dst01
    bilinear_interleave src0, src1, dst0, dst1
.endm
/* No mask + ADD: plain saturating add works on the packed layout. */
.macro bilinear_interleave_src_dst_x_add \
    numpix, src0, src1, src01, dst0, dst1, dst01
.endm
/* With an a8 mask every operator needs the channel-planar layout. */
.macro bilinear_interleave_src_dst_8_src \
    numpix, src0, src1, src01, dst0, dst1, dst01
    bilinear_interleave src0, src1, dst0, dst1
.endm
.macro bilinear_interleave_src_dst_8_over \
    numpix, src0, src1, src01, dst0, dst1, dst01
    bilinear_interleave src0, src1, dst0, dst1
.endm
.macro bilinear_interleave_src_dst_8_add \
    numpix, src0, src1, src01, dst0, dst1, dst01
    bilinear_interleave src0, src1, dst0, dst1
.endm
/* Dispatch on "<mask_fmt>_<op>" via .altmacro '&' concatenation. */
.macro bilinear_interleave_src_dst \
    mask_fmt, op, numpix, src0, src1, src01, dst0, dst1, dst01
    bilinear_interleave_src_dst_&mask_fmt&_&op \
        numpix, src0, src1, src01, dst0, dst1, dst01
.endm
/*
* Macros for applying masks to src pixels. (see combine_mask_u() function)
* src, dst should be in interleaved form.
* mask register should be in form (m0, m1, m2, m3).
*/
/* No mask: source passes through unchanged. */
.macro bilinear_apply_mask_to_src_x \
    numpix, src0, src1, src01, mask, \
    tmp01, tmp23, tmp45, tmp67
.endm
/*
 * src = src * mask / 255, on channel-planar data, using the widening
 * multiply + vrshr/vraddhn rounding sequence (x/255 approximated as
 * (x + ((x + 128) >> 8) + 128) >> 8, computed with rounding halving-adds).
 */
.macro bilinear_apply_mask_to_src_8 \
    numpix, src0, src1, src01, mask, \
    tmp01, tmp23, tmp45, tmp67
    vmull.u8 tmp01, src0, mask
    vmull.u8 tmp23, src1, mask
    /* bubbles */
    vrshr.u16 tmp45, tmp01, #8
    vrshr.u16 tmp67, tmp23, #8
    /* bubbles */
    vraddhn.u16 src0, tmp45, tmp01
    vraddhn.u16 src1, tmp67, tmp23
.endm
/* Dispatch on mask_fmt via .altmacro '&' concatenation. */
.macro bilinear_apply_mask_to_src \
    mask_fmt, numpix, src0, src1, src01, mask, \
    tmp01, tmp23, tmp45, tmp67
    bilinear_apply_mask_to_src_&mask_fmt \
        numpix, src0, src1, src01, mask, \
        tmp01, tmp23, tmp45, tmp67
.endm
/*
* Macros for combining src and destination pixels.
* Interleave or not is depending on operator 'op'.
*/
/* SRC operator: result is just the (masked) source; nothing to combine. */
.macro bilinear_combine_src \
    numpix, src0, src1, src01, dst0, dst1, dst01, \
    tmp01, tmp23, tmp45, tmp67, tmp8
.endm
/*
 * OVER operator on channel-planar data:
 *   dst = dst * (255 - src_alpha) / 255, then src01 += dst01 (saturating).
 * src1[1] holds the four alpha bytes after interleaving; vdup+vmvn forms
 * the inverted alpha, and the vrshr/vraddhn pair does the /255 rounding.
 */
.macro bilinear_combine_over \
    numpix, src0, src1, src01, dst0, dst1, dst01, \
    tmp01, tmp23, tmp45, tmp67, tmp8
    vdup.32 tmp8, src1[1]
    /* bubbles */
    vmvn.8 tmp8, tmp8
    /* bubbles */
    vmull.u8 tmp01, dst0, tmp8
    /* bubbles */
    vmull.u8 tmp23, dst1, tmp8
    /* bubbles */
    vrshr.u16 tmp45, tmp01, #8
    vrshr.u16 tmp67, tmp23, #8
    /* bubbles */
    vraddhn.u16 dst0, tmp45, tmp01
    vraddhn.u16 dst1, tmp67, tmp23
    /* bubbles */
    vqadd.u8 src01, dst01, src01
.endm
/* ADD operator: saturating add of source onto destination. */
.macro bilinear_combine_add \
    numpix, src0, src1, src01, dst0, dst1, dst01, \
    tmp01, tmp23, tmp45, tmp67, tmp8
    vqadd.u8 src01, dst01, src01
.endm
/* Dispatch on op via .altmacro '&' concatenation. */
.macro bilinear_combine \
    op, numpix, src0, src1, src01, dst0, dst1, dst01, \
    tmp01, tmp23, tmp45, tmp67, tmp8
    bilinear_combine_&op \
        numpix, src0, src1, src01, dst0, dst1, dst01, \
        tmp01, tmp23, tmp45, tmp67, tmp8
.endm
/*
* Macros for final deinterleaving of destination pixels if needed.
*/
/*
 * Undo the channel-planar interleave (two vuzp passes) so the result is
 * back in packed per-pixel ARGB order before storing.
 */
.macro bilinear_deinterleave numpix, dst0, dst1, dst01
    vuzp.8 dst0, dst1
    /* bubbles */
    vuzp.8 dst0, dst1
.endm
/* Only the combinations that interleaved earlier need deinterleaving;
 * mirrors the bilinear_interleave_src_dst_* selection above. */
.macro bilinear_deinterleave_dst_x_src numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_x_over numpix, dst0, dst1, dst01
    bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_x_add numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_src numpix, dst0, dst1, dst01
    bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_over numpix, dst0, dst1, dst01
    bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_add numpix, dst0, dst1, dst01
    bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
/* Dispatch on "<mask_fmt>_<op>" via .altmacro '&' concatenation. */
.macro bilinear_deinterleave_dst mask_fmt, op, numpix, dst0, dst1, dst01
    bilinear_deinterleave_dst_&mask_fmt&_&op numpix, dst0, dst1, dst01
.endm
/*
 * Generic single-pixel pipeline: fetch + vertical blend (d28/d29 weights),
 * horizontal blend (d30 weights from q15), then the mask / dst / combine /
 * store steps selected by src_fmt, mask_fmt, dst_fmt and op.  Does NOT
 * advance the horizontal weight accumulator q12 -- the caller does that.
 */
.macro bilinear_interpolate_last_pixel src_fmt, mask_fmt, dst_fmt, op
    bilinear_load_&src_fmt d0, d1, d2
    bilinear_load_mask mask_fmt, 1, d4
    bilinear_load_dst dst_fmt, op, 1, d18, d19, q9
    /* vertical interpolation: q1 = top * wt + bottom * wb */
    vmull.u8 q1, d0, d28
    vmlal.u8 q1, d1, d29
    /* 5 cycles bubble */
    /* horizontal interpolation with weight d30 */
    vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d2, d30
    vmlal.u16 q0, d3, d30
    /* 5 cycles bubble */
    bilinear_duplicate_mask mask_fmt, 1, d4
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    /* 3 cycles bubble */
    vmovn.u16 d0, q0
    /* 1 cycle bubble */
    bilinear_interleave_src_dst \
        mask_fmt, op, 1, d0, d1, q0, d18, d19, q9
    bilinear_apply_mask_to_src \
        mask_fmt, 1, d0, d1, q0, d4, \
        q3, q8, q10, q11
    bilinear_combine \
        op, 1, d0, d1, q0, d18, d19, q9, \
        q3, q8, q10, q11, d5
    bilinear_deinterleave_dst mask_fmt, op, 1, d0, d1, q0
    bilinear_store_&dst_fmt 1, q2, q3
.endm
/*
 * Generic two-pixel pipeline: like the single-pixel version but processes
 * a pair (horizontal weights d30/d31 from q15) and advances the weight
 * accumulator q12 by q13, refreshing q15 for the following pixels.
 */
.macro bilinear_interpolate_two_pixels src_fmt, mask_fmt, dst_fmt, op
    bilinear_load_and_vertical_interpolate_two_&src_fmt \
        q1, q11, d0, d1, d20, d21, d22, d23
    bilinear_load_mask mask_fmt, 2, d4
    bilinear_load_dst dst_fmt, op, 2, d18, d19, q9
    vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d2, d30
    vmlal.u16 q0, d3, d30
    vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q10, d22, d31
    vmlal.u16 q10, d23, d31
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
    bilinear_duplicate_mask mask_fmt, 2, d4
    /* step the horizontal weight accumulator and extract fresh weights */
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16 q12, q12, q13
    vmovn.u16 d0, q0
    bilinear_interleave_src_dst \
        mask_fmt, op, 2, d0, d1, q0, d18, d19, q9
    bilinear_apply_mask_to_src \
        mask_fmt, 2, d0, d1, q0, d4, \
        q3, q8, q10, q11
    bilinear_combine \
        op, 2, d0, d1, q0, d18, d19, q9, \
        q3, q8, q10, q11, d5
    bilinear_deinterleave_dst mask_fmt, op, 2, d0, d1, q0
    bilinear_store_&dst_fmt 2, q2, q3
.endm
/*
 * Generic four-pixel pipeline: two vertically-blended pixel pairs from the
 * four-pixel loader, horizontal interpolation of all four, then mask /
 * dst / combine / store.  Source prefetching is done through TMP1 (left
 * pointing at the last fetched texel by the loader).  Advances q12 twice.
 */
.macro bilinear_interpolate_four_pixels src_fmt, mask_fmt, dst_fmt, op
    bilinear_load_and_vertical_interpolate_four_&src_fmt \
        q1, q11, d0, d1, d20, d21, d22, d23 \
        q3, q9, d4, d5, d16, d17, d18, d19
    pld [TMP1, PF_OFFS]
    sub TMP1, TMP1, STRIDE
    vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d2, d30
    vmlal.u16 q0, d3, d30
    vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q10, d22, d31
    vmlal.u16 q10, d23, d31
    /* refresh horizontal weights for the second pixel pair */
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q2, d6, d30
    vmlal.u16 q2, d7, d30
    vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS
    bilinear_load_mask mask_fmt, 4, d22
    bilinear_load_dst dst_fmt, op, 4, d2, d3, q1
    pld [TMP1, PF_OFFS]
    vmlsl.u16 q8, d18, d31
    vmlal.u16 q8, d19, d31
    vadd.u16 q12, q12, q13
    /* narrow the four interpolated results to 16 bit */
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS)
    bilinear_duplicate_mask mask_fmt, 4, d22
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vmovn.u16 d0, q0
    vmovn.u16 d1, q2
    vadd.u16 q12, q12, q13
    bilinear_interleave_src_dst \
        mask_fmt, op, 4, d0, d1, q0, d2, d3, q1
    bilinear_apply_mask_to_src \
        mask_fmt, 4, d0, d1, q0, d22, \
        q3, q8, q9, q10
    bilinear_combine \
        op, 4, d0, d1, q0, d2, d3, q1, \
        q3, q8, q9, q10, d23
    bilinear_deinterleave_dst mask_fmt, op, 4, d0, d1, q0
    bilinear_store_&dst_fmt 4, q2, q3
.endm
/* Flag bits for the 'flags' argument of generate_bilinear_scanline_func:
 * USE_MASK selects the 3-operand (src IN mask OP dst) register layout;
 * USE_ALL_NEON_REGS makes the template preserve callee-saved d8-d15. */
.set BILINEAR_FLAG_USE_MASK, 1
.set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2
/*
* Main template macro for generating NEON optimized bilinear scanline functions.
*
 * Bilinear scanline generator macro takes the following arguments:
* fname - name of the function to generate
* src_fmt - source color format (8888 or 0565)
* dst_fmt - destination color format (8888 or 0565)
* src/dst_bpp_shift - (1 << bpp_shift) is the size of src/dst pixel in bytes
* process_last_pixel - code block that interpolate one pixel and does not
* update horizontal weight
* process_two_pixels - code block that interpolate two pixels and update
* horizontal weight
* process_four_pixels - code block that interpolate four pixels and update
* horizontal weight
* process_pixblock_head - head part of middle loop
* process_pixblock_tail - tail part of middle loop
* process_pixblock_tail_head - tail_head of middle loop
* pixblock_size - number of pixels processed in a single middle loop
* prefetch_distance - prefetch in the source image by that many pixels ahead
*/
/*
 * Emit one complete bilinear scanline function (see the argument list in
 * the comment block above).  Structure: register aliasing and prologue
 * (two layouts, with/without an a8 mask argument), weight/accumulator
 * setup, destination-alignment peeling (1, 2 and -- for pixblock 8 -- 4
 * pixels), a head/tail_head/tail pipelined middle loop, trailing-pixel
 * cleanup, then epilogue and .unreq teardown so the aliases can be reused
 * by the next instantiation.
 */
.macro generate_bilinear_scanline_func \
    fname, \
    src_fmt, dst_fmt, src_bpp_shift, dst_bpp_shift, \
    bilinear_process_last_pixel, \
    bilinear_process_two_pixels, \
    bilinear_process_four_pixels, \
    bilinear_process_pixblock_head, \
    bilinear_process_pixblock_tail, \
    bilinear_process_pixblock_tail_head, \
    pixblock_size, \
    prefetch_distance, \
    flags
pixman_asm_function fname
.if pixblock_size == 8
.elseif pixblock_size == 4
.else
    .error unsupported pixblock size
.endif
.if ((flags) & BILINEAR_FLAG_USE_MASK) == 0
/* No mask: args are (out, top, bottom, wt, [stack: wb, x, ux, width]).
 * TMP1/TMP2 deliberately reuse WT/WB's registers -- the weights are
 * duplicated into d28/d29 before any TMP use. */
OUT .req r0
TOP .req r1
BOTTOM .req r2
WT .req r3
WB .req r4
X .req r5
UX .req r6
WIDTH .req ip
TMP1 .req r3
TMP2 .req r4
PF_OFFS .req r7
TMP3 .req r8
TMP4 .req r9
STRIDE .req r2
    mov ip, sp
    push {r4, r5, r6, r7, r8, r9}
    mov PF_OFFS, #prefetch_distance
    ldmia ip, {WB, X, UX, WIDTH}
.else
/* With a8 mask: args are (out, mask, top, bottom, [stack: wt, wb, x, ux,
 * width]); everything shifts one register up. */
OUT .req r0
MASK .req r1
TOP .req r2
BOTTOM .req r3
WT .req r4
WB .req r5
X .req r6
UX .req r7
WIDTH .req ip
TMP1 .req r4
TMP2 .req r5
PF_OFFS .req r8
TMP3 .req r9
TMP4 .req r10
STRIDE .req r3
.set prefetch_offset, prefetch_distance
    mov ip, sp
    push {r4, r5, r6, r7, r8, r9, r10, ip}
    mov PF_OFFS, #prefetch_distance
    ldmia ip, {WT, WB, X, UX, WIDTH}
.endif
    /* scale prefetch distance by the per-pixel coordinate step */
    mul PF_OFFS, PF_OFFS, UX
.if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0
    vpush {d8-d15}
.endif
    /* bottom row is addressed as top + STRIDE from here on */
    sub STRIDE, BOTTOM, TOP
    .unreq BOTTOM
    cmp WIDTH, #0
    ble 3f
    /* q12 = horizontal coordinate accumulator, q13 = step,
     * d28/d29 = duplicated vertical weights */
    vdup.u16 q12, X
    vdup.u16 q13, UX
    vdup.u8 d28, WT
    vdup.u8 d29, WB
    /* make the high half of q12 lead by one UX (two-pixel stepping) */
    vadd.u16 d25, d25, d26
/* ensure good destination alignment */
    cmp WIDTH, #1
    blt 0f
    tst OUT, #(1 << dst_bpp_shift)
    beq 0f
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16 q12, q12, q13
    bilinear_process_last_pixel
    sub WIDTH, WIDTH, #1
0:
    /* from now on the accumulator steps two pixels at a time */
    vadd.u16 q13, q13, q13
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16 q12, q12, q13
    cmp WIDTH, #2
    blt 0f
    tst OUT, #(1 << (dst_bpp_shift + 1))
    beq 0f
    bilinear_process_two_pixels
    sub WIDTH, WIDTH, #2
0:
.if pixblock_size == 8
    cmp WIDTH, #4
    blt 0f
    tst OUT, #(1 << (dst_bpp_shift + 2))
    beq 0f
    bilinear_process_four_pixels
    sub WIDTH, WIDTH, #4
0:
.endif
    /* software-pipelined middle loop: head once, tail_head repeated,
     * tail once */
    subs WIDTH, WIDTH, #pixblock_size
    blt 1f
    mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift)
    bilinear_process_pixblock_head
    subs WIDTH, WIDTH, #pixblock_size
    blt 5f
0:
    bilinear_process_pixblock_tail_head
    subs WIDTH, WIDTH, #pixblock_size
    bge 0b
5:
    bilinear_process_pixblock_tail
1:
.if pixblock_size == 8
    tst WIDTH, #4
    beq 2f
    bilinear_process_four_pixels
2:
.endif
/* handle the remaining trailing pixels */
    tst WIDTH, #2
    beq 2f
    bilinear_process_two_pixels
2:
    tst WIDTH, #1
    beq 3f
    bilinear_process_last_pixel
3:
.if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0
    vpop {d8-d15}
.endif
.if ((flags) & BILINEAR_FLAG_USE_MASK) == 0
    pop {r4, r5, r6, r7, r8, r9}
.else
    pop {r4, r5, r6, r7, r8, r9, r10, ip}
.endif
    bx lr
/* release the register aliases for the next template instantiation */
    .unreq OUT
    .unreq TOP
    .unreq WT
    .unreq WB
    .unreq X
    .unreq UX
    .unreq WIDTH
    .unreq TMP1
    .unreq TMP2
    .unreq PF_OFFS
    .unreq TMP3
    .unreq TMP4
    .unreq STRIDE
.if ((flags) & BILINEAR_FLAG_USE_MASK) != 0
    .unreq MASK
.endif
.endfunc
.endm
/* src_8888_8_8888 */
/*
 * Hook set for a8r8g8b8 source, a8 mask, a8r8g8b8 destination, SRC op.
 * All hooks delegate to the generic interpolate macros; the pixblock
 * head/tail/tail_head trio is not software-pipelined for this variant
 * (tail is empty, tail_head = tail + head).
 */
.macro bilinear_src_8888_8_8888_process_last_pixel
    bilinear_interpolate_last_pixel 8888, 8, 8888, src
.endm
.macro bilinear_src_8888_8_8888_process_two_pixels
    bilinear_interpolate_two_pixels 8888, 8, 8888, src
.endm
.macro bilinear_src_8888_8_8888_process_four_pixels
    bilinear_interpolate_four_pixels 8888, 8, 8888, src
.endm
.macro bilinear_src_8888_8_8888_process_pixblock_head
    bilinear_src_8888_8_8888_process_four_pixels
.endm
.macro bilinear_src_8888_8_8888_process_pixblock_tail
.endm
.macro bilinear_src_8888_8_8888_process_pixblock_tail_head
    bilinear_src_8888_8_8888_process_pixblock_tail
    bilinear_src_8888_8_8888_process_pixblock_head
.endm
/* src_8888_8_0565 */
/* Hook set: a8r8g8b8 source, a8 mask, r5g6b5 destination, SRC op
 * (non-pipelined, same pattern as src_8888_8_8888 above). */
.macro bilinear_src_8888_8_0565_process_last_pixel
    bilinear_interpolate_last_pixel 8888, 8, 0565, src
.endm
.macro bilinear_src_8888_8_0565_process_two_pixels
    bilinear_interpolate_two_pixels 8888, 8, 0565, src
.endm
.macro bilinear_src_8888_8_0565_process_four_pixels
    bilinear_interpolate_four_pixels 8888, 8, 0565, src
.endm
.macro bilinear_src_8888_8_0565_process_pixblock_head
    bilinear_src_8888_8_0565_process_four_pixels
.endm
.macro bilinear_src_8888_8_0565_process_pixblock_tail
.endm
.macro bilinear_src_8888_8_0565_process_pixblock_tail_head
    bilinear_src_8888_8_0565_process_pixblock_tail
    bilinear_src_8888_8_0565_process_pixblock_head
.endm
/* src_0565_8_x888 */
/* Hook set: r5g6b5 source, a8 mask, x8r8g8b8 destination, SRC op
 * (non-pipelined). */
.macro bilinear_src_0565_8_x888_process_last_pixel
    bilinear_interpolate_last_pixel 0565, 8, 8888, src
.endm
.macro bilinear_src_0565_8_x888_process_two_pixels
    bilinear_interpolate_two_pixels 0565, 8, 8888, src
.endm
.macro bilinear_src_0565_8_x888_process_four_pixels
    bilinear_interpolate_four_pixels 0565, 8, 8888, src
.endm
.macro bilinear_src_0565_8_x888_process_pixblock_head
    bilinear_src_0565_8_x888_process_four_pixels
.endm
.macro bilinear_src_0565_8_x888_process_pixblock_tail
.endm
.macro bilinear_src_0565_8_x888_process_pixblock_tail_head
    bilinear_src_0565_8_x888_process_pixblock_tail
    bilinear_src_0565_8_x888_process_pixblock_head
.endm
/* src_0565_8_0565 */
/* Hook set: r5g6b5 source, a8 mask, r5g6b5 destination, SRC op
 * (non-pipelined). */
.macro bilinear_src_0565_8_0565_process_last_pixel
    bilinear_interpolate_last_pixel 0565, 8, 0565, src
.endm
.macro bilinear_src_0565_8_0565_process_two_pixels
    bilinear_interpolate_two_pixels 0565, 8, 0565, src
.endm
.macro bilinear_src_0565_8_0565_process_four_pixels
    bilinear_interpolate_four_pixels 0565, 8, 0565, src
.endm
.macro bilinear_src_0565_8_0565_process_pixblock_head
    bilinear_src_0565_8_0565_process_four_pixels
.endm
.macro bilinear_src_0565_8_0565_process_pixblock_tail
.endm
.macro bilinear_src_0565_8_0565_process_pixblock_tail_head
    bilinear_src_0565_8_0565_process_pixblock_tail
    bilinear_src_0565_8_0565_process_pixblock_head
.endm
/* over_8888_8888 */
/* Odd-pixel hooks for a8r8g8b8 OVER a8r8g8b8 (no mask); the pixblock
 * head/tail/tail_head below are hand-pipelined instead of generic. */
.macro bilinear_over_8888_8888_process_last_pixel
    bilinear_interpolate_last_pixel 8888, x, 8888, over
.endm
.macro bilinear_over_8888_8888_process_two_pixels
    bilinear_interpolate_two_pixels 8888, x, 8888, over
.endm
.macro bilinear_over_8888_8888_process_four_pixels
    bilinear_interpolate_four_pixels 8888, x, 8888, over
.endm
/*
 * OVER 8888x8888 pixblock head: fetch four source texel pairs, vertically
 * blend them into q8-q11 (d28/d29 = top/bottom weights) and start the
 * horizontal interpolation of the first two pixels in q0/q1.  Mirrors the
 * generic four-pixel loader but with prefetches and address arithmetic
 * interleaved for scheduling.
 */
.macro bilinear_over_8888_8888_process_pixblock_head
    mov TMP1, X, asr #16
    add X, X, UX
    add TMP1, TOP, TMP1, asl #2
    mov TMP2, X, asr #16
    add X, X, UX
    add TMP2, TOP, TMP2, asl #2
    vld1.32 {d22}, [TMP1], STRIDE
    vld1.32 {d23}, [TMP1]
    mov TMP3, X, asr #16
    add X, X, UX
    add TMP3, TOP, TMP3, asl #2
    vmull.u8 q8, d22, d28
    vmlal.u8 q8, d23, d29
    vld1.32 {d22}, [TMP2], STRIDE
    vld1.32 {d23}, [TMP2]
    mov TMP4, X, asr #16
    add X, X, UX
    add TMP4, TOP, TMP4, asl #2
    vmull.u8 q9, d22, d28
    vmlal.u8 q9, d23, d29
    vld1.32 {d22}, [TMP3], STRIDE
    vld1.32 {d23}, [TMP3]
    vmull.u8 q10, d22, d28
    vmlal.u8 q10, d23, d29
    vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d16, d30
    vmlal.u16 q0, d17, d30
    pld [TMP4, PF_OFFS]
    vld1.32 {d16}, [TMP4], STRIDE
    vld1.32 {d17}, [TMP4]
    pld [TMP4, PF_OFFS]
    vmull.u8 q11, d16, d28
    vmlal.u8 q11, d17, d29
    vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q1, d18, d31
    vmlal.u16 q1, d19, d31
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16 q12, q12, q13
.endm
/*
 * OVER 8888x8888 pixblock tail: finish horizontal interpolation of pixels
 * 2/3 (q2/q3), narrow all four to bytes, load the destination, blend
 * dst * (255 - src_alpha) / 255 (vdup of the alpha lane + vmvn, rounding
 * via vrshr/vraddhn), saturating-add the source, and store.
 */
.macro bilinear_over_8888_8888_process_pixblock_tail
    vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q2, d20, d30
    vmlal.u16 q2, d21, d30
    vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q3, d22, d31
    vmlal.u16 q3, d23, d31
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vld1.32 {d2, d3}, [OUT, :128]
    pld [OUT, #(prefetch_offset * 4)]
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
    vmovn.u16 d6, q0
    vmovn.u16 d7, q2
    /* channel-planar interleave of src (d6/d7) and dst (d2/d3) */
    vuzp.8 d6, d7
    vuzp.8 d2, d3
    vuzp.8 d6, d7
    vuzp.8 d2, d3
    /* d4 = ~src_alpha broadcast */
    vdup.32 d4, d7[1]
    vmvn.8 d4, d4
    vmull.u8 q11, d2, d4
    vmull.u8 q2, d3, d4
    vrshr.u16 q1, q11, #8
    vrshr.u16 q10, q2, #8
    vraddhn.u16 d2, q1, q11
    vraddhn.u16 d3, q10, q2
    /* result = src + dst * (1 - alpha), saturating */
    vqadd.u8 q3, q1, q3
    vuzp.8 d6, d7
    vuzp.8 d6, d7
    vadd.u16 q12, q12, q13
    vst1.32 {d6, d7}, [OUT, :128]!
.endm
/*
 * OVER 8888x8888 fused tail+head for the steady-state loop: completes the
 * previous four pixels (interpolate, blend with dst, store) while the next
 * four are being fetched and vertically blended.  The schedule is hand
 * tuned -- do not reorder without re-checking dependencies.
 */
.macro bilinear_over_8888_8888_process_pixblock_tail_head
    vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
    mov TMP1, X, asr #16
    add X, X, UX
    add TMP1, TOP, TMP1, asl #2
    vmlsl.u16 q2, d20, d30
    mov TMP2, X, asr #16
    add X, X, UX
    add TMP2, TOP, TMP2, asl #2
    vmlal.u16 q2, d21, d30
    vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
    vld1.32 {d20}, [TMP1], STRIDE
    vmlsl.u16 q3, d22, d31
    vmlal.u16 q3, d23, d31
    vld1.32 {d21}, [TMP1]
    vmull.u8 q8, d20, d28
    vmlal.u8 q8, d21, d29
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vld1.32 {d2, d3}, [OUT, :128]
    pld [OUT, PF_OFFS]
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vld1.32 {d22}, [TMP2], STRIDE
    vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
    vmovn.u16 d6, q0
    vld1.32 {d23}, [TMP2]
    vmull.u8 q9, d22, d28
    mov TMP3, X, asr #16
    add X, X, UX
    add TMP3, TOP, TMP3, asl #2
    mov TMP4, X, asr #16
    add X, X, UX
    add TMP4, TOP, TMP4, asl #2
    vmlal.u8 q9, d23, d29
    vmovn.u16 d7, q2
    vld1.32 {d22}, [TMP3], STRIDE
    /* channel-planar interleave of previous src/dst, alpha inversion */
    vuzp.8 d6, d7
    vuzp.8 d2, d3
    vuzp.8 d6, d7
    vuzp.8 d2, d3
    vdup.32 d4, d7[1]
    vld1.32 {d23}, [TMP3]
    vmvn.8 d4, d4
    vmull.u8 q10, d22, d28
    vmlal.u8 q10, d23, d29
    vmull.u8 q11, d2, d4
    vmull.u8 q2, d3, d4
    vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d16, d30
    vrshr.u16 q1, q11, #8
    vmlal.u16 q0, d17, d30
    vrshr.u16 q8, q2, #8
    vraddhn.u16 d2, q1, q11
    vraddhn.u16 d3, q8, q2
    pld [TMP4, PF_OFFS]
    vld1.32 {d16}, [TMP4], STRIDE
    vqadd.u8 q3, q1, q3
    vld1.32 {d17}, [TMP4]
    pld [TMP4, PF_OFFS]
    vmull.u8 q11, d16, d28
    vmlal.u8 q11, d17, d29
    vuzp.8 d6, d7
    vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
    vuzp.8 d6, d7
    vmlsl.u16 q1, d18, d31
    vadd.u16 q12, q12, q13
    vmlal.u16 q1, d19, d31
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16 q12, q12, q13
    /* store the completed previous group of four pixels */
    vst1.32 {d6, d7}, [OUT, :128]!
.endm
/* over_8888_8_8888 */
/* Odd-pixel hooks for a8r8g8b8 OVER a8r8g8b8 with an a8 mask; the
 * pixblock head/tail/tail_head below are hand-pipelined. */
.macro bilinear_over_8888_8_8888_process_last_pixel
    bilinear_interpolate_last_pixel 8888, 8, 8888, over
.endm
.macro bilinear_over_8888_8_8888_process_two_pixels
    bilinear_interpolate_two_pixels 8888, 8, 8888, over
.endm
.macro bilinear_over_8888_8_8888_process_four_pixels
    bilinear_interpolate_four_pixels 8888, 8, 8888, over
.endm
/*
 * OVER 8888x8x8888 pixblock head: fetch four source texel pairs, fully
 * interpolate the first two pixels into d16 (via q0), leave pixels 2/3
 * vertically blended in q3/q1, and load the four a8 mask bytes into
 * d22[0] for the tail.
 */
.macro bilinear_over_8888_8_8888_process_pixblock_head
    mov TMP1, X, asr #16
    add X, X, UX
    add TMP1, TOP, TMP1, asl #2
    vld1.32 {d0}, [TMP1], STRIDE
    mov TMP2, X, asr #16
    add X, X, UX
    add TMP2, TOP, TMP2, asl #2
    vld1.32 {d1}, [TMP1]
    mov TMP3, X, asr #16
    add X, X, UX
    add TMP3, TOP, TMP3, asl #2
    vld1.32 {d2}, [TMP2], STRIDE
    mov TMP4, X, asr #16
    add X, X, UX
    add TMP4, TOP, TMP4, asl #2
    vld1.32 {d3}, [TMP2]
    /* vertical blend of pixels 0/1 */
    vmull.u8 q2, d0, d28
    vmull.u8 q3, d2, d28
    vmlal.u8 q2, d1, d29
    vmlal.u8 q3, d3, d29
    /* horizontal blend of pixels 0/1 */
    vshll.u16 q0, d4, #BILINEAR_INTERPOLATION_BITS
    vshll.u16 q1, d6, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d4, d30
    vmlsl.u16 q1, d6, d31
    vmlal.u16 q0, d5, d30
    vmlal.u16 q1, d7, d31
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vld1.32 {d2}, [TMP3], STRIDE
    vld1.32 {d3}, [TMP3]
    pld [TMP4, PF_OFFS]
    vld1.32 {d4}, [TMP4], STRIDE
    vld1.32 {d5}, [TMP4]
    pld [TMP4, PF_OFFS]
    /* vertical blend of pixels 2/3 */
    vmull.u8 q3, d2, d28
    vmlal.u8 q3, d3, d29
    vmull.u8 q1, d4, d28
    vmlal.u8 q1, d5, d29
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    /* load four a8 mask bytes and prefetch the mask line */
    vld1.32 {d22[0]}, [MASK]!
    pld [MASK, #prefetch_offset]
    vadd.u16 q12, q12, q13
    vmovn.u16 d16, q0
.endm
/*
 * OVER 8888x8x8888 pixblock tail: finish pixels 2/3 (into d17), apply the
 * broadcast a8 mask to the source (vrsra/vrshrn /255 rounding), load dst,
 * blend dst by the inverted masked-source alpha, saturating-add and store.
 */
.macro bilinear_over_8888_8_8888_process_pixblock_tail
    vshll.u16 q9, d6, #BILINEAR_INTERPOLATION_BITS
    vshll.u16 q10, d2, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q9, d6, d30
    vmlsl.u16 q10, d2, d31
    vmlal.u16 q9, d7, d30
    vmlal.u16 q10, d3, d31
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16 q12, q12, q13
    /* broadcast the four mask bytes across d22 */
    vdup.32 d22, d22[0]
    vshrn.u32 d18, q9, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d19, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
    vmovn.u16 d17, q9
    vld1.32 {d18, d19}, [OUT, :128]
    pld [OUT, PF_OFFS]
    /* channel-planar interleave of src (d16/d17) and dst (d18/d19) */
    vuzp.8 d16, d17
    vuzp.8 d18, d19
    vuzp.8 d16, d17
    vuzp.8 d18, d19
    /* src = src * mask / 255 */
    vmull.u8 q10, d16, d22
    vmull.u8 q11, d17, d22
    vrsra.u16 q10, q10, #8
    vrsra.u16 q11, q11, #8
    vrshrn.u16 d16, q10, #8
    vrshrn.u16 d17, q11, #8
    /* dst = dst * ~src_alpha / 255 */
    vdup.32 d22, d17[1]
    vmvn.8 d22, d22
    vmull.u8 q10, d18, d22
    vmull.u8 q11, d19, d22
    vrshr.u16 q9, q10, #8
    vrshr.u16 q0, q11, #8
    vraddhn.u16 d18, q9, q10
    vraddhn.u16 d19, q0, q11
    /* result = masked src + attenuated dst, saturating */
    vqadd.u8 q9, q8, q9
    vuzp.8 d18, d19
    vuzp.8 d18, d19
    vst1.32 {d18, d19}, [OUT, :128]!
.endm
/*
 * OVER 8888x8x8888 fused tail+head: completes the previous four pixels
 * (mask application, dst blend, store) while fetching, vertically blending
 * and mask-loading the next four.  The schedule is hand tuned -- do not
 * reorder without re-checking dependencies.
 */
.macro bilinear_over_8888_8_8888_process_pixblock_tail_head
    vshll.u16 q9, d6, #BILINEAR_INTERPOLATION_BITS
    mov TMP1, X, asr #16
    add X, X, UX
    add TMP1, TOP, TMP1, asl #2
    vshll.u16 q10, d2, #BILINEAR_INTERPOLATION_BITS
    vld1.32 {d0}, [TMP1], STRIDE
    mov TMP2, X, asr #16
    add X, X, UX
    add TMP2, TOP, TMP2, asl #2
    vmlsl.u16 q9, d6, d30
    vmlsl.u16 q10, d2, d31
    vld1.32 {d1}, [TMP1]
    mov TMP3, X, asr #16
    add X, X, UX
    add TMP3, TOP, TMP3, asl #2
    vmlal.u16 q9, d7, d30
    vmlal.u16 q10, d3, d31
    vld1.32 {d2}, [TMP2], STRIDE
    mov TMP4, X, asr #16
    add X, X, UX
    add TMP4, TOP, TMP4, asl #2
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16 q12, q12, q13
    vld1.32 {d3}, [TMP2]
    vdup.32 d22, d22[0]
    vshrn.u32 d18, q9, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d19, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
    vmull.u8 q2, d0, d28
    vmull.u8 q3, d2, d28
    vmovn.u16 d17, q9
    vld1.32 {d18, d19}, [OUT, :128]
    pld [OUT, #(prefetch_offset * 4)]
    vmlal.u8 q2, d1, d29
    vmlal.u8 q3, d3, d29
    vuzp.8 d16, d17
    vuzp.8 d18, d19
    vshll.u16 q0, d4, #BILINEAR_INTERPOLATION_BITS
    vshll.u16 q1, d6, #BILINEAR_INTERPOLATION_BITS
    vuzp.8 d16, d17
    vuzp.8 d18, d19
    vmlsl.u16 q0, d4, d30
    vmlsl.u16 q1, d6, d31
    vmull.u8 q10, d16, d22
    vmull.u8 q11, d17, d22
    vmlal.u16 q0, d5, d30
    vmlal.u16 q1, d7, d31
    vrsra.u16 q10, q10, #8
    vrsra.u16 q11, q11, #8
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vrshrn.u16 d16, q10, #8
    vrshrn.u16 d17, q11, #8
    vld1.32 {d2}, [TMP3], STRIDE
    vdup.32 d22, d17[1]
    vld1.32 {d3}, [TMP3]
    vmvn.8 d22, d22
    pld [TMP4, PF_OFFS]
    vld1.32 {d4}, [TMP4], STRIDE
    vmull.u8 q10, d18, d22
    vmull.u8 q11, d19, d22
    vld1.32 {d5}, [TMP4]
    pld [TMP4, PF_OFFS]
    vmull.u8 q3, d2, d28
    vrshr.u16 q9, q10, #8
    vrshr.u16 q15, q11, #8
    vmlal.u8 q3, d3, d29
    vmull.u8 q1, d4, d28
    vraddhn.u16 d18, q9, q10
    vraddhn.u16 d19, q15, q11
    vmlal.u8 q1, d5, d29
    vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vqadd.u8 q9, q8, q9
    /* next group's mask bytes */
    vld1.32 {d22[0]}, [MASK]!
    vuzp.8 d18, d19
    vadd.u16 q12, q12, q13
    vuzp.8 d18, d19
    vmovn.u16 d16, q0
    /* store the completed previous group of four pixels */
    vst1.32 {d18, d19}, [OUT, :128]!
.endm
/* add_8888_8888 */
/*
 * Pixel-processing macros for the ADD operator fast paths.  These are
 * thin wrappers that delegate to the generic bilinear_interpolate_*
 * macros (src format, mask format, dst format, operator).  Mask
 * format 'x' means "no mask"; '8' means an a8 mask.  The pixblock
 * head does all the work and the tail is empty, so tail_head simply
 * chains tail then head.
 */
.macro bilinear_add_8888_8888_process_last_pixel
bilinear_interpolate_last_pixel 8888, x, 8888, add
.endm
.macro bilinear_add_8888_8888_process_two_pixels
bilinear_interpolate_two_pixels 8888, x, 8888, add
.endm
.macro bilinear_add_8888_8888_process_four_pixels
bilinear_interpolate_four_pixels 8888, x, 8888, add
.endm
.macro bilinear_add_8888_8888_process_pixblock_head
bilinear_add_8888_8888_process_four_pixels
.endm
.macro bilinear_add_8888_8888_process_pixblock_tail
.endm
.macro bilinear_add_8888_8888_process_pixblock_tail_head
bilinear_add_8888_8888_process_pixblock_tail
bilinear_add_8888_8888_process_pixblock_head
.endm
/* add_8888_8_8888 */
/* Same structure as above, but with an a8 mask. */
.macro bilinear_add_8888_8_8888_process_last_pixel
bilinear_interpolate_last_pixel 8888, 8, 8888, add
.endm
.macro bilinear_add_8888_8_8888_process_two_pixels
bilinear_interpolate_two_pixels 8888, 8, 8888, add
.endm
.macro bilinear_add_8888_8_8888_process_four_pixels
bilinear_interpolate_four_pixels 8888, 8, 8888, add
.endm
.macro bilinear_add_8888_8_8888_process_pixblock_head
bilinear_add_8888_8_8888_process_four_pixels
.endm
.macro bilinear_add_8888_8_8888_process_pixblock_tail
.endm
.macro bilinear_add_8888_8_8888_process_pixblock_tail_head
bilinear_add_8888_8_8888_process_pixblock_tail
bilinear_add_8888_8_8888_process_pixblock_head
.endm
/* Bilinear scanline functions */
/*
 * Instantiate the exported bilinear scanline functions.  Each call
 * passes: function name, two format codes with bpp shifts (2 = 32bpp,
 * 1 = 16bpp — presumably src/dst, confirm against the macro
 * definition earlier in this file), the per-case pixel-processing
 * macros defined above, then pixblock size (4), prefetch distance
 * (28) and flags (BILINEAR_FLAG_USE_MASK when an a8 mask scanline is
 * consumed).
 */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon, \
8888, 8888, 2, 2, \
bilinear_src_8888_8_8888_process_last_pixel, \
bilinear_src_8888_8_8888_process_two_pixels, \
bilinear_src_8888_8_8888_process_four_pixels, \
bilinear_src_8888_8_8888_process_pixblock_head, \
bilinear_src_8888_8_8888_process_pixblock_tail, \
bilinear_src_8888_8_8888_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_neon, \
8888, 0565, 2, 1, \
bilinear_src_8888_8_0565_process_last_pixel, \
bilinear_src_8888_8_0565_process_two_pixels, \
bilinear_src_8888_8_0565_process_four_pixels, \
bilinear_src_8888_8_0565_process_pixblock_head, \
bilinear_src_8888_8_0565_process_pixblock_tail, \
bilinear_src_8888_8_0565_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_neon, \
0565, 8888, 1, 2, \
bilinear_src_0565_8_x888_process_last_pixel, \
bilinear_src_0565_8_x888_process_two_pixels, \
bilinear_src_0565_8_x888_process_four_pixels, \
bilinear_src_0565_8_x888_process_pixblock_head, \
bilinear_src_0565_8_x888_process_pixblock_tail, \
bilinear_src_0565_8_x888_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_neon, \
0565, 0565, 1, 1, \
bilinear_src_0565_8_0565_process_last_pixel, \
bilinear_src_0565_8_0565_process_two_pixels, \
bilinear_src_0565_8_0565_process_four_pixels, \
bilinear_src_0565_8_0565_process_pixblock_head, \
bilinear_src_0565_8_0565_process_pixblock_tail, \
bilinear_src_0565_8_0565_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
/* OVER without a mask: no USE_MASK flag */
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon, \
8888, 8888, 2, 2, \
bilinear_over_8888_8888_process_last_pixel, \
bilinear_over_8888_8888_process_two_pixels, \
bilinear_over_8888_8888_process_four_pixels, \
bilinear_over_8888_8888_process_pixblock_head, \
bilinear_over_8888_8888_process_pixblock_tail, \
bilinear_over_8888_8888_process_pixblock_tail_head, \
4, 28, 0
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_neon, \
8888, 8888, 2, 2, \
bilinear_over_8888_8_8888_process_last_pixel, \
bilinear_over_8888_8_8888_process_two_pixels, \
bilinear_over_8888_8_8888_process_four_pixels, \
bilinear_over_8888_8_8888_process_pixblock_head, \
bilinear_over_8888_8_8888_process_pixblock_tail, \
bilinear_over_8888_8_8888_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_neon, \
8888, 8888, 2, 2, \
bilinear_add_8888_8888_process_last_pixel, \
bilinear_add_8888_8888_process_two_pixels, \
bilinear_add_8888_8888_process_four_pixels, \
bilinear_add_8888_8888_process_pixblock_head, \
bilinear_add_8888_8888_process_pixblock_tail, \
bilinear_add_8888_8888_process_pixblock_tail_head, \
4, 28, 0
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_neon, \
8888, 8888, 2, 2, \
bilinear_add_8888_8_8888_process_last_pixel, \
bilinear_add_8888_8_8888_process_two_pixels, \
bilinear_add_8888_8_8888_process_four_pixels, \
bilinear_add_8888_8_8888_process_pixblock_head, \
bilinear_add_8888_8_8888_process_pixblock_tail, \
bilinear_add_8888_8_8888_process_pixblock_tail_head, \
4, 28, BILINEAR_FLAG_USE_MASK
|
iMAGRAY/Shelldone | 4,651 | deps/cairo/pixman/pixman/pixman-arm-simd-asm-scaled.S | /*
* Copyright © 2008 Mozilla Corporation
* Copyright © 2010 Nokia Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Mozilla Corporation not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Mozilla Corporation makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*
* Author: Jeff Muizelaar (jeff@infidigm.net)
*
*/
/* Prevent the stack from becoming executable */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.arch armv6
.object_arch armv4
.arm
.altmacro
.p2align 2
#include "pixman-arm-asm.h"
/*
* Note: This code is only using armv5te instructions (not even armv6),
* but is scheduled for ARM Cortex-A8 pipeline. So it might need to
* be split into a few variants, tuned for each microarchitecture.
*
* TODO: In order to get good performance on ARM9/ARM11 cores (which don't
* have efficient write combining), it needs to be changed to use 16-byte
* aligned writes using STM instruction.
*
* Nearest scanline scaler macro template uses the following arguments:
* fname - name of the function to generate
* bpp_shift - (1 << bpp_shift) is the size of pixel in bytes
* t - type suffix for LDR/STR instructions
* prefetch_distance - prefetch in the source image by that many
* pixels ahead
* prefetch_braking_distance - stop prefetching when that many pixels are
* remaining before the end of scanline
*/
/*
 * Generate one nearest-neighbour scanline scaler.  VX is the 16.16
 * fixed-point source x coordinate, stepped by UNIT_X per destination
 * pixel; the byte offset into SRC is (VX >> 16) << bpp_shift, which
 * the VXMASK/asr combination computes in one AND.  The numeric-label
 * "9:/subpls/bpl 9b" loops keep VX inside [0, SRC_WIDTH_FIXED) —
 * presumably for NORMAL (wrap-around) repeat; confirm at call sites.
 */
.macro generate_nearest_scanline_func fname, bpp_shift, t, \
prefetch_distance, \
prefetch_braking_distance
pixman_asm_function fname
W .req r0
DST .req r1
SRC .req r2
VX .req r3
UNIT_X .req ip
TMP1 .req r4
TMP2 .req r5
VXMASK .req r6
PF_OFFS .req r7
SRC_WIDTH_FIXED .req r8
/* 5th argument (UNIT_X) arrives on the stack */
ldr UNIT_X, [sp]
push {r4, r5, r6, r7, r8, r10}
mvn VXMASK, #((1 << bpp_shift) - 1)
/* 6th argument: original [sp, #4] plus the 24 bytes just pushed */
ldr SRC_WIDTH_FIXED, [sp, #28]
/* define helper macro */
.macro scale_2_pixels
ldr&t TMP1, [SRC, TMP1]
and TMP2, VXMASK, VX, asr #(16 - bpp_shift)
adds VX, VX, UNIT_X
str&t TMP1, [DST], #(1 << bpp_shift)
9: subpls VX, VX, SRC_WIDTH_FIXED
bpl 9b
ldr&t TMP2, [SRC, TMP2]
and TMP1, VXMASK, VX, asr #(16 - bpp_shift)
adds VX, VX, UNIT_X
str&t TMP2, [DST], #(1 << bpp_shift)
9: subpls VX, VX, SRC_WIDTH_FIXED
bpl 9b
.endm
/* now do the scaling */
and TMP1, VXMASK, VX, asr #(16 - bpp_shift)
adds VX, VX, UNIT_X
9: subpls VX, VX, SRC_WIDTH_FIXED
bpl 9b
subs W, W, #(8 + prefetch_braking_distance)
blt 2f
/* calculate prefetch offset */
mov PF_OFFS, #prefetch_distance
mla PF_OFFS, UNIT_X, PF_OFFS, VX
1: /* main loop, process 8 pixels per iteration with prefetch */
pld [SRC, PF_OFFS, asr #(16 - bpp_shift)]
/* two-operand form: PF_OFFS += UNIT_X * 8 (one block ahead) */
add PF_OFFS, UNIT_X, lsl #3
scale_2_pixels
scale_2_pixels
scale_2_pixels
scale_2_pixels
subs W, W, #8
bge 1b
2:
subs W, W, #(4 - 8 - prefetch_braking_distance)
blt 2f
1: /* process the remaining pixels */
scale_2_pixels
scale_2_pixels
subs W, W, #4
bge 1b
2:
tst W, #2
beq 2f
scale_2_pixels
2:
tst W, #1
ldrne&t TMP1, [SRC, TMP1]
strne&t TMP1, [DST]
/* cleanup helper macro */
.purgem scale_2_pixels
.unreq DST
.unreq SRC
.unreq W
.unreq VX
.unreq UNIT_X
.unreq TMP1
.unreq TMP2
.unreq VXMASK
.unreq PF_OFFS
.unreq SRC_WIDTH_FIXED
/* return */
pop {r4, r5, r6, r7, r8, r10}
bx lr
.endfunc
.endm
/* 16bpp (bpp_shift 1, halfword ldrh/strh), prefetch 80, braking 32 */
generate_nearest_scanline_func \
pixman_scaled_nearest_scanline_0565_0565_SRC_asm_armv6, 1, h, 80, 32
/* 32bpp (bpp_shift 2, word ldr/str), prefetch 48, braking 32 */
generate_nearest_scanline_func \
pixman_scaled_nearest_scanline_8888_8888_SRC_asm_armv6, 2, , 48, 32
|
iMAGRAY/Shelldone | 120,733 | deps/cairo/pixman/pixman/pixman-mips-dspr2-asm.S | /*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nemanja Lukic (nemanja.lukic@rt-rk.com)
*/
#include "pixman-private.h"
#include "pixman-mips-dspr2-asm.h"
/*
 * Fill a buffer with a constant 16-bit value.  Aligns the destination
 * to a word boundary, replicates the value into both halves of a2
 * (replv.ph), stores 32 bytes per iteration with "pref 30"
 * (prepare-for-store) cache hints, then finishes 2 bytes at a time.
 *
 * Note: on MIPS the instruction immediately after a branch is in the
 * branch delay slot and executes regardless of the branch outcome.
 */
LEAF_MIPS_DSPR2(pixman_fill_buff16_mips)
/*
 * a0 - *dest
 * a1 - count (bytes)
 * a2 - value to fill buffer with
 */
beqz a1, 3f
andi t1, a0, 0x0002
beqz t1, 0f /* check if address is 4-byte aligned */
nop
/* store one halfword to reach 4-byte alignment */
sh a2, 0(a0)
addiu a0, a0, 2
addiu a1, a1, -2
0:
srl t1, a1, 5 /* t1 how many multiples of 32 bytes */
replv.ph a2, a2 /* replicate fill value (16bit) in a2 */
beqz t1, 2f
nop
1:
addiu t1, t1, -1
beqz t1, 11f /* last 32-byte chunk: no prefetch past the end */
addiu a1, a1, -32
pref 30, 32(a0)
sw a2, 0(a0)
sw a2, 4(a0)
sw a2, 8(a0)
sw a2, 12(a0)
sw a2, 16(a0)
sw a2, 20(a0)
sw a2, 24(a0)
sw a2, 28(a0)
b 1b
addiu a0, a0, 32
11:
sw a2, 0(a0)
sw a2, 4(a0)
sw a2, 8(a0)
sw a2, 12(a0)
sw a2, 16(a0)
sw a2, 20(a0)
sw a2, 24(a0)
sw a2, 28(a0)
addiu a0, a0, 32
2:
/* trailing bytes, two at a time */
blez a1, 3f
addiu a1, a1, -2
sh a2, 0(a0)
b 2b
addiu a0, a0, 2
3:
jr ra
nop
END(pixman_fill_buff16_mips)
/*
 * Fill a buffer with a constant 32-bit value.  Same structure as
 * pixman_fill_buff16_mips: 32 bytes per iteration with "pref 30"
 * hints, then a word-at-a-time cleanup loop.  The destination is
 * assumed word-aligned (no alignment fixup is performed here).
 * Instructions after branches are in branch delay slots.
 */
LEAF_MIPS32R2(pixman_fill_buff32_mips)
/*
 * a0 - *dest
 * a1 - count (bytes)
 * a2 - value to fill buffer with
 */
beqz a1, 3f
nop
srl t1, a1, 5 /* t1 how many multiples of 32 bytes */
beqz t1, 2f
nop
1:
addiu t1, t1, -1
beqz t1, 11f /* last 32-byte chunk: no prefetch past the end */
addiu a1, a1, -32
pref 30, 32(a0)
sw a2, 0(a0)
sw a2, 4(a0)
sw a2, 8(a0)
sw a2, 12(a0)
sw a2, 16(a0)
sw a2, 20(a0)
sw a2, 24(a0)
sw a2, 28(a0)
b 1b
addiu a0, a0, 32
11:
sw a2, 0(a0)
sw a2, 4(a0)
sw a2, 8(a0)
sw a2, 12(a0)
sw a2, 16(a0)
sw a2, 20(a0)
sw a2, 24(a0)
sw a2, 28(a0)
addiu a0, a0, 32
2:
/* trailing bytes, one word at a time */
blez a1, 3f
addiu a1, a1, -4
sw a2, 0(a0)
b 2b
addiu a0, a0, 4
3:
jr ra
nop
END(pixman_fill_buff32_mips)
/*
 * SRC operator: convert a scanline of a8r8g8b8 pixels to r5g6b5.
 * Processes two pixels per iteration via CONVERT_2x8888_TO_2x0565;
 * t4/t5/t6 hold the packed r/g/b field masks used by that macro.
 * A single trailing odd pixel is handled after the loop.
 */
LEAF_MIPS_DSPR2(pixman_composite_src_8888_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (a8r8g8b8)
 * a2 - w
 */
beqz a2, 3f
nop
addiu t1, a2, -1
beqz t1, 2f /* w == 1: single-pixel tail only */
nop
li t4, 0xf800f800
li t5, 0x07e007e0
li t6, 0x001f001f
1:
lw t0, 0(a1)
lw t1, 4(a1)
addiu a1, a1, 8
addiu a2, a2, -2
CONVERT_2x8888_TO_2x0565 t0, t1, t2, t3, t4, t5, t6, t7, t8
sh t2, 0(a0)
sh t3, 2(a0)
addiu t2, a2, -1
bgtz t2, 1b
addiu a0, a0, 4
2:
beqz a2, 3f
nop
lw t0, 0(a1)
CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3
sh t1, 0(a0)
3:
j ra
nop
END(pixman_composite_src_8888_0565_asm_mips)
/*
 * SRC operator: convert a scanline of r5g6b5 pixels to a8r8g8b8.
 * Mirror image of src_8888_0565: two pixels per iteration via
 * CONVERT_2x0565_TO_2x8888 (t4/t5 are the g/b field masks), plus a
 * single-pixel tail.
 */
LEAF_MIPS_DSPR2(pixman_composite_src_0565_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (r5g6b5)
 * a2 - w
 */
beqz a2, 3f
nop
addiu t1, a2, -1
beqz t1, 2f /* w == 1: single-pixel tail only */
nop
li t4, 0x07e007e0
li t5, 0x001F001F
1:
lhu t0, 0(a1)
lhu t1, 2(a1)
addiu a1, a1, 4
addiu a2, a2, -2
CONVERT_2x0565_TO_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
sw t2, 0(a0)
sw t3, 4(a0)
addiu t2, a2, -1
bgtz t2, 1b
addiu a0, a0, 8
2:
beqz a2, 3f
nop
lhu t0, 0(a1)
CONVERT_1x0565_TO_1x8888 t0, t1, t2, t3
sw t1, 0(a0)
3:
j ra
nop
END(pixman_composite_src_0565_8888_asm_mips)
/*
 * SRC operator: copy x8r8g8b8 to a8r8g8b8 by forcing the alpha byte
 * to 0xff (OR with t9 = 0xff000000).  Main loop handles 8 pixels per
 * iteration with read ("pref 0") and prepare-for-store ("pref 30")
 * hints; the final 8-pixel chunk skips the prefetches, and a scalar
 * loop handles the remainder.
 */
LEAF_MIPS_DSPR2(pixman_composite_src_x888_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (x8r8g8b8)
 * a2 - w
 */
beqz a2, 4f
nop
li t9, 0xff000000
srl t8, a2, 3 /* t1 = how many multiples of 8 src pixels */
beqz t8, 3f /* branch if less than 8 src pixels */
nop
1:
addiu t8, t8, -1
beqz t8, 2f /* last chunk: version without prefetch */
addiu a2, a2, -8
pref 0, 32(a1)
lw t0, 0(a1)
lw t1, 4(a1)
lw t2, 8(a1)
lw t3, 12(a1)
lw t4, 16(a1)
lw t5, 20(a1)
lw t6, 24(a1)
lw t7, 28(a1)
addiu a1, a1, 32
or t0, t0, t9
or t1, t1, t9
or t2, t2, t9
or t3, t3, t9
or t4, t4, t9
or t5, t5, t9
or t6, t6, t9
or t7, t7, t9
pref 30, 32(a0)
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
b 1b
addiu a0, a0, 32
2:
lw t0, 0(a1)
lw t1, 4(a1)
lw t2, 8(a1)
lw t3, 12(a1)
lw t4, 16(a1)
lw t5, 20(a1)
lw t6, 24(a1)
lw t7, 28(a1)
addiu a1, a1, 32
or t0, t0, t9
or t1, t1, t9
or t2, t2, t9
or t3, t3, t9
or t4, t4, t9
or t5, t5, t9
or t6, t6, t9
or t7, t7, t9
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
beqz a2, 4f
addiu a0, a0, 32
3:
/* remainder, one pixel at a time */
lw t0, 0(a1)
addiu a1, a1, 4
addiu a2, a2, -1
or t1, t0, t9
sw t1, 0(a0)
bnez a2, 3b
addiu a0, a0, 4
4:
jr ra
nop
END(pixman_composite_src_x888_8888_asm_mips)
#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
/*
 * SRC operator: convert packed 24bpp b8g8r8 to a8r8g8b8 (byte order
 * reversed, alpha forced to 0xff via t8 = 0xff000000... actually
 * 0xff00 shifted -- t8 = 0xff000000 built with lui).  Because the
 * source is 3 bytes per pixel, the main loop reads 12 bytes (4
 * pixels) per iteration; four loop variants (0:/1:/2:/3:) are
 * selected by the low two bits of the source address so that all word
 * loads inside the loop are aligned.  A scalar byte-wise loop (5:)
 * handles the remaining <4 pixels.  Little-endian only (guarded by
 * the surrounding #if MIPSEL).
 */
LEAF_MIPS_DSPR2(pixman_composite_src_0888_8888_rev_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (b8g8r8)
 * a2 - w
 */
beqz a2, 6f
nop
lui t8, 0xff00;
srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */
beqz t9, 4f /* branch if less than 4 src pixels */
nop
/* dispatch on source alignment (a1 & 3) */
li t0, 0x1
li t1, 0x2
li t2, 0x3
andi t3, a1, 0x3
beq t3, t0, 1f
nop
beq t3, t1, 2f
nop
beq t3, t2, 3f
nop
0:
/* source word-aligned */
beqz t9, 4f
addiu t9, t9, -1
lw t0, 0(a1) /* t0 = R2 | B1 | G1 | R1 */
lw t1, 4(a1) /* t1 = G3 | R3 | B2 | G2 */
lw t2, 8(a1) /* t2 = B4 | G4 | R4 | B3 */
addiu a1, a1, 12
addiu a2, a2, -4
wsbh t0, t0 /* t0 = B1 | R2 | R1 | G1 */
wsbh t1, t1 /* t1 = R3 | G3 | G2 | B2 */
wsbh t2, t2 /* t2 = G4 | B4 | B3 | R4 */
packrl.ph t3, t1, t0 /* t3 = G2 | B2 | B1 | R2 */
packrl.ph t4, t0, t0 /* t4 = R1 | G1 | B1 | R2 */
rotr t3, t3, 16 /* t3 = B1 | R2 | G2 | B2 */
or t3, t3, t8 /* t3 = FF | R2 | G2 | B2 */
srl t4, t4, 8 /* t4 = 0 | R1 | G1 | B1 */
or t4, t4, t8 /* t4 = FF | R1 | G1 | B1 */
packrl.ph t5, t2, t1 /* t5 = B3 | R4 | R3 | G3 */
rotr t5, t5, 24 /* t5 = R4 | R3 | G3 | B3 */
or t5, t5, t8 /* t5 = FF | R3 | G3 | B3 */
rotr t2, t2, 16 /* t2 = B3 | R4 | G4 | B4 */
or t2, t2, t8 /* t2 = FF | R4 | G4 | B4 */
sw t4, 0(a0)
sw t3, 4(a0)
sw t5, 8(a0)
sw t2, 12(a0)
b 0b
addiu a0, a0, 16
1:
/* source at word+1: peel 3 bytes, then word loads at offset 3 */
lbu t6, 0(a1) /* t6 = 0 | 0 | 0 | R1 */
lhu t7, 1(a1) /* t7 = 0 | 0 | B1 | G1 */
sll t6, t6, 16 /* t6 = 0 | R1 | 0 | 0 */
wsbh t7, t7 /* t7 = 0 | 0 | G1 | B1 */
or t7, t6, t7 /* t7 = 0 | R1 | G1 | B1 */
11:
beqz t9, 4f
addiu t9, t9, -1
lw t0, 3(a1) /* t0 = R3 | B2 | G2 | R2 */
lw t1, 7(a1) /* t1 = G4 | R4 | B3 | G3 */
lw t2, 11(a1) /* t2 = B5 | G5 | R5 | B4 */
addiu a1, a1, 12
addiu a2, a2, -4
wsbh t0, t0 /* t0 = B2 | R3 | R2 | G2 */
wsbh t1, t1 /* t1 = R4 | G4 | G3 | B3 */
wsbh t2, t2 /* t2 = G5 | B5 | B4 | R5 */
packrl.ph t3, t1, t0 /* t3 = G3 | B3 | B2 | R3 */
packrl.ph t4, t2, t1 /* t4 = B4 | R5 | R4 | G4 */
rotr t0, t0, 24 /* t0 = R3 | R2 | G2 | B2 */
rotr t3, t3, 16 /* t3 = B2 | R3 | G3 | B3 */
rotr t4, t4, 24 /* t4 = R5 | R4 | G4 | B4 */
or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */
or t0, t0, t8 /* t0 = FF | R2 | G2 | B2 */
or t3, t3, t8 /* t3 = FF | R3 | G3 | B3 */
or t4, t4, t8 /* t4 = FF | R4 | G4 | B4 */
sw t7, 0(a0)
sw t0, 4(a0)
sw t3, 8(a0)
sw t4, 12(a0)
rotr t7, t2, 16 /* t7 = xx | R5 | G5 | B5, carried to next iter */
b 11b
addiu a0, a0, 16
2:
/* source at word+2: peel 2 bytes, then word loads at offset 2 */
lhu t7, 0(a1) /* t7 = 0 | 0 | G1 | R1 */
wsbh t7, t7 /* t7 = 0 | 0 | R1 | G1 */
21:
beqz t9, 4f
addiu t9, t9, -1
lw t0, 2(a1) /* t0 = B2 | G2 | R2 | B1 */
lw t1, 6(a1) /* t1 = R4 | B3 | G3 | R3 */
lw t2, 10(a1) /* t2 = G5 | R5 | B4 | G4 */
addiu a1, a1, 12
addiu a2, a2, -4
wsbh t0, t0 /* t0 = G2 | B2 | B1 | R2 */
wsbh t1, t1 /* t1 = B3 | R4 | R3 | G3 */
wsbh t2, t2 /* t2 = R5 | G5 | G4 | B4 */
precr_sra.ph.w t7, t0, 0 /* t7 = R1 | G1 | B1 | R2 */
rotr t0, t0, 16 /* t0 = B1 | R2 | G2 | B2 */
packrl.ph t3, t2, t1 /* t3 = G4 | B4 | B3 | R4 */
rotr t1, t1, 24 /* t1 = R4 | R3 | G3 | B3 */
srl t7, t7, 8 /* t7 = 0 | R1 | G1 | B1 */
rotr t3, t3, 16 /* t3 = B3 | R4 | G4 | B4 */
or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */
or t0, t0, t8 /* t0 = FF | R2 | G2 | B2 */
or t1, t1, t8 /* t1 = FF | R3 | G3 | B3 */
or t3, t3, t8 /* t3 = FF | R4 | G4 | B4 */
sw t7, 0(a0)
sw t0, 4(a0)
sw t1, 8(a0)
sw t3, 12(a0)
srl t7, t2, 16 /* t7 = 0 | 0 | R5 | G5 */
b 21b
addiu a0, a0, 16
3:
/* source at word+3: peel 1 byte, then word loads at offset 1 */
lbu t7, 0(a1) /* t7 = 0 | 0 | 0 | R1 */
31:
beqz t9, 4f
addiu t9, t9, -1
lw t0, 1(a1) /* t0 = G2 | R2 | B1 | G1 */
lw t1, 5(a1) /* t1 = B3 | G3 | R3 | B2 */
lw t2, 9(a1) /* t2 = R5 | B4 | G4 | R4 */
addiu a1, a1, 12
addiu a2, a2, -4
wsbh t0, t0 /* t0 = R2 | G2 | G1 | B1 */
wsbh t1, t1 /* t1 = G3 | B3 | B2 | R3 */
wsbh t2, t2 /* t2 = B4 | R5 | R4 | G4 */
precr_sra.ph.w t7, t0, 0 /* t7 = xx | R1 | G1 | B1 */
packrl.ph t3, t1, t0 /* t3 = B2 | R3 | R2 | G2 */
rotr t1, t1, 16 /* t1 = B2 | R3 | G3 | B3 */
rotr t4, t2, 24 /* t4 = R5 | R4 | G4 | B4 */
rotr t3, t3, 24 /* t3 = R3 | R2 | G2 | B2 */
or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */
or t3, t3, t8 /* t3 = FF | R2 | G2 | B2 */
or t1, t1, t8 /* t1 = FF | R3 | G3 | B3 */
or t4, t4, t8 /* t4 = FF | R4 | G4 | B4 */
sw t7, 0(a0)
sw t3, 4(a0)
sw t1, 8(a0)
sw t4, 12(a0)
srl t7, t2, 16 /* t7 = 0 | 0 | xx | R5 */
b 31b
addiu a0, a0, 16
4:
beqz a2, 6f
nop
5:
/* remaining <4 pixels, byte-wise */
lbu t0, 0(a1) /* t0 = 0 | 0 | 0 | R */
lbu t1, 1(a1) /* t1 = 0 | 0 | 0 | G */
lbu t2, 2(a1) /* t2 = 0 | 0 | 0 | B */
addiu a1, a1, 3
sll t0, t0, 16 /* t0 = 0 | R | 0 | 0 */
sll t1, t1, 8 /* t1 = 0 | 0 | G | 0 */
or t2, t2, t1 /* t2 = 0 | 0 | G | B */
or t2, t2, t0 /* t2 = 0 | R | G | B */
or t2, t2, t8 /* t2 = FF | R | G | B */
sw t2, 0(a0)
addiu a2, a2, -1
bnez a2, 5b
addiu a0, a0, 4
6:
j ra
nop
END(pixman_composite_src_0888_8888_rev_asm_mips)
/*
 * SRC operator: convert packed 24bpp b8g8r8 directly to r5g6b5.
 * Same alignment-dispatch structure as src_0888_8888_rev above
 * (four loop variants keyed on a1 & 3, 4 pixels / 12 source bytes
 * per iteration), but the unpacked 888 values are fed straight into
 * CONVERT_2x8888_TO_2x0565 instead of getting an alpha byte.
 * v0/v1 are used as extra scratch and saved/restored on the stack.
 * Little-endian only (guarded by the surrounding #if MIPSEL).
 */
LEAF_MIPS_DSPR2(pixman_composite_src_0888_0565_rev_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (b8g8r8)
 * a2 - w
 */
SAVE_REGS_ON_STACK 0, v0, v1
beqz a2, 6f
nop
/* r/g/b field masks for the 565 conversion macros */
li t6, 0xf800f800
li t7, 0x07e007e0
li t8, 0x001F001F
srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */
beqz t9, 4f /* branch if less than 4 src pixels */
nop
/* dispatch on source alignment (a1 & 3) */
li t0, 0x1
li t1, 0x2
li t2, 0x3
andi t3, a1, 0x3
beq t3, t0, 1f
nop
beq t3, t1, 2f
nop
beq t3, t2, 3f
nop
0:
/* source word-aligned */
beqz t9, 4f
addiu t9, t9, -1
lw t0, 0(a1) /* t0 = R2 | B1 | G1 | R1 */
lw t1, 4(a1) /* t1 = G3 | R3 | B2 | G2 */
lw t2, 8(a1) /* t2 = B4 | G4 | R4 | B3 */
addiu a1, a1, 12
addiu a2, a2, -4
wsbh t0, t0 /* t0 = B1 | R2 | R1 | G1 */
wsbh t1, t1 /* t1 = R3 | G3 | G2 | B2 */
wsbh t2, t2 /* t2 = G4 | B4 | B3 | R4 */
packrl.ph t3, t1, t0 /* t3 = G2 | B2 | B1 | R2 */
packrl.ph t4, t0, t0 /* t4 = R1 | G1 | B1 | R2 */
rotr t3, t3, 16 /* t3 = B1 | R2 | G2 | B2 */
srl t4, t4, 8 /* t4 = 0 | R1 | G1 | B1 */
packrl.ph t5, t2, t1 /* t5 = B3 | R4 | R3 | G3 */
rotr t5, t5, 24 /* t5 = R4 | R3 | G3 | B3 */
rotr t2, t2, 16 /* t2 = B3 | R4 | G4 | B4 */
CONVERT_2x8888_TO_2x0565 t4, t3, t4, t3, t6, t7, t8, v0, v1
CONVERT_2x8888_TO_2x0565 t5, t2, t5, t2, t6, t7, t8, v0, v1
sh t4, 0(a0)
sh t3, 2(a0)
sh t5, 4(a0)
sh t2, 6(a0)
b 0b
addiu a0, a0, 8
1:
/* source at word+1: peel 3 bytes, then word loads at offset 3 */
lbu t4, 0(a1) /* t4 = 0 | 0 | 0 | R1 */
lhu t5, 1(a1) /* t5 = 0 | 0 | B1 | G1 */
sll t4, t4, 16 /* t4 = 0 | R1 | 0 | 0 */
wsbh t5, t5 /* t5 = 0 | 0 | G1 | B1 */
or t5, t4, t5 /* t5 = 0 | R1 | G1 | B1 */
11:
beqz t9, 4f
addiu t9, t9, -1
lw t0, 3(a1) /* t0 = R3 | B2 | G2 | R2 */
lw t1, 7(a1) /* t1 = G4 | R4 | B3 | G3 */
lw t2, 11(a1) /* t2 = B5 | G5 | R5 | B4 */
addiu a1, a1, 12
addiu a2, a2, -4
wsbh t0, t0 /* t0 = B2 | R3 | R2 | G2 */
wsbh t1, t1 /* t1 = R4 | G4 | G3 | B3 */
wsbh t2, t2 /* t2 = G5 | B5 | B4 | R5 */
packrl.ph t3, t1, t0 /* t3 = G3 | B3 | B2 | R3 */
packrl.ph t4, t2, t1 /* t4 = B4 | R5 | R4 | G4 */
rotr t0, t0, 24 /* t0 = R3 | R2 | G2 | B2 */
rotr t3, t3, 16 /* t3 = B2 | R3 | G3 | B3 */
rotr t4, t4, 24 /* t4 = R5 | R4 | G4 | B4 */
CONVERT_2x8888_TO_2x0565 t5, t0, t5, t0, t6, t7, t8, v0, v1
CONVERT_2x8888_TO_2x0565 t3, t4, t3, t4, t6, t7, t8, v0, v1
sh t5, 0(a0)
sh t0, 2(a0)
sh t3, 4(a0)
sh t4, 6(a0)
rotr t5, t2, 16 /* t5 = xx | R5 | G5 | B5, carried to next iter */
b 11b
addiu a0, a0, 8
2:
/* source at word+2: peel 2 bytes, then word loads at offset 2 */
lhu t5, 0(a1) /* t5 = 0 | 0 | G1 | R1 */
wsbh t5, t5 /* t5 = 0 | 0 | R1 | G1 */
21:
beqz t9, 4f
addiu t9, t9, -1
lw t0, 2(a1) /* t0 = B2 | G2 | R2 | B1 */
lw t1, 6(a1) /* t1 = R4 | B3 | G3 | R3 */
lw t2, 10(a1) /* t2 = G5 | R5 | B4 | G4 */
addiu a1, a1, 12
addiu a2, a2, -4
wsbh t0, t0 /* t0 = G2 | B2 | B1 | R2 */
wsbh t1, t1 /* t1 = B3 | R4 | R3 | G3 */
wsbh t2, t2 /* t2 = R5 | G5 | G4 | B4 */
precr_sra.ph.w t5, t0, 0 /* t5 = R1 | G1 | B1 | R2 */
rotr t0, t0, 16 /* t0 = B1 | R2 | G2 | B2 */
packrl.ph t3, t2, t1 /* t3 = G4 | B4 | B3 | R4 */
rotr t1, t1, 24 /* t1 = R4 | R3 | G3 | B3 */
srl t5, t5, 8 /* t5 = 0 | R1 | G1 | B1 */
rotr t3, t3, 16 /* t3 = B3 | R4 | G4 | B4 */
CONVERT_2x8888_TO_2x0565 t5, t0, t5, t0, t6, t7, t8, v0, v1
CONVERT_2x8888_TO_2x0565 t1, t3, t1, t3, t6, t7, t8, v0, v1
sh t5, 0(a0)
sh t0, 2(a0)
sh t1, 4(a0)
sh t3, 6(a0)
srl t5, t2, 16 /* t5 = 0 | 0 | R5 | G5 */
b 21b
addiu a0, a0, 8
3:
/* source at word+3: peel 1 byte, then word loads at offset 1 */
lbu t5, 0(a1) /* t5 = 0 | 0 | 0 | R1 */
31:
beqz t9, 4f
addiu t9, t9, -1
lw t0, 1(a1) /* t0 = G2 | R2 | B1 | G1 */
lw t1, 5(a1) /* t1 = B3 | G3 | R3 | B2 */
lw t2, 9(a1) /* t2 = R5 | B4 | G4 | R4 */
addiu a1, a1, 12
addiu a2, a2, -4
wsbh t0, t0 /* t0 = R2 | G2 | G1 | B1 */
wsbh t1, t1 /* t1 = G3 | B3 | B2 | R3 */
wsbh t2, t2 /* t2 = B4 | R5 | R4 | G4 */
precr_sra.ph.w t5, t0, 0 /* t5 = xx | R1 | G1 | B1 */
packrl.ph t3, t1, t0 /* t3 = B2 | R3 | R2 | G2 */
rotr t1, t1, 16 /* t1 = B2 | R3 | G3 | B3 */
rotr t4, t2, 24 /* t4 = R5 | R4 | G4 | B4 */
rotr t3, t3, 24 /* t3 = R3 | R2 | G2 | B2 */
CONVERT_2x8888_TO_2x0565 t5, t3, t5, t3, t6, t7, t8, v0, v1
CONVERT_2x8888_TO_2x0565 t1, t4, t1, t4, t6, t7, t8, v0, v1
sh t5, 0(a0)
sh t3, 2(a0)
sh t1, 4(a0)
sh t4, 6(a0)
srl t5, t2, 16 /* t5 = 0 | 0 | xx | R5 */
b 31b
addiu a0, a0, 8
4:
beqz a2, 6f
nop
5:
/* remaining <4 pixels, byte-wise */
lbu t0, 0(a1) /* t0 = 0 | 0 | 0 | R */
lbu t1, 1(a1) /* t1 = 0 | 0 | 0 | G */
lbu t2, 2(a1) /* t2 = 0 | 0 | 0 | B */
addiu a1, a1, 3
sll t0, t0, 16 /* t0 = 0 | R | 0 | 0 */
sll t1, t1, 8 /* t1 = 0 | 0 | G | 0 */
or t2, t2, t1 /* t2 = 0 | 0 | G | B */
or t2, t2, t0 /* t2 = 0 | R | G | B */
CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5
sh t3, 0(a0)
addiu a2, a2, -1
bnez a2, 5b
addiu a0, a0, 2
6:
RESTORE_REGS_FROM_STACK 0, v0, v1
j ra
nop
END(pixman_composite_src_0888_0565_rev_asm_mips)
#endif
/*
 * SRC operator for the "pixbuf" format: for each a8r8g8b8 source
 * pixel, multiply the color channels by the pixel's own alpha
 * (extracted with srl #24), re-attach alpha in the low byte, then
 * wsbh + rotr #16 reorders the bytes to the a8b8g8r8 destination
 * layout.  Two pixels per iteration plus a single-pixel tail.
 * v0 holds the 0x00ff00ff constant used by the multiply macros.
 */
LEAF_MIPS_DSPR2(pixman_composite_src_pixbuf_8888_asm_mips)
/*
 * a0 - dst (a8b8g8r8)
 * a1 - src (a8r8g8b8)
 * a2 - w
 */
SAVE_REGS_ON_STACK 0, v0
li v0, 0x00ff00ff
beqz a2, 3f
nop
addiu t1, a2, -1
beqz t1, 2f /* w == 1: single-pixel tail only */
nop
1:
lw t0, 0(a1)
lw t1, 4(a1)
addiu a1, a1, 8
addiu a2, a2, -2
srl t2, t0, 24 /* t2/t3 = source alpha */
srl t3, t1, 24
MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t2, t3, t0, t1, v0, t4, t5, t6, t7, t8, t9
/* rebuild pixel: color << 8 | alpha, then byte-reorder */
sll t0, t0, 8
sll t1, t1, 8
andi t2, t2, 0xff
andi t3, t3, 0xff
or t0, t0, t2
or t1, t1, t3
wsbh t0, t0
wsbh t1, t1
rotr t0, t0, 16
rotr t1, t1, 16
sw t0, 0(a0)
sw t1, 4(a0)
addiu t2, a2, -1
bgtz t2, 1b
addiu a0, a0, 8
2:
beqz a2, 3f
nop
lw t0, 0(a1)
srl t1, t0, 24
MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t3, t4, t5
sll t0, t0, 8
andi t1, t1, 0xff
or t0, t0, t1
wsbh t0, t0
rotr t0, t0, 16
sw t0, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, v0
j ra
nop
END(pixman_composite_src_pixbuf_8888_asm_mips)
/*
 * SRC operator for the "rpixbuf" format: identical to
 * src_pixbuf_8888 above, except the final byte order is restored
 * with a plain rotr #8 (no wsbh), so the destination keeps the
 * a8r8g8b8 channel order instead of swapping R and B.
 */
LEAF_MIPS_DSPR2(pixman_composite_src_rpixbuf_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - w
 */
SAVE_REGS_ON_STACK 0, v0
li v0, 0x00ff00ff
beqz a2, 3f
nop
addiu t1, a2, -1
beqz t1, 2f /* w == 1: single-pixel tail only */
nop
1:
lw t0, 0(a1)
lw t1, 4(a1)
addiu a1, a1, 8
addiu a2, a2, -2
srl t2, t0, 24 /* t2/t3 = source alpha */
srl t3, t1, 24
MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t2, t3, t0, t1, v0, t4, t5, t6, t7, t8, t9
/* rebuild pixel: color << 8 | alpha, rotate alpha back to the top */
sll t0, t0, 8
sll t1, t1, 8
andi t2, t2, 0xff
andi t3, t3, 0xff
or t0, t0, t2
or t1, t1, t3
rotr t0, t0, 8
rotr t1, t1, 8
sw t0, 0(a0)
sw t1, 4(a0)
addiu t2, a2, -1
bgtz t2, 1b
addiu a0, a0, 8
2:
beqz a2, 3f
nop
lw t0, 0(a1)
srl t1, t0, 24
MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t3, t4, t5
sll t0, t0, 8
andi t1, t1, 0xff
or t0, t0, t1
rotr t0, t0, 8
sw t0, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, v0
j ra
nop
END(pixman_composite_src_rpixbuf_8888_asm_mips)
/*
 * SRC operator, solid source with an a8 mask: each destination pixel
 * is the constant source color multiplied by the corresponding mask
 * byte.  Two pixels per iteration (MIPS_2xUN8x4_MUL_2xUN8) plus a
 * single-pixel tail; v0 holds the 0x00ff00ff constant.
 */
LEAF_MIPS_DSPR2(pixman_composite_src_n_8_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (32bit constant)
 * a2 - mask (a8)
 * a3 - w
 */
SAVE_REGS_ON_STACK 0, v0
li v0, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f /* w == 1: single-pixel tail only */
nop
1:
/* a1 = source (32bit constant) */
lbu t0, 0(a2) /* t2 = mask (a8) */
lbu t1, 1(a2) /* t3 = mask (a8) */
addiu a2, a2, 2
MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, t2, t3, v0, t4, t5, t6, t7, t8, t9
sw t2, 0(a0)
sw t3, 4(a0)
addiu a3, a3, -2
addiu t2, a3, -1
bgtz t2, 1b
addiu a0, a0, 8
beqz a3, 3f
nop
2:
lbu t0, 0(a2)
addiu a2, a2, 1
MIPS_UN8x4_MUL_UN8 a1, t0, t1, v0, t3, t4, t5
sw t1, 0(a0)
addiu a3, a3, -1
addiu a0, a0, 4
3:
RESTORE_REGS_FROM_STACK 0, v0
j ra
nop
END(pixman_composite_src_n_8_8888_asm_mips)
/*
 * SRC operator, solid source with an a8 mask, a8 destination: each
 * output byte is (source alpha * mask byte) / 255.  The main loop
 * packs 4 mask bytes into one register and uses paired DSP multiplies
 * (muleu_s.ph.qbl/qbr) with the shra_r/addq rounding sequence that
 * approximates division by 255; the scalar tail does the same with a
 * plain mul.  t9 holds the 0x00ff00ff rounding mask.
 */
LEAF_MIPS_DSPR2(pixman_composite_src_n_8_8_asm_mips)
/*
 * a0 - dst (a8)
 * a1 - src (32bit constant)
 * a2 - mask (a8)
 * a3 - w
 */
li t9, 0x00ff00ff
beqz a3, 3f
nop
srl t7, a3, 2 /* t7 = how many multiples of 4 dst pixels */
beqz t7, 1f /* branch if less than 4 src pixels */
nop
/* replicate source alpha into both halves of t8 */
srl t8, a1, 24
replv.ph t8, t8
0:
beqz t7, 1f
addiu t7, t7, -1
lbu t0, 0(a2)
lbu t1, 1(a2)
lbu t2, 2(a2)
lbu t3, 3(a2)
addiu a2, a2, 4
/* pack the 4 mask bytes into t0 */
precr_sra.ph.w t1, t0, 0
precr_sra.ph.w t3, t2, 0
precr.qb.ph t0, t3, t1
/* (mask * alpha + rounding) / 255 on 4 lanes */
muleu_s.ph.qbl t2, t0, t8
muleu_s.ph.qbr t3, t0, t8
shra_r.ph t4, t2, 8
shra_r.ph t5, t3, 8
and t4, t4, t9
and t5, t5, t9
addq.ph t2, t2, t4
addq.ph t3, t3, t5
shra_r.ph t2, t2, 8
shra_r.ph t3, t3, 8
precr.qb.ph t2, t2, t3
/* store the 4 result bytes */
sb t2, 0(a0)
srl t2, t2, 8
sb t2, 1(a0)
srl t2, t2, 8
sb t2, 2(a0)
srl t2, t2, 8
sb t2, 3(a0)
addiu a3, a3, -4
b 0b
addiu a0, a0, 4
1:
beqz a3, 3f
nop
srl t8, a1, 24
2:
/* scalar tail, one mask byte at a time */
lbu t0, 0(a2)
addiu a2, a2, 1
mul t2, t0, t8
shra_r.ph t3, t2, 8
andi t3, t3, 0x00ff
addq.ph t2, t2, t3
shra_r.ph t2, t2, 8
sb t2, 0(a0)
addiu a3, a3, -1
bnez a3, 2b
addiu a0, a0, 1
3:
j ra
nop
END(pixman_composite_src_n_8_8_asm_mips)
/*
 * OVER operator, solid source with a component-alpha (a8r8g8b8) mask:
 *   dst = src*mask + dst*(1 - alpha(src)*mask)  per component.
 * The two-pixel main loop (0:) special-cases per pair:
 *   - both masks zero        -> skip (dst unchanged),
 *   - both masks 0xffffffff  -> plain OVER (and, if source alpha is
 *     0xff, just store the source),
 *   - otherwise the full component-alpha path.
 * The tail (4:) repeats the same cases for a single last pixel.
 * t6 = 0xff, t7 = 0xffffffff, t8 = source alpha, t9 = 0x00ff00ff;
 * s0-s5 are scratch for the multiply macros.  Branch delay slots are
 * used throughout (the instruction after each branch always runs).
 */
LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_8888_ca_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (32bit constant)
 * a2 - mask (a8r8g8b8)
 * a3 - w
 */
beqz a3, 8f
nop
SAVE_REGS_ON_STACK 8, s0, s1, s2, s3, s4, s5
li t6, 0xff
addiu t7, zero, -1 /* t7 = 0xffffffff */
srl t8, a1, 24 /* t8 = srca */
li t9, 0x00ff00ff
addiu t1, a3, -1
beqz t1, 4f /* last pixel */
nop
0:
lw t0, 0(a2) /* t0 = mask */
lw t1, 4(a2) /* t1 = mask */
addiu a3, a3, -2 /* w = w - 2 */
or t2, t0, t1
beqz t2, 3f /* if (t0 == 0) && (t1 == 0) */
addiu a2, a2, 8
and t2, t0, t1
beq t2, t7, 1f /* if (t0 == 0xffffffff) && (t1 == 0xffffffff) */
nop
//if(ma)
/* general component-alpha path */
lw t2, 0(a0) /* t2 = dst */
lw t3, 4(a0) /* t3 = dst */
MIPS_2xUN8x4_MUL_2xUN8x4 a1, a1, t0, t1, t4, t5, t9, s0, s1, s2, s3, s4, s5
MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t8, t8, t0, t1, t9, s0, s1, s2, s3, s4, s5
not t0, t0
not t1, t1
MIPS_2xUN8x4_MUL_2xUN8x4 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5
addu_s.qb t2, t4, t2
addu_s.qb t3, t5, t3
sw t2, 0(a0)
sw t3, 4(a0)
addiu t1, a3, -1
bgtz t1, 0b
addiu a0, a0, 8
b 4f
nop
1:
//if (t0 == 0xffffffff) && (t1 == 0xffffffff):
beq t8, t6, 2f /* if (srca == 0xff) */
nop
/* fully-opaque mask: plain OVER with the source */
lw t2, 0(a0) /* t2 = dst */
lw t3, 4(a0) /* t3 = dst */
not t0, a1
not t1, a1
srl t0, t0, 24
srl t1, t1, 24
MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5
addu_s.qb t2, a1, t2
addu_s.qb t3, a1, t3
sw t2, 0(a0)
sw t3, 4(a0)
addiu t1, a3, -1
bgtz t1, 0b
addiu a0, a0, 8
b 4f
nop
2:
/* opaque mask and opaque source: copy */
sw a1, 0(a0)
sw a1, 4(a0)
3:
addiu t1, a3, -1
bgtz t1, 0b
addiu a0, a0, 8
4:
beqz a3, 7f
nop
/* a1 = src */
lw t0, 0(a2) /* t0 = mask */
beqz t0, 7f /* if (t0 == 0) */
nop
beq t0, t7, 5f /* if (t0 == 0xffffffff) */
nop
//if(ma)
lw t1, 0(a0) /* t1 = dst */
MIPS_UN8x4_MUL_UN8x4 a1, t0, t2, t9, t3, t4, t5, s0
MIPS_UN8x4_MUL_UN8 t0, t8, t0, t9, t3, t4, t5
not t0, t0
MIPS_UN8x4_MUL_UN8x4 t1, t0, t1, t9, t3, t4, t5, s0
addu_s.qb t1, t2, t1
sw t1, 0(a0)
RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5
j ra
nop
5:
//if (t0 == 0xffffffff)
beq t8, t6, 6f /* if (srca == 0xff) */
nop
lw t1, 0(a0) /* t1 = dst */
not t0, a1
srl t0, t0, 24
MIPS_UN8x4_MUL_UN8 t1, t0, t1, t9, t2, t3, t4
addu_s.qb t1, a1, t1
sw t1, 0(a0)
RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5
j ra
nop
6:
sw a1, 0(a0)
7:
RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5
8:
j ra
nop
END(pixman_composite_over_n_8888_8888_ca_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_0565_ca_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (32bit constant)
 * a2 - mask (a8r8g8b8)
 * a3 - w
 *
 * OVER operator with a component-alpha (per-channel) a8r8g8b8 mask onto an
 * r5g6b5 destination.  Main loop (0:) handles 2 pixels per iteration with
 * fast paths for an all-zero mask pair (store skipped) and an all-ones mask
 * pair (1:/2:); label 4: handles a possible trailing single pixel.
 * NOTE: instructions placed after a branch execute in the MIPS branch delay
 * slot throughout this function.
 */
beqz a3, 8f
nop
SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8
li t6, 0xff
addiu t7, zero, -1 /* t7 = 0xffffffff */
srl t8, a1, 24 /* t8 = srca */
li t9, 0x00ff00ff
li s6, 0xf800f800
li s7, 0x07e007e0
li s8, 0x001F001F
addiu t1, a3, -1
beqz t1, 4f /* last pixel */
nop
0:
lw t0, 0(a2) /* t0 = mask */
lw t1, 4(a2) /* t1 = mask */
addiu a3, a3, -2 /* w = w - 2 */
or t2, t0, t1
beqz t2, 3f /* if (t0 == 0) && (t1 == 0) */
addiu a2, a2, 8
and t2, t0, t1
beq t2, t7, 1f /* if (t0 == 0xffffffff) && (t1 == 0xffffffff) */
nop
//if(ma)
lhu t2, 0(a0) /* t2 = dst */
lhu t3, 2(a0) /* t3 = dst */
MIPS_2xUN8x4_MUL_2xUN8x4 a1, a1, t0, t1, t4, t5, t9, s0, s1, s2, s3, s4, s5
MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t8, t8, t0, t1, t9, s0, s1, s2, s3, s4, s5
not t0, t0
not t1, t1
CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, s7, s8, s0, s1, s2, s3
MIPS_2xUN8x4_MUL_2xUN8x4 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5
addu_s.qb t2, t4, t2
addu_s.qb t3, t5, t3
CONVERT_2x8888_TO_2x0565 t2, t3, t2, t3, s6, s7, s8, s0, s1
sh t2, 0(a0)
sh t3, 2(a0)
addiu t1, a3, -1
bgtz t1, 0b
addiu a0, a0, 4
b 4f
nop
1:
//if (t0 == 0xffffffff) && (t1 == 0xffffffff):
beq t8, t6, 2f /* if (srca == 0xff) */
nop
lhu t2, 0(a0) /* t2 = dst */
lhu t3, 2(a0) /* t3 = dst */
not t0, a1
not t1, a1
srl t0, t0, 24
srl t1, t1, 24
CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, s7, s8, s0, s1, s2, s3
MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5
addu_s.qb t2, a1, t2
addu_s.qb t3, a1, t3
CONVERT_2x8888_TO_2x0565 t2, t3, t2, t3, s6, s7, s8, s0, s1
sh t2, 0(a0)
sh t3, 2(a0)
addiu t1, a3, -1
bgtz t1, 0b
addiu a0, a0, 4
b 4f
nop
2:
/* opaque src, fully-on mask: plain fill of both pixels */
CONVERT_1x8888_TO_1x0565 a1, t2, s0, s1
sh t2, 0(a0)
sh t2, 2(a0)
3:
addiu t1, a3, -1
bgtz t1, 0b
addiu a0, a0, 4
4:
/* tail: 0 or 1 pixel remaining */
beqz a3, 7f
nop
/* a1 = src */
lw t0, 0(a2) /* t0 = mask */
beqz t0, 7f /* if (t0 == 0) */
nop
beq t0, t7, 5f /* if (t0 == 0xffffffff) */
nop
//if(ma)
lhu t1, 0(a0) /* t1 = dst */
MIPS_UN8x4_MUL_UN8x4 a1, t0, t2, t9, t3, t4, t5, s0
MIPS_UN8x4_MUL_UN8 t0, t8, t0, t9, t3, t4, t5
not t0, t0
CONVERT_1x0565_TO_1x8888 t1, s1, s2, s3
MIPS_UN8x4_MUL_UN8x4 s1, t0, s1, t9, t3, t4, t5, s0
addu_s.qb s1, t2, s1
CONVERT_1x8888_TO_1x0565 s1, t1, s0, s2
sh t1, 0(a0)
RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8
j ra
nop
5:
//if (t0 == 0xffffffff)
beq t8, t6, 6f /* if (srca == 0xff) */
nop
lhu t1, 0(a0) /* t1 = dst */
not t0, a1
srl t0, t0, 24
CONVERT_1x0565_TO_1x8888 t1, s1, s2, s3
MIPS_UN8x4_MUL_UN8 s1, t0, s1, t9, t2, t3, t4
addu_s.qb s1, a1, s1
CONVERT_1x8888_TO_1x0565 s1, t1, s0, s2
sh t1, 0(a0)
RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8
j ra
nop
6:
CONVERT_1x8888_TO_1x0565 a1, t1, s0, s2
sh t1, 0(a0)
7:
RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8
8:
j ra
nop
END(pixman_composite_over_n_8888_0565_ca_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_n_8_8_asm_mips)
/*
 * a0 - dst (a8)
 * a1 - src (32bit constant)
 * a2 - mask (a8)
 * a3 - w
 *
 * OVER of a solid source through an a8 mask onto an a8 destination.
 * Loop 0: processes 4 pixels at a time using paired 16-bit DSPr2 ops
 * (mask bytes packed 4-wide, multiplied against replicated srca in t8);
 * loop 2: handles the remaining 0-3 pixels one at a time.
 * Instructions after branches execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, v0
li t9, 0x00ff00ff
beqz a3, 3f
nop
srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */
beqz v0, 1f /* branch if less than 4 src pixels */
nop
srl t8, a1, 24
replv.ph t8, t8
0:
beqz v0, 1f
addiu v0, v0, -1
lbu t0, 0(a2)
lbu t1, 1(a2)
lbu t2, 2(a2)
lbu t3, 3(a2)
lbu t4, 0(a0)
lbu t5, 1(a0)
lbu t6, 2(a0)
lbu t7, 3(a0)
addiu a2, a2, 4
precr_sra.ph.w t1, t0, 0
precr_sra.ph.w t3, t2, 0
precr_sra.ph.w t5, t4, 0
precr_sra.ph.w t7, t6, 0
precr.qb.ph t0, t3, t1
precr.qb.ph t1, t7, t5
/* t0 = 4 packed mask bytes, t1 = 4 packed dst bytes */
muleu_s.ph.qbl t2, t0, t8
muleu_s.ph.qbr t3, t0, t8
shra_r.ph t4, t2, 8
shra_r.ph t5, t3, 8
and t4, t4, t9
and t5, t5, t9
addq.ph t2, t2, t4
addq.ph t3, t3, t5
shra_r.ph t2, t2, 8
shra_r.ph t3, t3, 8
precr.qb.ph t0, t2, t3
/* t0 = src*mask per pixel; invert to get per-pixel (255 - sa*ma) */
not t6, t0
preceu.ph.qbl t7, t6
preceu.ph.qbr t6, t6
muleu_s.ph.qbl t2, t1, t7
muleu_s.ph.qbr t3, t1, t6
shra_r.ph t4, t2, 8
shra_r.ph t5, t3, 8
and t4, t4, t9
and t5, t5, t9
addq.ph t2, t2, t4
addq.ph t3, t3, t5
shra_r.ph t2, t2, 8
shra_r.ph t3, t3, 8
precr.qb.ph t1, t2, t3
addu_s.qb t2, t0, t1
sb t2, 0(a0)
srl t2, t2, 8
sb t2, 1(a0)
srl t2, t2, 8
sb t2, 2(a0)
srl t2, t2, 8
sb t2, 3(a0)
addiu a3, a3, -4
b 0b
addiu a0, a0, 4
1:
beqz a3, 3f
nop
srl t8, a1, 24
2:
/* scalar tail: one pixel per iteration */
lbu t0, 0(a2)
lbu t1, 0(a0)
addiu a2, a2, 1
mul t2, t0, t8
shra_r.ph t3, t2, 8
andi t3, t3, 0x00ff
addq.ph t2, t2, t3
shra_r.ph t2, t2, 8
not t3, t2
andi t3, t3, 0x00ff
mul t4, t1, t3
shra_r.ph t5, t4, 8
andi t5, t5, 0x00ff
addq.ph t4, t4, t5
shra_r.ph t4, t4, 8
andi t4, t4, 0x00ff
addu_s.qb t2, t2, t4
sb t2, 0(a0)
addiu a3, a3, -1
bnez a3, 2b
addiu a0, a0, 1
3:
RESTORE_REGS_FROM_STACK 0, v0
j ra
nop
END(pixman_composite_over_n_8_8_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_n_8_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (32bit constant)
 * a2 - mask (a8)
 * a3 - w
 *
 * OVER of a solid source through an a8 mask onto an a8r8g8b8 destination.
 * Two 2-pixel main loops are selected up front: loop 1: for translucent
 * source, loop 2: for opaque source (srca == 0xff, where a fully-on mask
 * pair becomes a plain store).  Label 3: handles a trailing single pixel.
 * Instructions after branches execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 4, s0, s1, s2, s3, s4
beqz a3, 4f
nop
li t4, 0x00ff00ff
li t5, 0xff
addiu t0, a3, -1
beqz t0, 3f /* last pixel */
srl t6, a1, 24 /* t6 = srca */
not s4, a1
beq t5, t6, 2f /* if (srca == 0xff) */
srl s4, s4, 24 /* s4 = 255 - srca (delay slot) */
1:
/* a1 = src */
lbu t0, 0(a2) /* t0 = mask */
lbu t1, 1(a2) /* t1 = mask */
or t2, t0, t1
beqz t2, 111f /* if (t0 == 0) && (t1 == 0) */
addiu a2, a2, 2
and t3, t0, t1
lw t2, 0(a0) /* t2 = dst */
beq t3, t5, 11f /* if (t0 == 0xff) && (t1 == 0xff) */
lw t3, 4(a0) /* t3 = dst */
MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, s0, s1, t4, t6, t7, t8, t9, s2, s3
not s2, s0
not s3, s1
srl s2, s2, 24
srl s3, s3, 24
MIPS_2xUN8x4_MUL_2xUN8 t2, t3, s2, s3, t2, t3, t4, t0, t1, t6, t7, t8, t9
addu_s.qb s2, t2, s0
addu_s.qb s3, t3, s1
sw s2, 0(a0)
b 111f
sw s3, 4(a0)
11:
/* fully-on mask pair: blend src directly using precomputed s4 = ~srca */
MIPS_2xUN8x4_MUL_2xUN8 t2, t3, s4, s4, t2, t3, t4, t0, t1, t6, t7, t8, t9
addu_s.qb s2, t2, a1
addu_s.qb s3, t3, a1
sw s2, 0(a0)
sw s3, 4(a0)
111:
addiu a3, a3, -2
addiu t0, a3, -1
bgtz t0, 1b
addiu a0, a0, 8
b 3f
nop
2:
/* a1 = src (opaque: srca == 0xff) */
lbu t0, 0(a2) /* t0 = mask */
lbu t1, 1(a2) /* t1 = mask */
or t2, t0, t1
beqz t2, 222f /* if (t0 == 0) && (t1 == 0) */
addiu a2, a2, 2
and t3, t0, t1
beq t3, t5, 22f /* if (t0 == 0xff) && (t1 == 0xff) */
nop
lw t2, 0(a0) /* t2 = dst */
lw t3, 4(a0) /* t3 = dst */
OVER_2x8888_2x8_2x8888 a1, a1, t0, t1, t2, t3, \
t6, t7, t4, t8, t9, s0, s1, s2, s3
sw t6, 0(a0)
b 222f
sw t7, 4(a0)
22:
/* opaque src with fully-on masks: plain store */
sw a1, 0(a0)
sw a1, 4(a0)
222:
addiu a3, a3, -2
addiu t0, a3, -1
bgtz t0, 2b
addiu a0, a0, 8
3:
/* tail: 0 or 1 pixel remaining */
blez a3, 4f
nop
/* a1 = src */
lbu t0, 0(a2) /* t0 = mask */
beqz t0, 4f /* if (t0 == 0) */
addiu a2, a2, 1
move t3, a1
beq t0, t5, 31f /* if (t0 == 0xff) */
lw t1, 0(a0) /* t1 = dst */
MIPS_UN8x4_MUL_UN8 a1, t0, t3, t4, t6, t7, t8
31:
not t2, t3
srl t2, t2, 24
MIPS_UN8x4_MUL_UN8 t1, t2, t1, t4, t6, t7, t8
addu_s.qb t2, t1, t3
sw t2, 0(a0)
4:
RESTORE_REGS_FROM_STACK 4, s0, s1, s2, s3, s4
j ra
nop
END(pixman_composite_over_n_8_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_n_8_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (32bit constant)
 * a2 - mask (a8)
 * a3 - w
 *
 * OVER of a solid source through an a8 mask onto an r5g6b5 destination.
 * Loop 1: is the translucent-source path; loop 21: is the opaque-source
 * path (src preconverted to 0565 in s0 at 2:).  Label 3: handles a
 * trailing single pixel.  Destination pixels are widened to 8888,
 * blended, and narrowed back to 0565.
 * Instructions after branches execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8
beqz a3, 4f
nop
li t4, 0x00ff00ff
li t5, 0xff
li t6, 0xf800f800
li t7, 0x07e007e0
li t8, 0x001F001F
addiu t1, a3, -1
beqz t1, 3f /* last pixel */
srl t0, a1, 24 /* t0 = srca */
not v0, a1
beq t0, t5, 2f /* if (srca == 0xff) */
srl v0, v0, 24 /* v0 = 255 - srca (delay slot) */
1:
/* a1 = src */
lbu t0, 0(a2) /* t0 = mask */
lbu t1, 1(a2) /* t1 = mask */
or t2, t0, t1
beqz t2, 111f /* if (t0 == 0) && (t1 == 0) */
addiu a2, a2, 2
lhu t2, 0(a0) /* t2 = dst */
lhu t3, 2(a0) /* t3 = dst */
CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, t7, t8, t9, s2, s3, s4
and t9, t0, t1
beq t9, t5, 11f /* if (t0 == 0xff) && (t1 == 0xff) */
nop
MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, s2, s3, t4, t9, s4, s5, s6, s7, s8
not s4, s2
not s5, s3
srl s4, s4, 24
srl s5, s5, 24
MIPS_2xUN8x4_MUL_2xUN8 s0, s1, s4, s5, s0, s1, t4, t9, t0, t1, s6, s7, s8
addu_s.qb s4, s2, s0
addu_s.qb s5, s3, s1
CONVERT_2x8888_TO_2x0565 s4, s5, t2, t3, t6, t7, t8, s0, s1
sh t2, 0(a0)
b 111f
sh t3, 2(a0)
11:
/* fully-on mask pair: blend src directly using v0 = ~srca */
MIPS_2xUN8x4_MUL_2xUN8 s0, s1, v0, v0, s0, s1, t4, t9, t0, t1, s6, s7, s8
addu_s.qb s4, a1, s0
addu_s.qb s5, a1, s1
CONVERT_2x8888_TO_2x0565 s4, s5, t2, t3, t6, t7, t8, s0, s1
sh t2, 0(a0)
sh t3, 2(a0)
111:
addiu a3, a3, -2
addiu t0, a3, -1
bgtz t0, 1b
addiu a0, a0, 4
b 3f
nop
2:
/* opaque source: keep its 0565 form in s0 for the fully-on-mask case */
CONVERT_1x8888_TO_1x0565 a1, s0, s1, s2
21:
/* a1 = src */
lbu t0, 0(a2) /* t0 = mask */
lbu t1, 1(a2) /* t1 = mask */
or t2, t0, t1
beqz t2, 222f /* if (t0 == 0) && (t1 == 0) */
addiu a2, a2, 2
and t9, t0, t1
move s2, s0
beq t9, t5, 22f /* if (t0 == 0xff) && (t1 == 0xff) */
move s3, s0
lhu t2, 0(a0) /* t2 = dst */
lhu t3, 2(a0) /* t3 = dst */
CONVERT_2x0565_TO_2x8888 t2, t3, s2, s3, t7, t8, s4, s5, s6, s7
OVER_2x8888_2x8_2x8888 a1, a1, t0, t1, s2, s3, \
t2, t3, t4, t9, s4, s5, s6, s7, s8
CONVERT_2x8888_TO_2x0565 t2, t3, s2, s3, t6, t7, t8, s4, s5
22:
sh s2, 0(a0)
sh s3, 2(a0)
222:
addiu a3, a3, -2
addiu t0, a3, -1
bgtz t0, 21b
addiu a0, a0, 4
3:
/* tail: 0 or 1 pixel remaining */
blez a3, 4f
nop
/* a1 = src */
lbu t0, 0(a2) /* t0 = mask */
beqz t0, 4f /* if (t0 == 0) */
nop
lhu t1, 0(a0) /* t1 = dst */
CONVERT_1x0565_TO_1x8888 t1, t2, t3, t7
beq t0, t5, 31f /* if (t0 == 0xff) */
move t3, a1
MIPS_UN8x4_MUL_UN8 a1, t0, t3, t4, t7, t8, t9
31:
not t6, t3
srl t6, t6, 24
MIPS_UN8x4_MUL_UN8 t2, t6, t2, t4, t7, t8, t9
addu_s.qb t1, t2, t3
CONVERT_1x8888_TO_1x0565 t1, t2, t3, t7
sh t2, 0(a0)
4:
RESTORE_REGS_FROM_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8
j ra
nop
END(pixman_composite_over_n_8_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_8888_n_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - mask (32bit constant)
 * a3 - w
 *
 * OVER of an a8r8g8b8 source through a constant mask onto an a8r8g8b8
 * destination.  The mask's alpha byte is extracted once up front
 * (a2 >>= 24); loop 1: handles 2 pixels per iteration, label 2: the
 * trailing odd pixel.  Instructions after branches execute in the
 * branch delay slot.
 */
SAVE_REGS_ON_STACK 0, s0
li t4, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
srl a2, a2, 24
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
/* a2 = mask (32bit constant) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */
addiu a1, a1, 8
OVER_2x8888_2x8_2x8888 t0, t1, a2, a2, t2, t3, \
t5, t6, t4, t7, t8, t9, t0, t1, s0
sw t5, 0(a0)
sw t6, 4(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 8
2:
beqz a3, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
/* a2 = mask (32bit constant) */
lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */
OVER_8888_8_8888 t0, a2, t1, t3, t4, t5, t6, t7, t8
sw t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0
j ra
nop
END(pixman_composite_over_8888_n_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_8888_n_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (a8r8g8b8)
 * a2 - mask (32bit constant)
 * a3 - w
 *
 * OVER of an a8r8g8b8 source through a constant mask onto an r5g6b5
 * destination.  Destination pixels are widened to 8888, blended, and
 * narrowed back to 0565.  Loop 1: handles 2 pixels per iteration,
 * label 2: the trailing odd pixel.  Instructions after branches
 * execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, s0, s1, s2, s3
li t6, 0x00ff00ff
li t7, 0xf800f800
li t8, 0x07e007e0
li t9, 0x001F001F
beqz a3, 3f
nop
srl a2, a2, 24 /* a2 = mask alpha byte */
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
/* a2 = mask (32bit constant) */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
lhu t3, 2(a0) /* t3 = destination (r5g6b5) */
addiu a1, a1, 8
CONVERT_2x0565_TO_2x8888 t2, t3, t4, t5, t8, t9, s0, s1, t2, t3
OVER_2x8888_2x8_2x8888 t0, t1, a2, a2, t4, t5, \
t2, t3, t6, t0, t1, s0, s1, s2, s3
CONVERT_2x8888_TO_2x0565 t2, t3, t4, t5, t7, t8, t9, s0, s1
sh t4, 0(a0)
sh t5, 2(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 4
2:
beqz a3, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
/* a2 = mask (32bit constant) */
lhu t1, 0(a0) /* t1 = destination (r5g6b5) */
CONVERT_1x0565_TO_1x8888 t1, t2, t4, t5
OVER_8888_8_8888 t0, a2, t2, t1, t6, t3, t4, t5, t7
CONVERT_1x8888_TO_1x0565 t1, t3, t4, t5
sh t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3
j ra
nop
END(pixman_composite_over_8888_n_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_0565_n_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (r5g6b5)
 * a2 - mask (32bit constant)
 * a3 - w
 *
 * OVER of an r5g6b5 source through a constant mask onto an r5g6b5
 * destination.  Both source and destination are widened to 8888 before
 * blending and the result narrowed back.  Loop 1: handles 2 pixels
 * per iteration, label 2: the trailing odd pixel.  Instructions after
 * branches execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5
li t6, 0x00ff00ff
li t7, 0xf800f800
li t8, 0x07e007e0
li t9, 0x001F001F
beqz a3, 3f
nop
srl a2, a2, 24 /* a2 = mask alpha byte */
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lhu t0, 0(a1) /* t0 = source (r5g6b5) */
lhu t1, 2(a1) /* t1 = source (r5g6b5) */
/* a2 = mask (32bit constant) */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
lhu t3, 2(a0) /* t3 = destination (r5g6b5) */
addiu a1, a1, 4
CONVERT_2x0565_TO_2x8888 t0, t1, t4, t5, t8, t9, s0, s1, s2, s3
CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, t8, t9, s2, s3, s4, s5
OVER_2x8888_2x8_2x8888 t4, t5, a2, a2, s0, s1, \
t0, t1, t6, s2, s3, s4, s5, t4, t5
CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t7, t8, t9, s2, s3
sh s0, 0(a0)
sh s1, 2(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 4
2:
beqz a3, 3f
nop
lhu t0, 0(a1) /* t0 = source (r5g6b5) */
/* a2 = mask (32bit constant) */
lhu t1, 0(a0) /* t1 = destination (r5g6b5) */
CONVERT_1x0565_TO_1x8888 t0, t2, t4, t5
CONVERT_1x0565_TO_1x8888 t1, t3, t4, t5
OVER_8888_8_8888 t2, a2, t3, t0, t6, t1, t4, t5, t7
CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5
sh t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5
j ra
nop
END(pixman_composite_over_0565_n_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_8888_8_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - mask (a8)
 * a3 - w
 *
 * OVER of an a8r8g8b8 source through a per-pixel a8 mask onto an
 * a8r8g8b8 destination.  Loop 1: handles 2 pixels per iteration,
 * label 2: the trailing odd pixel.  Instructions after branches
 * execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, s0, s1
li t4, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
lbu t2, 0(a2) /* t2 = mask (a8) */
lbu t3, 1(a2) /* t3 = mask (a8) */
lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */
lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */
addiu a1, a1, 8
addiu a2, a2, 2
OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, t5, t6, \
t7, t8, t4, t9, s0, s1, t0, t1, t2
sw t7, 0(a0)
sw t8, 4(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 8
2:
beqz a3, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lbu t1, 0(a2) /* t1 = mask (a8) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
OVER_8888_8_8888 t0, t1, t2, t3, t4, t5, t6, t7, t8
sw t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1
j ra
nop
END(pixman_composite_over_8888_8_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_8888_8_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (a8r8g8b8)
 * a2 - mask (a8)
 * a3 - w
 *
 * OVER of an a8r8g8b8 source through a per-pixel a8 mask onto an
 * r5g6b5 destination (widened to 8888 for the blend, narrowed back
 * for the store).  Loop 1: handles 2 pixels per iteration, label 2:
 * the trailing odd pixel.  Instructions after branches execute in
 * the branch delay slot.
 */
SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5
li t6, 0x00ff00ff
li t7, 0xf800f800
li t8, 0x07e007e0
li t9, 0x001F001F
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
lbu t2, 0(a2) /* t2 = mask (a8) */
lbu t3, 1(a2) /* t3 = mask (a8) */
lhu t4, 0(a0) /* t4 = destination (r5g6b5) */
lhu t5, 2(a0) /* t5 = destination (r5g6b5) */
addiu a1, a1, 8
addiu a2, a2, 2
CONVERT_2x0565_TO_2x8888 t4, t5, s0, s1, t8, t9, s2, s3, s4, s5
OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, s0, s1, \
t4, t5, t6, s2, s3, s4, s5, t0, t1
CONVERT_2x8888_TO_2x0565 t4, t5, s0, s1, t7, t8, t9, s2, s3
sh s0, 0(a0)
sh s1, 2(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 4
2:
beqz a3, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lbu t1, 0(a2) /* t1 = mask (a8) */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
CONVERT_1x0565_TO_1x8888 t2, t3, t4, t5
OVER_8888_8_8888 t0, t1, t3, t2, t6, t4, t5, t7, t8
CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5
sh t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5
j ra
nop
END(pixman_composite_over_8888_8_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_0565_8_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (r5g6b5)
 * a2 - mask (a8)
 * a3 - w
 *
 * OVER of an r5g6b5 source through a per-pixel a8 mask onto an r5g6b5
 * destination.  Source and destination are both widened to 8888 before
 * blending.  Loop 1: handles 2 pixels per iteration, label 2: the
 * trailing odd pixel.  Instructions after branches execute in the
 * branch delay slot.
 */
SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5
li t4, 0xf800f800
li t5, 0x07e007e0
li t6, 0x001F001F
li t7, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lhu t0, 0(a1) /* t0 = source (r5g6b5) */
lhu t1, 2(a1) /* t1 = source (r5g6b5) */
lbu t2, 0(a2) /* t2 = mask (a8) */
lbu t3, 1(a2) /* t3 = mask (a8) */
lhu t8, 0(a0) /* t8 = destination (r5g6b5) */
lhu t9, 2(a0) /* t9 = destination (r5g6b5) */
addiu a1, a1, 4
addiu a2, a2, 2
CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5
CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, t0, t1
OVER_2x8888_2x8_2x8888 s0, s1, t2, t3, s2, s3, \
t0, t1, t7, s4, s5, t8, t9, s0, s1
CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3
sh s0, 0(a0)
sh s1, 2(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 4
2:
beqz a3, 3f
nop
lhu t0, 0(a1) /* t0 = source (r5g6b5) */
lbu t1, 0(a2) /* t1 = mask (a8) */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5
CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6
OVER_8888_8_8888 t3, t1, t4, t0, t7, t2, t5, t6, t8
CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5
sh t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5
j ra
nop
END(pixman_composite_over_0565_8_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_8888_8888_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - mask (a8r8g8b8)
 * a3 - w
 *
 * OVER of an a8r8g8b8 source through an a8r8g8b8 mask onto an
 * a8r8g8b8 destination; only the mask's alpha byte is used
 * (srl by 24 before the blend).  Loop 1: handles 2 pixels per
 * iteration, label 2: the trailing odd pixel.  Instructions after
 * branches execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, s0, s1, s2
li t4, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
lw t2, 0(a2) /* t2 = mask (a8r8g8b8) */
lw t3, 4(a2) /* t3 = mask (a8r8g8b8) */
lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */
lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */
addiu a1, a1, 8
addiu a2, a2, 8
srl t2, t2, 24 /* keep only the mask alpha */
srl t3, t3, 24
OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, t5, t6, t7, t8, t4, t9, s0, s1, s2, t0, t1
sw t7, 0(a0)
sw t8, 4(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 8
2:
beqz a3, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 0(a2) /* t1 = mask (a8r8g8b8) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
srl t1, t1, 24
OVER_8888_8_8888 t0, t1, t2, t3, t4, t5, t6, t7, t8
sw t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2
j ra
nop
END(pixman_composite_over_8888_8888_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_8888_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - w
 *
 * Unmasked OVER of an a8r8g8b8 source onto an a8r8g8b8 destination.
 * Loop 1: handles 2 pixels per iteration with fast paths for a fully
 * opaque source pair (branch to 11: stores src directly) and a fully
 * transparent source pair (branch to 12: skips the store entirely).
 * Label 2: handles the trailing odd pixel with the same shortcuts.
 * Instructions after branches execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, s0, s1, s2
li t4, 0x00ff00ff
beqz a2, 3f
nop
addiu t1, a2, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */
addiu a1, a1, 8
not t5, t0
srl t5, t5, 24 /* t5 = 255 - src0.a */
not t6, t1
srl t6, t6, 24 /* t6 = 255 - src1.a */
or t7, t5, t6
beqz t7, 11f /* both sources opaque: store src as-is */
or t8, t0, t1
beqz t8, 12f /* both sources zero: leave dst untouched */
MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t5, t6, t7, t8, t4, t9, s0, s1, s2, t2, t3
addu_s.qb t0, t7, t0
addu_s.qb t1, t8, t1
11:
sw t0, 0(a0)
sw t1, 4(a0)
12:
addiu a2, a2, -2
addiu t1, a2, -1
bgtz t1, 1b
addiu a0, a0, 8
2:
beqz a2, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */
addiu a1, a1, 4
not t2, t0
srl t2, t2, 24 /* t2 = 255 - src.a */
beqz t2, 21f /* opaque source: store src as-is */
nop
beqz t0, 3f /* zero source: leave dst untouched */
MIPS_UN8x4_MUL_UN8 t1, t2, t3, t4, t5, t6, t7
addu_s.qb t0, t3, t0
21:
sw t0, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2
j ra
nop
END(pixman_composite_over_8888_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_8888_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (a8r8g8b8)
 * a2 - w
 *
 * Unmasked OVER of an a8r8g8b8 source onto an r5g6b5 destination.
 * Same fast-path structure as over_8888_8888: opaque source pairs
 * skip the blend (11:), zero source pairs skip the store (12:).
 * Destination is widened to 8888 for the blend, narrowed for the
 * store.  Instructions after branches execute in the branch delay
 * slot.
 */
SAVE_REGS_ON_STACK 8, s0, s1, s2, s3, s4, s5
li t4, 0x00ff00ff
li s3, 0xf800f800
li s4, 0x07e007e0
li s5, 0x001F001F
beqz a2, 3f
nop
addiu t1, a2, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
lhu t3, 2(a0) /* t3 = destination (r5g6b5) */
addiu a1, a1, 8
not t5, t0
srl t5, t5, 24 /* t5 = 255 - src0.a */
not t6, t1
srl t6, t6, 24 /* t6 = 255 - src1.a */
or t7, t5, t6
beqz t7, 11f /* both sources opaque: convert + store src */
or t8, t0, t1
beqz t8, 12f /* both sources zero: leave dst untouched */
CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, s4, s5, t7, t8, t9, s2
MIPS_2xUN8x4_MUL_2xUN8 s0, s1, t5, t6, t7, t8, t4, t9, t2, t3, s2, s0, s1
addu_s.qb t0, t7, t0
addu_s.qb t1, t8, t1
11:
CONVERT_2x8888_TO_2x0565 t0, t1, t7, t8, s3, s4, s5, t2, t3
sh t7, 0(a0)
sh t8, 2(a0)
12:
addiu a2, a2, -2
addiu t1, a2, -1
bgtz t1, 1b
addiu a0, a0, 4
2:
beqz a2, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lhu t1, 0(a0) /* t1 = destination (r5g6b5) */
addiu a1, a1, 4
not t2, t0
srl t2, t2, 24 /* t2 = 255 - src.a */
beqz t2, 21f /* opaque source: convert + store src */
nop
beqz t0, 3f /* zero source: leave dst untouched */
CONVERT_1x0565_TO_1x8888 t1, s0, t8, t9
MIPS_UN8x4_MUL_UN8 s0, t2, t3, t4, t5, t6, t7
addu_s.qb t0, t3, t0
21:
CONVERT_1x8888_TO_1x0565 t0, s0, t8, t9
sh s0, 0(a0)
3:
RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5
j ra
nop
END(pixman_composite_over_8888_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_n_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (32bit constant)
 * a2 - w
 *
 * Unmasked OVER of a solid source onto an r5g6b5 destination.
 * If the source is opaque (255 - srca == 0) loop 0: is a plain 0565
 * fill; otherwise loop 2: blends 2 pixels per iteration with
 * t0 = 255 - srca, and label 3: handles the trailing odd pixel.
 * Registers are saved only on the translucent path.  Instructions
 * after branches execute in the branch delay slot.
 */
beqz a2, 5f
nop
not t0, a1
srl t0, t0, 24 /* t0 = 255 - srca */
bgtz t0, 1f /* translucent source: full blend path */
nop
CONVERT_1x8888_TO_1x0565 a1, t1, t2, t3
0:
sh t1, 0(a0)
addiu a2, a2, -1
bgtz a2, 0b
addiu a0, a0, 2
j ra
nop
1:
SAVE_REGS_ON_STACK 0, s0, s1, s2
li t4, 0x00ff00ff
li t5, 0xf800f800
li t6, 0x07e007e0
li t7, 0x001F001F
addiu t1, a2, -1
beqz t1, 3f
nop
2:
lhu t1, 0(a0) /* t1 = destination (r5g6b5) */
lhu t2, 2(a0) /* t2 = destination (r5g6b5) */
CONVERT_2x0565_TO_2x8888 t1, t2, t3, t8, t6, t7, t9, s0, s1, s2
MIPS_2xUN8x4_MUL_2xUN8 t3, t8, t0, t0, t1, t2, t4, t9, s0, s1, s2, t3, t8
addu_s.qb t1, t1, a1
addu_s.qb t2, t2, a1
CONVERT_2x8888_TO_2x0565 t1, t2, t3, t8, t5, t6, t7, s0, s1
sh t3, 0(a0)
sh t8, 2(a0)
addiu a2, a2, -2
addiu t1, a2, -1
bgtz t1, 2b
addiu a0, a0, 4
3:
beqz a2, 4f
nop
lhu t1, 0(a0) /* t1 = destination (r5g6b5) */
CONVERT_1x0565_TO_1x8888 t1, t2, s0, s1
MIPS_UN8x4_MUL_UN8 t2, t0, t1, t4, s0, s1, s2
addu_s.qb t1, t1, a1
CONVERT_1x8888_TO_1x0565 t1, t2, s0, s1
sh t2, 0(a0)
4:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2
5:
j ra
nop
END(pixman_composite_over_n_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (32bit constant)
 * a2 - w
 *
 * Unmasked OVER of a solid source onto an a8r8g8b8 destination.
 * If the source is opaque (255 - srca == 0) loop 0: is a plain word
 * fill; otherwise loop 2: blends 2 pixels per iteration with
 * t0 = 255 - srca, and label 3: handles the trailing odd pixel.
 * Registers are saved only on the translucent path.  Instructions
 * after branches execute in the branch delay slot.
 */
beqz a2, 5f
nop
not t0, a1
srl t0, t0, 24 /* t0 = 255 - srca */
bgtz t0, 1f /* translucent source: full blend path */
nop
0:
sw a1, 0(a0)
addiu a2, a2, -1
bgtz a2, 0b
addiu a0, a0, 4
j ra
nop
1:
SAVE_REGS_ON_STACK 0, s0, s1, s2
li t4, 0x00ff00ff
addiu t1, a2, -1
beqz t1, 3f
nop
2:
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */
MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t0, t7, t8, t4, t9, s0, s1, s2, t2, t3
addu_s.qb t7, t7, a1
addu_s.qb t8, t8, a1
sw t7, 0(a0)
sw t8, 4(a0)
addiu a2, a2, -2
addiu t1, a2, -1
bgtz t1, 2b
addiu a0, a0, 8
3:
beqz a2, 4f
nop
lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */
MIPS_UN8x4_MUL_UN8 t1, t0, t3, t4, t5, t6, t7
addu_s.qb t3, t3, a1
sw t3, 0(a0)
4:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2
5:
j ra
nop
END(pixman_composite_over_n_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_8_8_8_asm_mips)
/*
 * a0 - dst (a8)
 * a1 - src (a8)
 * a2 - mask (a8)
 * a3 - w
 *
 * ADD operator: dst = saturate(dst + src * mask) on a8 pixels.
 * Loop 0: processes 4 pixels at a time using packed 16-bit DSPr2
 * multiplies; loop 2: handles the remaining 0-3 pixels one at a
 * time.  Instructions after branches execute in the branch delay
 * slot.
 */
SAVE_REGS_ON_STACK 0, v0, v1
li t9, 0x00ff00ff
beqz a3, 3f
nop
srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */
beqz v0, 1f /* branch if less than 4 src pixels */
nop
0:
beqz v0, 1f
addiu v0, v0, -1
lbu t0, 0(a2)
lbu t1, 1(a2)
lbu t2, 2(a2)
lbu t3, 3(a2)
lbu t4, 0(a0)
lbu t5, 1(a0)
lbu t6, 2(a0)
lbu t7, 3(a0)
addiu a2, a2, 4
precr_sra.ph.w t1, t0, 0
precr_sra.ph.w t3, t2, 0
precr_sra.ph.w t5, t4, 0
precr_sra.ph.w t7, t6, 0
precr.qb.ph t0, t3, t1
precr.qb.ph t1, t7, t5
/* t0 = 4 packed mask bytes, t1 = 4 packed dst bytes */
lbu t4, 0(a1)
lbu v1, 1(a1)
lbu t7, 2(a1)
lbu t8, 3(a1)
addiu a1, a1, 4
precr_sra.ph.w v1, t4, 0
precr_sra.ph.w t8, t7, 0
muleu_s.ph.qbl t2, t0, t8
muleu_s.ph.qbr t3, t0, v1
shra_r.ph t4, t2, 8
shra_r.ph t5, t3, 8
and t4, t4, t9
and t5, t5, t9
addq.ph t2, t2, t4
addq.ph t3, t3, t5
shra_r.ph t2, t2, 8
shra_r.ph t3, t3, 8
precr.qb.ph t0, t2, t3
addu_s.qb t2, t0, t1
sb t2, 0(a0)
srl t2, t2, 8
sb t2, 1(a0)
srl t2, t2, 8
sb t2, 2(a0)
srl t2, t2, 8
sb t2, 3(a0)
addiu a3, a3, -4
b 0b
addiu a0, a0, 4
1:
beqz a3, 3f
nop
2:
/* scalar tail: one pixel per iteration */
lbu t8, 0(a1)
lbu t0, 0(a2)
lbu t1, 0(a0)
addiu a1, a1, 1
addiu a2, a2, 1
mul t2, t0, t8
shra_r.ph t3, t2, 8
andi t3, t3, 0xff
addq.ph t2, t2, t3
shra_r.ph t2, t2, 8
andi t2, t2, 0xff
addu_s.qb t2, t2, t1
sb t2, 0(a0)
addiu a3, a3, -1
bnez a3, 2b
addiu a0, a0, 1
3:
RESTORE_REGS_FROM_STACK 0, v0, v1
j ra
nop
END(pixman_composite_add_8_8_8_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_n_8_8_asm_mips)
/*
 * a0 - dst (a8)
 * a1 - src (32bit constant)
 * a2 - mask (a8)
 * a3 - w
 *
 * ADD operator with a solid source: dst = saturate(dst + srca * mask)
 * on a8 pixels.  Loop 0: processes 4 pixels at a time against srca
 * replicated into both halves of t8; loop 2: handles the remaining
 * 0-3 pixels one at a time.  Instructions after branches execute in
 * the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, v0
li t9, 0x00ff00ff
beqz a3, 3f
nop
srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */
beqz v0, 1f /* branch if less than 4 src pixels */
nop
srl t8, a1, 24
replv.ph t8, t8
0:
beqz v0, 1f
addiu v0, v0, -1
lbu t0, 0(a2)
lbu t1, 1(a2)
lbu t2, 2(a2)
lbu t3, 3(a2)
lbu t4, 0(a0)
lbu t5, 1(a0)
lbu t6, 2(a0)
lbu t7, 3(a0)
addiu a2, a2, 4
precr_sra.ph.w t1, t0, 0
precr_sra.ph.w t3, t2, 0
precr_sra.ph.w t5, t4, 0
precr_sra.ph.w t7, t6, 0
precr.qb.ph t0, t3, t1
precr.qb.ph t1, t7, t5
/* t0 = 4 packed mask bytes, t1 = 4 packed dst bytes */
muleu_s.ph.qbl t2, t0, t8
muleu_s.ph.qbr t3, t0, t8
shra_r.ph t4, t2, 8
shra_r.ph t5, t3, 8
and t4, t4, t9
and t5, t5, t9
addq.ph t2, t2, t4
addq.ph t3, t3, t5
shra_r.ph t2, t2, 8
shra_r.ph t3, t3, 8
precr.qb.ph t0, t2, t3
addu_s.qb t2, t0, t1
sb t2, 0(a0)
srl t2, t2, 8
sb t2, 1(a0)
srl t2, t2, 8
sb t2, 2(a0)
srl t2, t2, 8
sb t2, 3(a0)
addiu a3, a3, -4
b 0b
addiu a0, a0, 4
1:
beqz a3, 3f
nop
srl t8, a1, 24
2:
/* scalar tail: one pixel per iteration */
lbu t0, 0(a2)
lbu t1, 0(a0)
addiu a2, a2, 1
mul t2, t0, t8
shra_r.ph t3, t2, 8
andi t3, t3, 0xff
addq.ph t2, t2, t3
shra_r.ph t2, t2, 8
andi t2, t2, 0xff
addu_s.qb t2, t2, t1
sb t2, 0(a0)
addiu a3, a3, -1
bnez a3, 2b
addiu a0, a0, 1
3:
RESTORE_REGS_FROM_STACK 0, v0
j ra
nop
END(pixman_composite_add_n_8_8_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_n_8_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (32bit constant)
 * a2 - mask (a8)
 * a3 - w
 *
 * ADD operator with a solid source through an a8 mask onto an
 * a8r8g8b8 destination: dst = saturate(dst + src * m).  Loop 1:
 * handles 2 pixels per iteration, label 2: the trailing odd pixel.
 * Instructions after branches execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, s0, s1, s2
li t4, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f
nop
1:
/* a1 = source (32bit constant) */
lbu t0, 0(a2) /* t0 = mask (a8) */
lbu t1, 1(a2) /* t1 = mask (a8) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */
addiu a2, a2, 2
MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 a1, a1, \
t0, t1, \
t2, t3, \
t5, t6, \
t4, t7, t8, t9, s0, s1, s2
sw t5, 0(a0)
sw t6, 4(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 8
2:
beqz a3, 3f
nop
/* a1 = source (32bit constant) */
lbu t0, 0(a2) /* t0 = mask (a8) */
lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */
MIPS_UN8x4_MUL_UN8_ADD_UN8x4 a1, t0, t1, t2, t4, t3, t5, t6
sw t2, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2
j ra
nop
END(pixman_composite_add_n_8_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_0565_8_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (r5g6b5)
 * a2 - mask (a8)
 * a3 - w
 *
 * ADD operator on r5g6b5 pixels through an a8 mask: both source and
 * destination are widened to 8888, dst = saturate(dst + src * m),
 * then narrowed back.  Loop 1: handles 2 pixels per iteration,
 * label 2: the trailing odd pixel.  Instructions after branches
 * execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7
li t4, 0xf800f800
li t5, 0x07e007e0
li t6, 0x001F001F
li t7, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lhu t0, 0(a1) /* t0 = source (r5g6b5) */
lhu t1, 2(a1) /* t1 = source (r5g6b5) */
lbu t2, 0(a2) /* t2 = mask (a8) */
lbu t3, 1(a2) /* t3 = mask (a8) */
lhu t8, 0(a0) /* t8 = destination (r5g6b5) */
lhu t9, 2(a0) /* t9 = destination (r5g6b5) */
addiu a1, a1, 4
addiu a2, a2, 2
CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5
CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, s6, s7
MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 s0, s1, \
t2, t3, \
s2, s3, \
t0, t1, \
t7, s4, s5, s6, s7, t8, t9
CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3
sh s0, 0(a0)
sh s1, 2(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 4
2:
beqz a3, 3f
nop
lhu t0, 0(a1) /* t0 = source (r5g6b5) */
lbu t1, 0(a2) /* t1 = mask (a8) */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5
CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6
MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t3, t1, t4, t0, t7, t2, t5, t6
CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5
sh t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7
j ra
nop
END(pixman_composite_add_0565_8_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_8888_8_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - mask (a8)
 * a3 - w
 *
 * ADD operator: dst = saturate(dst + src * m) with a per-pixel a8
 * mask on a8r8g8b8 pixels.  Loop 1: handles 2 pixels per iteration,
 * label 2: the trailing odd pixel.  Instructions after branches
 * execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, s0, s1, s2
li t4, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
lbu t2, 0(a2) /* t2 = mask (a8) */
lbu t3, 1(a2) /* t3 = mask (a8) */
lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */
lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */
addiu a1, a1, 8
addiu a2, a2, 2
MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \
t2, t3, \
t5, t6, \
t7, t8, \
t4, t9, s0, s1, s2, t0, t1
sw t7, 0(a0)
sw t8, 4(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 8
2:
beqz a3, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lbu t1, 0(a2) /* t1 = mask (a8) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t3, t4, t5, t6, t7
sw t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2
j ra
nop
END(pixman_composite_add_8888_8_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_8888_n_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - mask (32bit constant)
 * a3 - w
 *
 * ADD operator with a constant mask: dst = saturate(dst + src * ma).
 * The mask's alpha byte is extracted once up front (a2 >>= 24).
 * Loop 1: handles 2 pixels per iteration, label 2: the trailing odd
 * pixel.  Instructions after branches execute in the branch delay
 * slot.
 */
SAVE_REGS_ON_STACK 0, s0, s1, s2
li t4, 0x00ff00ff
beqz a3, 3f
nop
srl a2, a2, 24
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
/* a2 = mask (32bit constant) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */
addiu a1, a1, 8
MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \
a2, a2, \
t2, t3, \
t5, t6, \
t4, t7, t8, t9, s0, s1, s2
sw t5, 0(a0)
sw t6, 4(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 8
2:
beqz a3, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
/* a2 = mask (32bit constant) */
lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */
MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, a2, t1, t3, t4, t5, t6, t7
sw t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2
j ra
nop
END(pixman_composite_add_8888_n_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_8888_8888_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - mask (a8r8g8b8)
 * a3 - w
 *
 * ADD operator with an a8r8g8b8 mask; only the mask's alpha byte is
 * used (srl by 24 before the multiply-add).  Loop 1: handles 2
 * pixels per iteration, label 2: the trailing odd pixel.
 * Instructions after branches execute in the branch delay slot.
 */
SAVE_REGS_ON_STACK 0, s0, s1, s2
li t4, 0x00ff00ff
beqz a3, 3f
nop
addiu t1, a3, -1
beqz t1, 2f
nop
1:
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 4(a1) /* t1 = source (a8r8g8b8) */
lw t2, 0(a2) /* t2 = mask (a8r8g8b8) */
lw t3, 4(a2) /* t3 = mask (a8r8g8b8) */
lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */
lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */
addiu a1, a1, 8
addiu a2, a2, 8
srl t2, t2, 24 /* keep only the mask alpha */
srl t3, t3, 24
MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \
t2, t3, \
t5, t6, \
t7, t8, \
t4, t9, s0, s1, s2, t0, t1
sw t7, 0(a0)
sw t8, 4(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 8
2:
beqz a3, 3f
nop
lw t0, 0(a1) /* t0 = source (a8r8g8b8) */
lw t1, 0(a2) /* t1 = mask (a8r8g8b8) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
srl t1, t1, 24
MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t3, t4, t5, t6, t7
sw t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2
j ra
nop
END(pixman_composite_add_8888_8888_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_8_8_asm_mips)
/*
 * a0 - dst (a8)
 * a1 - src (a8)
 * a2 - w
 *
 * Unmasked ADD on a8 pixels: dst = saturate(dst + src).
 * Loop 0: packs 4 bytes into a word and adds with a single
 * addu_s.qb; loop 2: handles the remaining 0-3 pixels one at a
 * time.  Leaf path — no registers saved.  Instructions after
 * branches execute in the branch delay slot.
 */
beqz a2, 3f
nop
srl t9, a2, 2 /* t9 = how many multiples of 4 dst pixels */
beqz t9, 1f /* branch if less than 4 src pixels */
nop
0:
beqz t9, 1f
addiu t9, t9, -1
lbu t0, 0(a1)
lbu t1, 1(a1)
lbu t2, 2(a1)
lbu t3, 3(a1)
lbu t4, 0(a0)
lbu t5, 1(a0)
lbu t6, 2(a0)
lbu t7, 3(a0)
addiu a1, a1, 4
precr_sra.ph.w t1, t0, 0
precr_sra.ph.w t3, t2, 0
precr_sra.ph.w t5, t4, 0
precr_sra.ph.w t7, t6, 0
precr.qb.ph t0, t3, t1
precr.qb.ph t1, t7, t5
/* t0 = 4 packed src bytes, t1 = 4 packed dst bytes */
addu_s.qb t2, t0, t1
sb t2, 0(a0)
srl t2, t2, 8
sb t2, 1(a0)
srl t2, t2, 8
sb t2, 2(a0)
srl t2, t2, 8
sb t2, 3(a0)
addiu a2, a2, -4
b 0b
addiu a0, a0, 4
1:
beqz a2, 3f
nop
2:
/* scalar tail: one pixel per iteration */
lbu t0, 0(a1)
lbu t1, 0(a0)
addiu a1, a1, 1
addu_s.qb t2, t0, t1
sb t2, 0(a0)
addiu a2, a2, -1
bnez a2, 2b
addiu a0, a0, 1
3:
j ra
nop
END(pixman_composite_add_8_8_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_add_8888_8888_asm_mips)
/*
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - w
 *
 * Unmasked ADD on a8r8g8b8 pixels: dst = saturate(dst + src) per
 * channel.  Loop 1: processes 4 pixels per iteration (the final
 * group of 4 is peeled into block 2: so the loop counter check can
 * sit in the delay slot); loop 3: handles the remaining 0-3 pixels.
 * Leaf path — no registers saved.  Instructions after branches
 * execute in the branch delay slot.
 */
beqz a2, 4f
nop
srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */
beqz t9, 3f /* branch if less than 4 src pixels */
nop
1:
addiu t9, t9, -1
beqz t9, 2f
addiu a2, a2, -4
lw t0, 0(a1)
lw t1, 4(a1)
lw t2, 8(a1)
lw t3, 12(a1)
lw t4, 0(a0)
lw t5, 4(a0)
lw t6, 8(a0)
lw t7, 12(a0)
addiu a1, a1, 16
addu_s.qb t4, t4, t0
addu_s.qb t5, t5, t1
addu_s.qb t6, t6, t2
addu_s.qb t7, t7, t3
sw t4, 0(a0)
sw t5, 4(a0)
sw t6, 8(a0)
sw t7, 12(a0)
b 1b
addiu a0, a0, 16
2:
/* peeled last group of 4 pixels */
lw t0, 0(a1)
lw t1, 4(a1)
lw t2, 8(a1)
lw t3, 12(a1)
lw t4, 0(a0)
lw t5, 4(a0)
lw t6, 8(a0)
lw t7, 12(a0)
addiu a1, a1, 16
addu_s.qb t4, t4, t0
addu_s.qb t5, t5, t1
addu_s.qb t6, t6, t2
addu_s.qb t7, t7, t3
sw t4, 0(a0)
sw t5, 4(a0)
sw t6, 8(a0)
sw t7, 12(a0)
beqz a2, 4f
addiu a0, a0, 16
3:
/* scalar tail: one pixel per iteration */
lw t0, 0(a1)
lw t1, 0(a0)
addiu a1, a1, 4
addiu a2, a2, -1
addu_s.qb t1, t1, t0
sw t1, 0(a0)
bnez a2, 3b
addiu a0, a0, 4
4:
jr ra
nop
END(pixman_composite_add_8888_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_out_reverse_8_0565_asm_mips)
/*
 * a0 - dst (r5g6b5)
 * a1 - src (a8)
 * a2 - w
 *
 * OUT_REVERSE operator: dst = dst * (255 - src) with an a8 source
 * onto an r5g6b5 destination (widened to 8888 for the multiply,
 * narrowed back for the store).  Loop 1: handles 2 pixels per
 * iteration, label 2: the trailing odd pixel.  Instructions after
 * branches execute in the branch delay slot.
 */
beqz a2, 4f
nop
SAVE_REGS_ON_STACK 0, s0, s1, s2, s3
li t2, 0xf800f800
li t3, 0x07e007e0
li t4, 0x001F001F
li t5, 0x00ff00ff
addiu t1, a2, -1
beqz t1, 2f
nop
1:
lbu t0, 0(a1) /* t0 = source (a8) */
lbu t1, 1(a1) /* t1 = source (a8) */
lhu t6, 0(a0) /* t6 = destination (r5g6b5) */
lhu t7, 2(a0) /* t7 = destination (r5g6b5) */
addiu a1, a1, 2
not t0, t0
not t1, t1
andi t0, 0xff /* t0 = neg source1 */
andi t1, 0xff /* t1 = neg source2 */
CONVERT_2x0565_TO_2x8888 t6, t7, t8, t9, t3, t4, s0, s1, s2, s3
MIPS_2xUN8x4_MUL_2xUN8 t8, t9, t0, t1, t6, t7, t5, s0, s1, s2, s3, t8, t9
CONVERT_2x8888_TO_2x0565 t6, t7, t8, t9, t2, t3, t4, s0, s1
sh t8, 0(a0)
sh t9, 2(a0)
addiu a2, a2, -2
addiu t1, a2, -1
bgtz t1, 1b
addiu a0, a0, 4
2:
beqz a2, 3f
nop
lbu t0, 0(a1) /* t0 = source (a8) */
lhu t1, 0(a0) /* t1 = destination (r5g6b5) */
not t0, t0
andi t0, 0xff /* t0 = neg source */
CONVERT_1x0565_TO_1x8888 t1, t2, t3, t4
MIPS_UN8x4_MUL_UN8 t2, t0, t1, t5, t3, t4, t6
CONVERT_1x8888_TO_1x0565 t1, t2, t3, t4
sh t2, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3
4:
j ra
nop
END(pixman_composite_out_reverse_8_0565_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_out_reverse_8_8888_asm_mips)
/*
 * OUT_REVERSE operator: the a8r8g8b8 destination is attenuated by the
 * inverted a8 source, *dst = *dst * (255 - src) / 255.
 *
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8)
 * a2 - w
 *
 * Label 1 processes two pixels per iteration; label 2 handles a single
 * trailing pixel when w is odd.
 */
beqz a2, 3f
nop
li t4, 0x00ff00ff
addiu t1, a2, -1
beqz t1, 2f
nop
1:
lbu t0, 0(a1) /* t0 = source (a8) */
lbu t1, 1(a1) /* t1 = source (a8) */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */
addiu a1, a1, 2
not t0, t0
not t1, t1
andi t0, 0xff /* t0 = neg source */
andi t1, 0xff /* t1 = neg source */
MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t5, t6, t4, t7, t8, t9, t2, t3, t0
sw t5, 0(a0)
sw t6, 4(a0)
addiu a2, a2, -2
addiu t1, a2, -1
bgtz t1, 1b
addiu a0, a0, 8 /* branch delay slot */
2:
beqz a2, 3f
nop
lbu t0, 0(a1) /* t0 = source (a8) */
lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */
not t0, t0
andi t0, 0xff /* t0 = neg source */
MIPS_UN8x4_MUL_UN8 t1, t0, t2, t4, t3, t5, t6
sw t2, 0(a0)
3:
j ra
nop
END(pixman_composite_out_reverse_8_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_over_reverse_n_8888_asm_mips)
/*
 * OVER_REVERSE operator with a solid source:
 * *dst = *dst + src * (255 - dst.alpha) / 255, per-byte saturated.
 *
 * a0 - dst (a8r8g8b8)
 * a1 - src (32bit constant)
 * a2 - w
 *
 * Label 1 processes 4 pixels per iteration; label 3 is the scalar tail.
 * The shra_r.ph/and/addq.ph/shra_r.ph sequences below implement the
 * rounded x/255 approximation ((x + ((x >> 8) & mask) + 128) >> 8).
 */
beqz a2, 5f
nop
SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7
li t0, 0x00ff00ff /* mask used by the x/255 rounding sequence */
srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */
beqz t9, 2f /* branch if less than 4 src pixels */
nop
1:
beqz t9, 2f
addiu t9, t9, -1 /* branch delay slot */
lw t1, 0(a0)
lw t2, 4(a0)
lw t3, 8(a0)
lw t4, 12(a0)
addiu a2, a2, -4
not t5, t1
not t6, t2
not t7, t3
not t8, t4
srl t5, t5, 24 /* t5..t8 = 255 - dst alpha */
srl t6, t6, 24
srl t7, t7, 24
srl t8, t8, 24
replv.ph t5, t5 /* replicate inverted alpha into both halfwords */
replv.ph t6, t6
replv.ph t7, t7
replv.ph t8, t8
muleu_s.ph.qbl s0, a1, t5
muleu_s.ph.qbr s1, a1, t5
muleu_s.ph.qbl s2, a1, t6
muleu_s.ph.qbr s3, a1, t6
muleu_s.ph.qbl s4, a1, t7
muleu_s.ph.qbr s5, a1, t7
muleu_s.ph.qbl s6, a1, t8
muleu_s.ph.qbr s7, a1, t8
shra_r.ph t5, s0, 8
shra_r.ph t6, s1, 8
shra_r.ph t7, s2, 8
shra_r.ph t8, s3, 8
and t5, t5, t0
and t6, t6, t0
and t7, t7, t0
and t8, t8, t0
addq.ph s0, s0, t5
addq.ph s1, s1, t6
addq.ph s2, s2, t7
addq.ph s3, s3, t8
shra_r.ph s0, s0, 8
shra_r.ph s1, s1, 8
shra_r.ph s2, s2, 8
shra_r.ph s3, s3, 8
shra_r.ph t5, s4, 8
shra_r.ph t6, s5, 8
shra_r.ph t7, s6, 8
shra_r.ph t8, s7, 8
and t5, t5, t0
and t6, t6, t0
and t7, t7, t0
and t8, t8, t0
addq.ph s4, s4, t5
addq.ph s5, s5, t6
addq.ph s6, s6, t7
addq.ph s7, s7, t8
shra_r.ph s4, s4, 8
shra_r.ph s5, s5, 8
shra_r.ph s6, s6, 8
shra_r.ph s7, s7, 8
precr.qb.ph t5, s0, s1 /* repack rounded products back to bytes */
precr.qb.ph t6, s2, s3
precr.qb.ph t7, s4, s5
precr.qb.ph t8, s6, s7
addu_s.qb t5, t1, t5 /* dst + src*(255-alpha)/255, saturated */
addu_s.qb t6, t2, t6
addu_s.qb t7, t3, t7
addu_s.qb t8, t4, t8
sw t5, 0(a0)
sw t6, 4(a0)
sw t7, 8(a0)
sw t8, 12(a0)
b 1b
addiu a0, a0, 16 /* branch delay slot */
2:
beqz a2, 4f
nop
3:
lw t1, 0(a0)
not t2, t1
srl t2, t2, 24 /* t2 = 255 - dst alpha */
replv.ph t2, t2
muleu_s.ph.qbl t4, a1, t2
muleu_s.ph.qbr t5, a1, t2
shra_r.ph t6, t4, 8
shra_r.ph t7, t5, 8
and t6,t6,t0
and t7,t7,t0
addq.ph t8, t4, t6
addq.ph t9, t5, t7
shra_r.ph t8, t8, 8
shra_r.ph t9, t9, 8
precr.qb.ph t9, t8, t9
addu_s.qb t9, t1, t9
sw t9, 0(a0)
addiu a2, a2, -1
bnez a2, 3b
addiu a0, a0, 4 /* branch delay slot */
4:
RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7
5:
j ra
nop
END(pixman_composite_over_reverse_n_8888_asm_mips)
LEAF_MIPS_DSPR2(pixman_composite_in_n_8_asm_mips)
/*
 * IN operator with a solid source: *dst = *dst * src.alpha / 255
 * (rounded), on an a8 destination.
 *
 * a0 - dst (a8)
 * a1 - src (32bit constant)
 * a2 - w
 *
 * Label 0 processes 4 dst bytes per iteration (packed into one word);
 * label 2 is the scalar tail for the remaining w % 4 bytes.
 */
li t9, 0x00ff00ff
beqz a2, 3f
nop
srl t7, a2, 2 /* t7 = how many multiples of 4 dst pixels */
beqz t7, 1f /* branch if less than 4 src pixels */
nop
srl t8, a1, 24 /* t8 = src alpha */
replv.ph t8, t8 /* replicate alpha into both halfwords */
0:
beqz t7, 1f
addiu t7, t7, -1 /* branch delay slot */
lbu t0, 0(a0)
lbu t1, 1(a0)
lbu t2, 2(a0)
lbu t3, 3(a0)
precr_sra.ph.w t1, t0, 0
precr_sra.ph.w t3, t2, 0
precr.qb.ph t0, t3, t1 /* t0 = the 4 dst bytes packed into one word */
muleu_s.ph.qbl t2, t0, t8
muleu_s.ph.qbr t3, t0, t8
shra_r.ph t4, t2, 8
shra_r.ph t5, t3, 8
and t4, t4, t9
and t5, t5, t9
addq.ph t2, t2, t4
addq.ph t3, t3, t5
shra_r.ph t2, t2, 8
shra_r.ph t3, t3, 8
precr.qb.ph t2, t2, t3
sb t2, 0(a0) /* store result one byte at a time */
srl t2, t2, 8
sb t2, 1(a0)
srl t2, t2, 8
sb t2, 2(a0)
srl t2, t2, 8
sb t2, 3(a0)
addiu a2, a2, -4
b 0b
addiu a0, a0, 4 /* branch delay slot */
1:
beqz a2, 3f
nop
srl t8, a1, 24 /* t8 = src alpha */
2:
lbu t0, 0(a0)
mul t2, t0, t8
shra_r.ph t3, t2, 8
andi t3, t3, 0x00ff
addq.ph t2, t2, t3
shra_r.ph t2, t2, 8
sb t2, 0(a0)
addiu a2, a2, -1
bnez a2, 2b
addiu a0, a0, 1 /* branch delay slot */
3:
j ra
nop
END(pixman_composite_in_n_8_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_8888_OVER_asm_mips)
/*
 * Nearest-neighbour scaled scanline, OVER operator: for each output
 * pixel, fetch src[vx >> 16], composite it over *dst, and advance vx
 * by unit_x (16.16 fixed point).
 *
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - w
 * a3 - vx
 * 16(sp) - unit_x
 */
SAVE_REGS_ON_STACK 0, s0, s1, s2, s3
lw t8, 16(sp) /* t8 = unit_x */
li t6, 0x00ff00ff
beqz a2, 3f
nop
addiu t1, a2, -1
beqz t1, 2f
nop
1:
sra t0, a3, 16 /* t0 = vx >> 16 */
sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */
addu t0, a1, t0
lw t0, 0(t0) /* t0 = source (a8r8g8b8) */
addu a3, a3, t8 /* a3 = vx + unit_x */
sra t1, a3, 16 /* t1 = vx >> 16 */
sll t1, t1, 2 /* t1 = t1 * 4 (a8r8g8b8) */
addu t1, a1, t1
lw t1, 0(t1) /* t1 = source (a8r8g8b8) */
addu a3, a3, t8 /* a3 = vx + unit_x */
lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */
lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */
OVER_2x8888_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t9, s0, s1, s2, s3
sw t4, 0(a0)
sw t5, 4(a0)
addiu a2, a2, -2
addiu t1, a2, -1
bgtz t1, 1b
addiu a0, a0, 8 /* branch delay slot */
2:
beqz a2, 3f
nop
sra t0, a3, 16 /* t0 = vx >> 16 */
sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */
addu t0, a1, t0
lw t0, 0(t0) /* t0 = source (a8r8g8b8) */
lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */
addu a3, a3, t8 /* a3 = vx + unit_x */
OVER_8888_8888 t0, t1, t2, t6, t4, t5, t3, t7
sw t2, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3
j ra
nop
END(pixman_scaled_nearest_scanline_8888_8888_OVER_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_0565_OVER_asm_mips)
/*
 * Nearest-neighbour scaled scanline, OVER operator, with the r5g6b5
 * destination converted to/from a8r8g8b8 around the composite.
 *
 * a0 - dst (r5g6b5)
 * a1 - src (a8r8g8b8)
 * a2 - w
 * a3 - vx
 * 16(sp) - unit_x
 */
SAVE_REGS_ON_STACK 24, s0, s1, s2, s3, s4, v0, v1
lw t8, 40(sp) /* t8 = unit_x (16 + 24 bytes of saved regs) */
li t4, 0x00ff00ff
li t5, 0xf800f800 /* r5g6b5 red field mask */
li t6, 0x07e007e0 /* r5g6b5 green field mask */
li t7, 0x001F001F /* r5g6b5 blue field mask */
beqz a2, 3f
nop
addiu t1, a2, -1
beqz t1, 2f
nop
1:
sra t0, a3, 16 /* t0 = vx >> 16 */
sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */
addu t0, a1, t0
lw t0, 0(t0) /* t0 = source (a8r8g8b8) */
addu a3, a3, t8 /* a3 = vx + unit_x */
sra t1, a3, 16 /* t1 = vx >> 16 */
sll t1, t1, 2 /* t1 = t1 * 4 (a8r8g8b8) */
addu t1, a1, t1
lw t1, 0(t1) /* t1 = source (a8r8g8b8) */
addu a3, a3, t8 /* a3 = vx + unit_x */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
lhu t3, 2(a0) /* t3 = destination (r5g6b5) */
CONVERT_2x0565_TO_2x8888 t2, t3, v0, v1, t6, t7, s0, s1, s2, s3
OVER_2x8888_2x8888 t0, t1, v0, v1, t2, t3, t4, t9, s0, s1, s2, s3, s4
CONVERT_2x8888_TO_2x0565 t2, t3, v0, v1, t5, t6, t7, t9, s2
sh v0, 0(a0)
sh v1, 2(a0)
addiu a2, a2, -2
addiu t1, a2, -1
bgtz t1, 1b
addiu a0, a0, 4 /* branch delay slot */
2:
beqz a2, 3f
nop
sra t0, a3, 16 /* t0 = vx >> 16 */
sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */
addu t0, a1, t0
lw t0, 0(t0) /* t0 = source (a8r8g8b8) */
lhu t1, 0(a0) /* t1 = destination (r5g6b5) */
addu a3, a3, t8 /* a3 = vx + unit_x */
CONVERT_1x0565_TO_1x8888 t1, t2, t5, t6
OVER_8888_8888 t0, t2, t1, t4, t3, t5, t6, t7
CONVERT_1x8888_TO_1x0565 t1, t2, t5, t6
sh t2, 0(a0)
3:
RESTORE_REGS_FROM_STACK 24, s0, s1, s2, s3, s4, v0, v1
j ra
nop
END(pixman_scaled_nearest_scanline_8888_0565_OVER_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_0565_8888_SRC_asm_mips)
/*
 * Nearest-neighbour scaled scanline, SRC operator: fetch the r5g6b5
 * source pixel at vx >> 16, widen it to a8r8g8b8 and store it.
 *
 * a0 - dst (a8r8g8b8)
 * a1 - src (r5g6b5)
 * a2 - w
 * a3 - vx
 * 16(sp) - unit_x
 */
SAVE_REGS_ON_STACK 0, v0
beqz a2, 3f
nop
lw v0, 16(sp) /* v0 = unit_x */
addiu t1, a2, -1
beqz t1, 2f
nop
li t4, 0x07e007e0 /* r5g6b5 green field mask */
li t5, 0x001F001F /* r5g6b5 blue field mask */
1:
sra t0, a3, 16 /* t0 = vx >> 16 */
sll t0, t0, 1 /* t0 = t0 * 2 ((r5g6b5)) */
addu t0, a1, t0
lhu t0, 0(t0) /* t0 = source ((r5g6b5)) */
addu a3, a3, v0 /* a3 = vx + unit_x */
sra t1, a3, 16 /* t1 = vx >> 16 */
sll t1, t1, 1 /* t1 = t1 * 2 ((r5g6b5)) */
addu t1, a1, t1
lhu t1, 0(t1) /* t1 = source ((r5g6b5)) */
addu a3, a3, v0 /* a3 = vx + unit_x */
addiu a2, a2, -2
CONVERT_2x0565_TO_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
sw t2, 0(a0)
sw t3, 4(a0)
addiu t2, a2, -1
bgtz t2, 1b
addiu a0, a0, 8 /* branch delay slot */
2:
beqz a2, 3f
nop
sra t0, a3, 16 /* t0 = vx >> 16 */
sll t0, t0, 1 /* t0 = t0 * 2 ((r5g6b5)) */
addu t0, a1, t0
lhu t0, 0(t0) /* t0 = source ((r5g6b5)) */
CONVERT_1x0565_TO_1x8888 t0, t1, t2, t3
sw t1, 0(a0)
3:
RESTORE_REGS_FROM_STACK 0, v0
j ra
nop
END(pixman_scaled_nearest_scanline_0565_8888_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_mips)
/*
 * Nearest-neighbour scaled scanline, OVER operator with an a8 mask:
 * src[vx >> 16] is combined with the mask and composited over the
 * r5g6b5 destination (converted to/from a8r8g8b8 around the composite).
 *
 * a0 - dst (r5g6b5)
 * a1 - src (a8r8g8b8)
 * a2 - mask (a8)
 * a3 - w
 * 16(sp) - vx
 * 20(sp) - unit_x
 */
beqz a3, 4f
nop
SAVE_REGS_ON_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5
lw v0, 36(sp) /* v0 = vx (16 + 20 bytes of saved regs) */
lw v1, 40(sp) /* v1 = unit_x */
li t6, 0x00ff00ff
li t7, 0xf800f800 /* r5g6b5 red field mask */
li t8, 0x07e007e0 /* r5g6b5 green field mask */
li t9, 0x001F001F /* r5g6b5 blue field mask */
addiu t1, a3, -1
beqz t1, 2f
nop
1:
sra t0, v0, 16 /* t0 = vx >> 16 */
sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */
addu t0, a1, t0
lw t0, 0(t0) /* t0 = source (a8r8g8b8) */
addu v0, v0, v1 /* v0 = vx + unit_x */
sra t1, v0, 16 /* t1 = vx >> 16 */
sll t1, t1, 2 /* t1 = t1 * 4 (a8r8g8b8) */
addu t1, a1, t1
lw t1, 0(t1) /* t1 = source (a8r8g8b8) */
addu v0, v0, v1 /* v0 = vx + unit_x */
lbu t2, 0(a2) /* t2 = mask (a8) */
lbu t3, 1(a2) /* t3 = mask (a8) */
lhu t4, 0(a0) /* t4 = destination (r5g6b5) */
lhu t5, 2(a0) /* t5 = destination (r5g6b5) */
addiu a2, a2, 2
CONVERT_2x0565_TO_2x8888 t4, t5, s0, s1, t8, t9, s2, s3, s4, s5
OVER_2x8888_2x8_2x8888 t0, t1, \
t2, t3, \
s0, s1, \
t4, t5, \
t6, s2, s3, s4, s5, t2, t3
CONVERT_2x8888_TO_2x0565 t4, t5, s0, s1, t7, t8, t9, s2, s3
sh s0, 0(a0)
sh s1, 2(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 4 /* branch delay slot */
2:
beqz a3, 3f
nop
sra t0, v0, 16 /* t0 = vx >> 16 */
sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */
addu t0, a1, t0
lw t0, 0(t0) /* t0 = source (a8r8g8b8) */
lbu t1, 0(a2) /* t1 = mask (a8) */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
CONVERT_1x0565_TO_1x8888 t2, t3, t4, t5
OVER_8888_8_8888 t0, t1, t3, t2, t6, t4, t5, t7, t8
CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5
sh t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5
4:
j ra
nop
END(pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_mips)
/*
 * Nearest-neighbour scaled scanline, OVER operator with an a8 mask,
 * r5g6b5 source and destination (both widened to a8r8g8b8 for the
 * composite, the result narrowed back).
 *
 * a0 - dst (r5g6b5)
 * a1 - src (r5g6b5)
 * a2 - mask (a8)
 * a3 - w
 * 16(sp) - vx
 * 20(sp) - unit_x
 */
beqz a3, 4f
nop
SAVE_REGS_ON_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5
lw v0, 36(sp) /* v0 = vx (16 + 20 bytes of saved regs) */
lw v1, 40(sp) /* v1 = unit_x */
li t4, 0xf800f800 /* r5g6b5 red field mask */
li t5, 0x07e007e0 /* r5g6b5 green field mask */
li t6, 0x001F001F /* r5g6b5 blue field mask */
li t7, 0x00ff00ff
addiu t1, a3, -1
beqz t1, 2f
nop
1:
sra t0, v0, 16 /* t0 = vx >> 16 */
sll t0, t0, 1 /* t0 = t0 * 2 (r5g6b5) */
addu t0, a1, t0
lhu t0, 0(t0) /* t0 = source (r5g6b5) */
addu v0, v0, v1 /* v0 = vx + unit_x */
sra t1, v0, 16 /* t1 = vx >> 16 */
sll t1, t1, 1 /* t1 = t1 * 2 (r5g6b5) */
addu t1, a1, t1
lhu t1, 0(t1) /* t1 = source (r5g6b5) */
addu v0, v0, v1 /* v0 = vx + unit_x */
lbu t2, 0(a2) /* t2 = mask (a8) */
lbu t3, 1(a2) /* t3 = mask (a8) */
lhu t8, 0(a0) /* t8 = destination (r5g6b5) */
lhu t9, 2(a0) /* t9 = destination (r5g6b5) */
addiu a2, a2, 2
CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5
CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, t0, t1
OVER_2x8888_2x8_2x8888 s0, s1, \
t2, t3, \
s2, s3, \
t0, t1, \
t7, t8, t9, s4, s5, s0, s1
CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3
sh s0, 0(a0)
sh s1, 2(a0)
addiu a3, a3, -2
addiu t1, a3, -1
bgtz t1, 1b
addiu a0, a0, 4 /* branch delay slot */
2:
beqz a3, 3f
nop
sra t0, v0, 16 /* t0 = vx >> 16 */
sll t0, t0, 1 /* t0 = t0 * 2 (r5g6b5) */
addu t0, a1, t0
lhu t0, 0(t0) /* t0 = source (r5g6b5) */
lbu t1, 0(a2) /* t1 = mask (a8) */
lhu t2, 0(a0) /* t2 = destination (r5g6b5) */
CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5
CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6
OVER_8888_8_8888 t3, t1, t4, t0, t7, t2, t5, t6, t8
CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5
sh t3, 0(a0)
3:
RESTORE_REGS_FROM_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5
4:
j ra
nop
END(pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_mips)
/*
 * Bilinear scaled scanline, SRC operator: each output pixel is the
 * bilinear blend of the four source texels tl/tr/bl/br, using vertical
 * weights wt/wb and the fractional part of vx as the horizontal weight;
 * vx advances by unit_x (16.16 fixed point) per output pixel.
 *
 * a0 - *dst
 * a1 - *src_top
 * a2 - *src_bottom
 * a3 - w
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 */
beqz a3, 1f
nop
SAVE_REGS_ON_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7
lw s0, 36(sp) /* s0 = wt */
lw s1, 40(sp) /* s1 = wb */
lw s2, 44(sp) /* s2 = vx */
lw s3, 48(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 2 /* t9 = byte offset of the left texel column */
addiu t8, t9, 4
lwx t0, t9(a1) /* t0 = tl */
lwx t1, t8(a1) /* t1 = tr */
addiu a3, a3, -1
lwx t2, t9(a2) /* t2 = bl */
lwx t3, t8(a2) /* t3 = br */
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
addu s2, s2, s3 /* vx += unit_x; */
sw t0, 0(a0)
bnez a3, 0b
addiu a0, a0, 4 /* branch delay slot */
RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_mips)
/*
 * Bilinear scaled scanline, SRC operator, a8r8g8b8 source pixels with
 * the interpolated result converted to r5g6b5 before storing.
 *
 * a0 - *dst
 * a1 - *src_top
 * a2 - *src_bottom
 * a3 - w
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 */
beqz a3, 1f
nop
SAVE_REGS_ON_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7
lw s0, 36(sp) /* s0 = wt */
lw s1, 40(sp) /* s1 = wb */
lw s2, 44(sp) /* s2 = vx */
lw s3, 48(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 2 /* t9 = byte offset of the left texel column */
addiu t8, t9, 4
lwx t0, t9(a1) /* t0 = tl */
lwx t1, t8(a1) /* t1 = tr */
addiu a3, a3, -1
lwx t2, t9(a2) /* t2 = bl */
lwx t3, t8(a2) /* t3 = br */
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3
addu s2, s2, s3 /* vx += unit_x; */
sh t1, 0(a0)
bnez a3, 0b
addiu a0, a0, 2 /* branch delay slot */
RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8888_SRC_asm_mips)
/*
 * Bilinear scaled scanline, SRC operator, r5g6b5 source pixels widened
 * to a8r8g8b8 before interpolation; result stored as a8r8g8b8.
 *
 * a0 - *dst
 * a1 - *src_top
 * a2 - *src_bottom
 * a3 - w
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 */
beqz a3, 1f
nop
SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
lw s0, 44(sp) /* s0 = wt */
lw s1, 48(sp) /* s1 = wb */
lw s2, 52(sp) /* s2 = vx */
lw s3, 56(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
li v1, 0x07e007e0 /* r5g6b5 green field mask */
li s8, 0x001f001f /* r5g6b5 blue field mask */
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 1 /* t9 = byte offset of the left texel column */
addiu t8, t9, 2
lhx t0, t9(a1) /* t0 = tl */
lhx t1, t8(a1) /* t1 = tr */
andi t1, t1, 0xffff /* lhx sign-extends; keep low 16 bits */
addiu a3, a3, -1
lhx t2, t9(a2) /* t2 = bl */
lhx t3, t8(a2) /* t3 = br */
andi t3, t3, 0xffff
CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7
CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
addu s2, s2, s3 /* vx += unit_x; */
sw t0, 0(a0)
bnez a3, 0b
addiu a0, a0, 4 /* branch delay slot */
RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_0565_8888_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_mips)
/*
 * Bilinear scaled scanline, SRC operator, r5g6b5 source pixels widened
 * to a8r8g8b8 for interpolation; result narrowed back to r5g6b5.
 *
 * a0 - *dst
 * a1 - *src_top
 * a2 - *src_bottom
 * a3 - w
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 */
beqz a3, 1f
nop
SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
lw s0, 44(sp) /* s0 = wt */
lw s1, 48(sp) /* s1 = wb */
lw s2, 52(sp) /* s2 = vx */
lw s3, 56(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
li v1, 0x07e007e0 /* r5g6b5 green field mask */
li s8, 0x001f001f /* r5g6b5 blue field mask */
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 1 /* t9 = byte offset of the left texel column */
addiu t8, t9, 2
lhx t0, t9(a1) /* t0 = tl */
lhx t1, t8(a1) /* t1 = tr */
andi t1, t1, 0xffff /* lhx sign-extends; keep low 16 bits */
addiu a3, a3, -1
lhx t2, t9(a2) /* t2 = bl */
lhx t3, t8(a2) /* t3 = br */
andi t3, t3, 0xffff
CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7
CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3
addu s2, s2, s3 /* vx += unit_x; */
sh t1, 0(a0)
bnez a3, 0b
addiu a0, a0, 2 /* branch delay slot */
RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_mips)
/*
 * Bilinear scaled scanline, OVER operator: the bilinearly interpolated
 * source pixel is composited over the a8r8g8b8 destination.
 *
 * a0 - *dst
 * a1 - *src_top
 * a2 - *src_bottom
 * a3 - w
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 */
beqz a3, 1f
nop
SAVE_REGS_ON_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8
lw s0, 40(sp) /* s0 = wt */
lw s1, 44(sp) /* s1 = wb */
lw s2, 48(sp) /* s2 = vx */
lw s3, 52(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
li s8, 0x00ff00ff
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 2 /* t9 = byte offset of the left texel column */
addiu t8, t9, 4
lwx t0, t9(a1) /* t0 = tl */
lwx t1, t8(a1) /* t1 = tr */
addiu a3, a3, -1
lwx t2, t9(a2) /* t2 = bl */
lwx t3, t8(a2) /* t3 = br */
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
lw t1, 0(a0) /* t1 = dest */
OVER_8888_8888 t0, t1, t2, s8, t3, t4, t5, t6
addu s2, s2, s3 /* vx += unit_x; */
sw t2, 0(a0)
bnez a3, 0b
addiu a0, a0, 4 /* branch delay slot */
RESTORE_REGS_FROM_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_mips)
/*
 * Bilinear scaled scanline, ADD operator: the bilinearly interpolated
 * source pixel is added (per-byte saturated) to the destination.
 *
 * a0 - *dst
 * a1 - *src_top
 * a2 - *src_bottom
 * a3 - w
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 */
beqz a3, 1f
nop
SAVE_REGS_ON_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7
lw s0, 36(sp) /* s0 = wt */
lw s1, 40(sp) /* s1 = wb */
lw s2, 44(sp) /* s2 = vx */
lw s3, 48(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 2 /* t9 = byte offset of the left texel column */
addiu t8, t9, 4
lwx t0, t9(a1) /* t0 = tl */
lwx t1, t8(a1) /* t1 = tr */
addiu a3, a3, -1
lwx t2, t9(a2) /* t2 = bl */
lwx t3, t8(a2) /* t3 = br */
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
lw t1, 0(a0)
addu_s.qb t2, t0, t1 /* per-byte saturated add with dst */
addu s2, s2, s3 /* vx += unit_x; */
sw t2, 0(a0)
bnez a3, 0b
addiu a0, a0, 4 /* branch delay slot */
RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_mips)
/*
 * Bilinear scaled scanline, SRC operator with an a8 mask: the
 * interpolated a8r8g8b8 source pixel is multiplied by the mask byte
 * before being stored.
 *
 * a0 - *dst
 * a1 - *mask
 * a2 - *src_top
 * a3 - *src_bottom
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 * 32(sp) - w
 */
lw v1, 32(sp) /* v1 = w (loop counter) */
beqz v1, 1f
nop
SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
lw s0, 44(sp) /* s0 = wt */
lw s1, 48(sp) /* s1 = wb */
lw s2, 52(sp) /* s2 = vx */
lw s3, 56(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
li s8, 0x00ff00ff
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 2 /* t9 = byte offset of the left texel column */
addiu t8, t9, 4
lwx t0, t9(a2) /* t0 = tl */
lwx t1, t8(a2) /* t1 = tr */
addiu v1, v1, -1
lwx t2, t9(a3) /* t2 = bl */
lwx t3, t8(a3) /* t3 = br */
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
lbu t1, 0(a1) /* t1 = mask */
addiu a1, a1, 1
MIPS_UN8x4_MUL_UN8 t0, t1, t0, s8, t2, t3, t4
addu s2, s2, s3 /* vx += unit_x; */
sw t0, 0(a0)
bnez v1, 0b
addiu a0, a0, 4 /* branch delay slot */
RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_mips)
/*
 * Bilinear scaled scanline, SRC operator with an a8 mask: interpolated
 * a8r8g8b8 source multiplied by the mask, then narrowed to r5g6b5.
 *
 * a0 - *dst
 * a1 - *mask
 * a2 - *src_top
 * a3 - *src_bottom
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 * 32(sp) - w
 */
lw v1, 32(sp) /* v1 = w (loop counter) */
beqz v1, 1f
nop
SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
lw s0, 44(sp) /* s0 = wt */
lw s1, 48(sp) /* s1 = wb */
lw s2, 52(sp) /* s2 = vx */
lw s3, 56(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
li s8, 0x00ff00ff
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 2 /* t9 = byte offset of the left texel column */
addiu t8, t9, 4
lwx t0, t9(a2) /* t0 = tl */
lwx t1, t8(a2) /* t1 = tr */
addiu v1, v1, -1
lwx t2, t9(a3) /* t2 = bl */
lwx t3, t8(a3) /* t3 = br */
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
lbu t1, 0(a1) /* t1 = mask */
addiu a1, a1, 1
MIPS_UN8x4_MUL_UN8 t0, t1, t0, s8, t2, t3, t4
CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3
addu s2, s2, s3 /* vx += unit_x; */
sh t1, 0(a0)
bnez v1, 0b
addiu a0, a0, 2 /* branch delay slot */
RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_mips)
/*
 * Bilinear scaled scanline, SRC operator with an a8 mask: r5g6b5 source
 * texels widened to a8r8g8b8, interpolated, multiplied by the mask and
 * stored as 32-bit pixels.  ra is pressed into service as the loop
 * counter (saved/restored with the other registers).
 *
 * a0 - *dst
 * a1 - *mask
 * a2 - *src_top
 * a3 - *src_bottom
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 * 32(sp) - w
 */
lw t0, 32(sp) /* t0 = w */
beqz t0, 1f
nop
SAVE_REGS_ON_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra
lw s0, 48(sp) /* s0 = wt */
lw s1, 52(sp) /* s1 = wb */
lw s2, 56(sp) /* s2 = vx */
lw s3, 60(sp) /* s3 = unit_x */
lw ra, 64(sp) /* ra = w */
li v0, 0x00ff00ff
li v1, 0x07e007e0 /* r5g6b5 green field mask */
li s8, 0x001f001f /* r5g6b5 blue field mask */
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
li t5, BILINEAR_INTERPOLATION_RANGE
subu t5, t5, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 1 /* t9 = byte offset of the left texel column */
addiu t8, t9, 2
lhx t0, t9(a2) /* t0 = tl */
lhx t1, t8(a2) /* t1 = tr */
andi t1, t1, 0xffff /* lhx sign-extends; keep low 16 bits */
addiu ra, ra, -1
lhx t2, t9(a3) /* t2 = bl */
lhx t3, t8(a3) /* t3 = br */
andi t3, t3, 0xffff
CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7
CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
lbu t1, 0(a1) /* t1 = mask */
addiu a1, a1, 1
MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t2, t3, t4
addu s2, s2, s3 /* vx += unit_x; */
sw t0, 0(a0)
bnez ra, 0b
addiu a0, a0, 4 /* branch delay slot */
RESTORE_REGS_FROM_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_mips)
/*
 * Bilinear scaled scanline, SRC operator with an a8 mask: r5g6b5 source
 * texels widened to a8r8g8b8, interpolated, multiplied by the mask and
 * narrowed back to r5g6b5.  ra serves as the loop counter
 * (saved/restored with the other registers).
 *
 * a0 - *dst
 * a1 - *mask
 * a2 - *src_top
 * a3 - *src_bottom
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 * 32(sp) - w
 */
lw t0, 32(sp) /* t0 = w */
beqz t0, 1f
nop
SAVE_REGS_ON_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra
lw s0, 48(sp) /* s0 = wt */
lw s1, 52(sp) /* s1 = wb */
lw s2, 56(sp) /* s2 = vx */
lw s3, 60(sp) /* s3 = unit_x */
lw ra, 64(sp) /* ra = w */
li v0, 0x00ff00ff
li v1, 0x07e007e0 /* r5g6b5 green field mask */
li s8, 0x001f001f /* r5g6b5 blue field mask */
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
li t5, BILINEAR_INTERPOLATION_RANGE
subu t5, t5, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 1 /* t9 = byte offset of the left texel column */
addiu t8, t9, 2
lhx t0, t9(a2) /* t0 = tl */
lhx t1, t8(a2) /* t1 = tr */
andi t1, t1, 0xffff /* lhx sign-extends; keep low 16 bits */
addiu ra, ra, -1
lhx t2, t9(a3) /* t2 = bl */
lhx t3, t8(a3) /* t3 = br */
andi t3, t3, 0xffff
CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7
CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
lbu t1, 0(a1) /* t1 = mask */
addiu a1, a1, 1
MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t2, t3, t4
CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3
addu s2, s2, s3 /* vx += unit_x; */
sh t1, 0(a0)
bnez ra, 0b
addiu a0, a0, 2 /* branch delay slot */
RESTORE_REGS_FROM_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_mips)
/*
 * Bilinear scaled scanline, OVER operator with an a8 mask: the
 * interpolated source pixel is combined with the mask and composited
 * over the destination.  Note: registers are saved before the w == 0
 * check here, so the exit path at label 1 always restores them.
 *
 * a0 - dst (a8r8g8b8)
 * a1 - mask (a8)
 * a2 - src_top (a8r8g8b8)
 * a3 - src_bottom (a8r8g8b8)
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 * 32(sp) - w
 */
SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
lw v1, 60(sp) /* v1 = w(sp + 32 + 28 save regs stack offset)*/
beqz v1, 1f
nop
lw s0, 44(sp) /* s0 = wt */
lw s1, 48(sp) /* s1 = wb */
lw s2, 52(sp) /* s2 = vx */
lw s3, 56(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
li s8, 0x00ff00ff
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 2 /* t9 = byte offset of the left texel column */
addiu t8, t9, 4
lwx t0, t9(a2) /* t0 = tl */
lwx t1, t8(a2) /* t1 = tr */
addiu v1, v1, -1
lwx t2, t9(a3) /* t2 = bl */
lwx t3, t8(a3) /* t3 = br */
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, \
t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
lbu t1, 0(a1) /* t1 = mask */
lw t2, 0(a0) /* t2 = dst */
addiu a1, a1, 1
OVER_8888_8_8888 t0, t1, t2, t0, s8, t3, t4, t5, t6
addu s2, s2, s3 /* vx += unit_x; */
sw t0, 0(a0)
bnez v1, 0b
addiu a0, a0, 4 /* branch delay slot */
1:
RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
j ra
nop
END(pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_mips)
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_mips)
/*
 * Bilinear scaled scanline, ADD operator with an a8 mask: the
 * interpolated source pixel is multiplied by the mask and added
 * (saturated) to the destination.
 *
 * a0 - *dst
 * a1 - *mask
 * a2 - *src_top
 * a3 - *src_bottom
 * 16(sp) - wt
 * 20(sp) - wb
 * 24(sp) - vx
 * 28(sp) - unit_x
 * 32(sp) - w
 */
lw v1, 32(sp) /* v1 = w (loop counter) */
beqz v1, 1f
nop
SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
lw s0, 44(sp) /* s0 = wt */
lw s1, 48(sp) /* s1 = wb */
lw s2, 52(sp) /* s2 = vx */
lw s3, 56(sp) /* s3 = unit_x */
li v0, BILINEAR_INTERPOLATION_RANGE
li s8, 0x00ff00ff
sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) /* pre-scale weights */
sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS))
0:
andi t4, s2, 0xffff /* t4 = (short)vx */
srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */
subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */
mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */
mul s5, s0, t4 /* s5 = wt*(vx>>8) */
mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */
mul s7, s1, t4 /* s7 = wb*(vx>>8) */
sra t9, s2, 16
sll t9, t9, 2 /* t9 = byte offset of the left texel column */
addiu t8, t9, 4
lwx t0, t9(a2) /* t0 = tl */
lwx t1, t8(a2) /* t1 = tr */
addiu v1, v1, -1
lwx t2, t9(a3) /* t2 = bl */
lwx t3, t8(a3) /* t3 = br */
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7
lbu t1, 0(a1) /* t1 = mask */
lw t2, 0(a0) /* t2 = dst */
addiu a1, a1, 1
MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t0, s8, t3, t4, t5
addu s2, s2, s3 /* vx += unit_x; */
sw t0, 0(a0)
bnez v1, 0b
addiu a0, a0, 4 /* branch delay slot */
RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8
1:
j ra
nop
END(pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_mips)
|
imishinist/green-thread | 949 | asm/context.S | #ifdef __APPLE__
#define SET_CONTEXT _set_context
#define SWITCH_CONTEXT _switch_context
#else
#define SET_CONTEXT set_context
#define SWITCH_CONTEXT switch_context
#endif
/*
 * Minimal user-space context-switch primitives (AArch64).
 *
 * The context record at x0 is 160 bytes:
 *   [x0 +   0]  d8..d15   (four 16-byte pairs)
 *   [x0 +  64]  x19..x28  (five 16-byte pairs)
 *   [x0 + 144]  x30 (lr) and the saved sp
 * These are exactly the AAPCS64 callee-saved registers, so saving them
 * plus lr/sp is sufficient at a call boundary.
 *
 * set_context(ctx):    setjmp-style -- snapshot the current state into
 *                      ctx and return 0 to the caller.
 * switch_context(ctx): longjmp-style -- reload the state from ctx and
 *                      "return" 1 at the call site recorded in ctx.
 */
.global SET_CONTEXT
.global SWITCH_CONTEXT
SET_CONTEXT:
stp d8, d9, [x0]
stp d10, d11, [x0, #16]
stp d12, d13, [x0, #16 * 2]
stp d14, d15, [x0, #16 * 3]
stp x19, x20, [x0, #16 * 4]
stp x21, x22, [x0, #16 * 5]
stp x23, x24, [x0, #16 * 6]
stp x25, x26, [x0, #16 * 7]
stp x27, x28, [x0, #16 * 8]
mov x1, sp // sp cannot be stored by stp directly; stage it in x1 (caller-saved)
stp x30, x1, [x0, #16 * 9]
mov x0, 0 // direct return path reports 0
ret
SWITCH_CONTEXT:
ldp d8, d9, [x0]
ldp d10, d11, [x0, #16]
ldp d12, d13, [x0, #16 * 2]
ldp d14, d15, [x0, #16 * 3]
ldp x19, x20, [x0, #16 * 4]
ldp x21, x22, [x0, #16 * 5]
ldp x23, x24, [x0, #16 * 6]
ldp x25, x26, [x0, #16 * 7]
ldp x27, x28, [x0, #16 * 8]
ldp x30, x2, [x0, #16 * 9]
mov sp, x2
mov x0, 1 // resumed path reports 1; ret below goes through the restored x30
ret |
imnotluoluo/banishrcore | 676 | os/src/task/switch.S | .altmacro
# TaskContext layout: offset 0 = ra, offset 8 = sp, offsets 16.. = s0..s11
# ((n+2)*8 places s<n> right after the ra and sp slots).
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# Saves the current task's callee-saved state into *a0 and loads the
# next task's state from *a1; the final ret resumes the next task at
# the point where it last called __switch.
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore kernel stack of next task
ld sp, 8(a1)
# return through the just-restored ra, i.e. into the next task
ret
|
imnotluoluo/banishrcore | 1,488 | os/src/trap/trap.S | .altmacro
# Save/load general register x<n> into slot n of the TrapContext
# (34 slots of 8 bytes; sp is the base of the TrapContext).
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
# trap entry must be 4-byte aligned (stvec requirement)
.align 2
__alltraps:
# Entry from user mode: swap sp with sscratch so sp points at the
# kernel stack while sscratch holds the user stack pointer.
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
# fall through: trap_handler returns with sp still -> TrapContext
__restore:
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpose registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret
|
IncalaCode/fid-what-you-see | 10,494 | cpp_code.s | .file "test1.cpp"
.section .rdata,"dr"
__ZStL19piecewise_construct:
.space 1
.lcomm __ZStL8__ioinit,1,1
LC0:
.ascii "\0"
LC1:
.ascii "<\0"
LC2:
.ascii ">\0"
LC3:
.ascii "|\0"
.text
.globl __Z10printTableNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES4_
.def __Z10printTableNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES4_; .scl 2; .type 32; .endef
__Z10printTableNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES4_:
/*
 * void printTable(std::string a, std::string b) -- GCC 6.3 -O0, i386.
 * 8(%ebp) = a, 12(%ebp) = b (by-value string parameters),
 * -12(%ebp) = loop index i, shared by all three loops below.
 * NOTE(review): each loop's condition compares a whole string against ""
 * (operator!=), which never changes inside the loop, so for non-empty
 * input none of these loops terminates and a[i] indexes past the end.
 * This mirrors a bug in the original C++ source, faithfully compiled.
 */
LFB1445:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
pushl %ebx
subl $36, %esp
.cfi_offset 3, -12
movl $0, -12(%ebp)
/* loop 1: while (b != "") print "<" << a[i] << ">", ++i */
L3:
movl $LC0, 4(%esp)
movl 12(%ebp), %eax
movl %eax, (%esp)
call __ZStneIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_
testb %al, %al
je L2
movl -12(%ebp), %eax
movl %eax, (%esp)
movl 8(%ebp), %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEixEj
subl $4, %esp
movzbl (%eax), %eax
movsbl %al, %ebx
movl $LC1, 4(%esp)
movl $__ZSt4cout, (%esp)
call __ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl %ebx, 4(%esp)
movl %eax, (%esp)
call __ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c
movl $LC2, 4(%esp)
movl %eax, (%esp)
call __ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
addl $1, -12(%ebp)
jmp L3
/* loop 2: while (a != "") print "|" << a[i], ++i */
L2:
movl $LC0, 4(%esp)
movl 8(%ebp), %eax
movl %eax, (%esp)
call __ZStneIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_
testb %al, %al
je L4
movl -12(%ebp), %eax
movl %eax, (%esp)
movl 8(%ebp), %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEixEj
subl $4, %esp
movzbl (%eax), %eax
movsbl %al, %ebx
movl $LC3, 4(%esp)
movl $__ZSt4cout, (%esp)
call __ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl %ebx, 4(%esp)
movl %eax, (%esp)
call __ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c
addl $1, -12(%ebp)
jmp L2
/* loop 3: while (b != "") print "<" << b (the entire string) */
L4:
movl $LC0, 4(%esp)
movl 12(%ebp), %eax
movl %eax, (%esp)
call __ZStneIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_
testb %al, %al
je L6
movl $LC1, 4(%esp)
movl $__ZSt4cout, (%esp)
call __ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl %eax, %edx
movl 12(%ebp), %eax
movl %eax, 4(%esp)
movl %edx, (%esp)
call __ZStlsIcSt11char_traitsIcESaIcEERSt13basic_ostreamIT_T0_ES7_RKNSt7__cxx1112basic_stringIS4_S5_T1_EE
jmp L4
L6:
nop
movl -4(%ebp), %ebx
leave
.cfi_restore 5
.cfi_restore 3
.cfi_def_cfa 4, 4
ret
.cfi_endproc
LFE1445:
.def ___main; .scl 2; .type 32; .endef
.section .rdata,"dr"
LC4:
.ascii "ABCDEFGHIJKLMNOPQRSTUVWXYZ\0"
LC5:
.ascii "1234567890\0"
.text
.globl _main
.def _main; .scl 2; .type 32; .endef
_main:
/*
 * int main(void) -- GCC 6.3 -O0, i386, DWARF-2 exception handling.
 * Builds std::string s1 = "ABC..XYZ" (LC4) at -84(%ebp) and
 * s2 = "1234567890" (LC5) at -108(%ebp), makes by-value copies at
 * -56(%ebp) (of s2) and -32(%ebp) (of s1), then calls
 * printTable(copy_of_s1, copy_of_s2). Destroys all four strings and
 * returns 0 (%ebx staged into %eax).
 * -58/-57(%ebp) are temporary std::allocator<char> objects for the
 * two constructors. L15..L19 are EH landing pads (table LLSDA1446):
 * each destroys whatever was fully constructed before the throw, then
 * rethrows via _Unwind_Resume.
 */
LFB1446:
.cfi_startproc
.cfi_personality 0,___gxx_personality_v0
.cfi_lsda 0,LLSDA1446
leal 4(%esp), %ecx
.cfi_def_cfa 1, 0
andl $-16, %esp
pushl -4(%ecx)
pushl %ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
movl %esp, %ebp
pushl %ebx
pushl %ecx
.cfi_escape 0xf,0x3,0x75,0x78,0x6
.cfi_escape 0x10,0x3,0x2,0x75,0x7c
addl $-128, %esp
call ___main
/* s1(-84) = string(LC4, allocator at -58) */
leal -58(%ebp), %eax
movl %eax, %ecx
call __ZNSaIcEC1Ev
leal -84(%ebp), %eax
leal -58(%ebp), %edx
movl %edx, 4(%esp)
movl $LC4, (%esp)
movl %eax, %ecx
LEHB0:
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1EPKcRKS3_
LEHE0:
subl $8, %esp
leal -58(%ebp), %eax
movl %eax, %ecx
call __ZNSaIcED1Ev
/* s2(-108) = string(LC5, allocator at -57) */
leal -57(%ebp), %eax
movl %eax, %ecx
call __ZNSaIcEC1Ev
leal -108(%ebp), %eax
leal -57(%ebp), %edx
movl %edx, 4(%esp)
movl $LC5, (%esp)
movl %eax, %ecx
LEHB1:
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1EPKcRKS3_
LEHE1:
subl $8, %esp
leal -57(%ebp), %eax
movl %eax, %ecx
call __ZNSaIcED1Ev
/* by-value argument copies: -56 = copy(s2), -32 = copy(s1) */
leal -56(%ebp), %eax
leal -108(%ebp), %edx
movl %edx, (%esp)
movl %eax, %ecx
LEHB2:
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS4_
LEHE2:
subl $4, %esp
leal -32(%ebp), %eax
leal -84(%ebp), %edx
movl %edx, (%esp)
movl %eax, %ecx
LEHB3:
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS4_
LEHE3:
subl $4, %esp
/* printTable(copy_of_s1, copy_of_s2) -- cdecl, first arg at (%esp) */
leal -56(%ebp), %eax
movl %eax, 4(%esp)
leal -32(%ebp), %eax
movl %eax, (%esp)
LEHB4:
call __Z10printTableNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES4_
LEHE4:
/* normal path: destroy the copies, then s2 and s1; return 0 */
leal -32(%ebp), %eax
movl %eax, %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
leal -56(%ebp), %eax
movl %eax, %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
movl $0, %ebx
leal -108(%ebp), %eax
movl %eax, %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
leal -84(%ebp), %eax
movl %eax, %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
movl %ebx, %eax
jmp L20
/* landing pad: s1 ctor threw -- destroy its allocator, rethrow */
L15:
movl %eax, %ebx
leal -58(%ebp), %eax
movl %eax, %ecx
call __ZNSaIcED1Ev
movl %ebx, %eax
movl %eax, (%esp)
LEHB5:
call __Unwind_Resume
/* landing pad: s2 ctor threw -- destroy its allocator, then s1 */
L16:
movl %eax, %ebx
leal -57(%ebp), %eax
movl %eax, %ecx
call __ZNSaIcED1Ev
jmp L11
/* landing pad: printTable threw -- unwind copies, then s2, then s1 */
L19:
movl %eax, %ebx
leal -32(%ebp), %eax
movl %eax, %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
jmp L13
L18:
movl %eax, %ebx
L13:
leal -56(%ebp), %eax
movl %eax, %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
jmp L14
L17:
movl %eax, %ebx
L14:
leal -108(%ebp), %eax
movl %eax, %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
L11:
leal -84(%ebp), %eax
movl %eax, %ecx
call __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
movl %ebx, %eax
movl %eax, (%esp)
call __Unwind_Resume
LEHE5:
L20:
leal -8(%ebp), %esp
popl %ecx
.cfi_restore 1
.cfi_def_cfa 1, 0
popl %ebx
.cfi_restore 3
popl %ebp
.cfi_restore 5
leal -4(%ecx), %esp
.cfi_def_cfa 4, 4
ret
.cfi_endproc
LFE1446:
.def ___gxx_personality_v0; .scl 2; .type 32; .endef
.section .gcc_except_table,"w"
LLSDA1446:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 LLSDACSE1446-LLSDACSB1446
LLSDACSB1446:
.uleb128 LEHB0-LFB1446
.uleb128 LEHE0-LEHB0
.uleb128 L15-LFB1446
.uleb128 0
.uleb128 LEHB1-LFB1446
.uleb128 LEHE1-LEHB1
.uleb128 L16-LFB1446
.uleb128 0
.uleb128 LEHB2-LFB1446
.uleb128 LEHE2-LEHB2
.uleb128 L17-LFB1446
.uleb128 0
.uleb128 LEHB3-LFB1446
.uleb128 LEHE3-LEHB3
.uleb128 L18-LFB1446
.uleb128 0
.uleb128 LEHB4-LFB1446
.uleb128 LEHE4-LEHB4
.uleb128 L19-LFB1446
.uleb128 0
.uleb128 LEHB5-LFB1446
.uleb128 LEHE5-LEHB5
.uleb128 0
.uleb128 0
LLSDACSE1446:
.text
.section .text$_ZStneIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_,"x"
.linkonce discard
.globl __ZStneIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_
.def __ZStneIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_; .scl 2; .type 32; .endef
__ZStneIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_:
/*
 * bool operator!=(const std::string&, const char*)
 * Implemented as !(lhs == rhs): forwards both args to operator== and
 * flips the low bit of the boolean result.
 */
LFB1654:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
subl $24, %esp
movl 12(%ebp), %eax
movl %eax, 4(%esp)
movl 8(%ebp), %eax
movl %eax, (%esp)
call __ZSteqIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_
xorl $1, %eax
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
LFE1654:
.section .text$_ZSteqIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_,"x"
.linkonce discard
.globl __ZSteqIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_
.def __ZSteqIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_; .scl 2; .type 32; .endef
__ZSteqIcSt11char_traitsIcESaIcEEbRKNSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_:
/*
 * bool operator==(const std::string&, const char*)
 * Calls string::compare(const char*) (thiscall: string* in %ecx) and
 * returns compare(...) == 0 via sete.
 */
LFB1750:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
subl $24, %esp
movl 12(%ebp), %eax
movl %eax, (%esp)
movl 8(%ebp), %ecx
call __ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEPKc
subl $4, %esp
testl %eax, %eax
sete %al
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
LFE1750:
.text
.def ___tcf_0; .scl 3; .type 32; .endef
___tcf_0:
/*
 * atexit teardown hook (registered by the static-init function below):
 * runs the std::ios_base::Init destructor on __ZStL8__ioinit.
 */
LFB1882:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
subl $8, %esp
movl $__ZStL8__ioinit, %ecx
call __ZNSt8ios_base4InitD1Ev
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
LFE1882:
.def __Z41__static_initialization_and_destruction_0ii; .scl 3; .type 32; .endef
__Z41__static_initialization_and_destruction_0ii:
/*
 * __static_initialization_and_destruction_0(int initialize, int priority)
 * When called with (1, 65535): constructs the iostream init guard
 * __ZStL8__ioinit (std::ios_base::Init) and registers ___tcf_0 with
 * atexit to destroy it at program exit. Any other arguments: no-op.
 */
LFB1881:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
subl $24, %esp
cmpl $1, 8(%ebp)
jne L28
cmpl $65535, 12(%ebp)
jne L28
movl $__ZStL8__ioinit, %ecx
call __ZNSt8ios_base4InitC1Ev
movl $___tcf_0, (%esp)
call _atexit
L28:
nop
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
LFE1881:
.def __GLOBAL__sub_I__Z10printTableNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES4_; .scl 3; .type 32; .endef
__GLOBAL__sub_I__Z10printTableNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES4_:
/*
 * Global constructor stub (placed in .ctors below): invokes the
 * static-init function with (initialize=1, priority=65535) before main.
 */
LFB1883:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
subl $24, %esp
movl $65535, 4(%esp)
movl $1, (%esp)
call __Z41__static_initialization_and_destruction_0ii
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
LFE1883:
.section .ctors,"w"
.align 4
.long __GLOBAL__sub_I__Z10printTableNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES4_
.ident "GCC: (MinGW.org GCC-6.3.0-1) 6.3.0"
.def __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEixEj; .scl 2; .type 32; .endef
.def __ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc; .scl 2; .type 32; .endef
.def __ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c; .scl 2; .type 32; .endef
.def __ZStlsIcSt11char_traitsIcESaIcEERSt13basic_ostreamIT_T0_ES7_RKNSt7__cxx1112basic_stringIS4_S5_T1_EE; .scl 2; .type 32; .endef
.def __ZNSaIcEC1Ev; .scl 2; .type 32; .endef
.def __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1EPKcRKS3_; .scl 2; .type 32; .endef
.def __ZNSaIcED1Ev; .scl 2; .type 32; .endef
.def __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS4_; .scl 2; .type 32; .endef
.def __ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev; .scl 2; .type 32; .endef
.def __Unwind_Resume; .scl 2; .type 32; .endef
.def __ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEPKc; .scl 2; .type 32; .endef
.def __ZNSt8ios_base4InitD1Ev; .scl 2; .type 32; .endef
.def __ZNSt8ios_base4InitC1Ev; .scl 2; .type 32; .endef
.def _atexit; .scl 2; .type 32; .endef
|
Inco-fhevm/inco-sgx-enclave-poc | 2,830 | sgxvm/sgx-sdk/sgx_unwind/libunwind/src/x86_64/setcontext.S | /* libunwind - a platform-independent unwind library
Copyright (C) 2007 Google, Inc
Contributed by Arun Sharma <arun.sharma@google.com>
Copyright (C) 2010 Konstantin Belousov <kib@freebsd.org>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "ucontext_i.h"
/* int _Ux86_64_setcontext (const ucontext_t *ucp)
Restores the machine context provided.
Unlike the libc implementation, doesn't clobber %rax
*/
.global _Ux86_64_setcontext
.type _Ux86_64_setcontext, @function
_Ux86_64_setcontext:
/*
 * In: rdi = ucp. Does not return to its caller: it installs the saved
 * rsp, pushes the saved rip as a return address, and retq's into it.
 */
#if defined __linux__
/* restore fp state */
mov UC_MCONTEXT_FPREGS_PTR(%rdi),%r8
fldenv (%r8)
ldmxcsr FPREGS_OFFSET_MXCSR(%r8)
#elif defined __FreeBSD__
/* restore fp state */
cmpq $UC_MCONTEXT_FPOWNED_FPU,UC_MCONTEXT_OWNEDFP(%rdi)
jne 1f
cmpq $UC_MCONTEXT_FPFMT_XMM,UC_MCONTEXT_FPFORMAT(%rdi)
jne 1f
fxrstor UC_MCONTEXT_FPSTATE(%rdi)
1:
#else
#error Port me
#endif
/* restore the rest of the state */
mov UC_MCONTEXT_GREGS_R8(%rdi),%r8
mov UC_MCONTEXT_GREGS_R9(%rdi),%r9
mov UC_MCONTEXT_GREGS_RBX(%rdi),%rbx
mov UC_MCONTEXT_GREGS_RBP(%rdi),%rbp
mov UC_MCONTEXT_GREGS_R12(%rdi),%r12
mov UC_MCONTEXT_GREGS_R13(%rdi),%r13
mov UC_MCONTEXT_GREGS_R14(%rdi),%r14
mov UC_MCONTEXT_GREGS_R15(%rdi),%r15
mov UC_MCONTEXT_GREGS_RSI(%rdi),%rsi
mov UC_MCONTEXT_GREGS_RDX(%rdi),%rdx
mov UC_MCONTEXT_GREGS_RAX(%rdi),%rax
mov UC_MCONTEXT_GREGS_RCX(%rdi),%rcx
mov UC_MCONTEXT_GREGS_RSP(%rdi),%rsp
/* push the return address on the stack */
mov UC_MCONTEXT_GREGS_RIP(%rdi),%rcx /* rcx is scratch here... */
push %rcx
mov UC_MCONTEXT_GREGS_RCX(%rdi),%rcx /* ...then reloaded with its real value */
mov UC_MCONTEXT_GREGS_RDI(%rdi),%rdi /* rdi last: it is the base for every load above */
retq
.size _Ux86_64_setcontext, . - _Ux86_64_setcontext
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
|
Inco-fhevm/inco-sgx-enclave-poc | 1,438 | sgxvm/sgx-sdk/sgx_unwind/libunwind/src/x86_64/siglongjmp.S | /* libunwind - a platform-independent unwind library
Copyright (C) 2004 Hewlett-Packard Co
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
.globl _UI_siglongjmp_cont
.type _UI_siglongjmp_cont, @function
/*
 * Continuation label: body is a single retq, so all register state is
 * assumed to have been established before control arrives here
 * (presumably by libunwind's siglongjmp machinery -- set up elsewhere).
 */
_UI_siglongjmp_cont:
retq
.size _UI_siglongjmp_cont, . - _UI_siglongjmp_cont
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
|
Inco-fhevm/inco-sgx-enclave-poc | 4,427 | sgxvm/sgx-sdk/sgx_unwind/libunwind/src/x86_64/getcontext.S | /* libunwind - a platform-independent unwind library
Copyright (C) 2008 Google, Inc
Contributed by Paul Pluzhnikov <ppluzhnikov@google.com>
Copyright (C) 2010 Konstantin Belousov <kib@freebsd.org>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "ucontext_i.h"
/* int _Ux86_64_getcontext (ucontext_t *ucp)
Saves the machine context in UCP necessary for libunwind.
Unlike the libc implementation, we don't save the signal mask
and hence avoid the cost of a system call per unwind.
*/
.global _Ux86_64_getcontext
.type _Ux86_64_getcontext, @function
_Ux86_64_getcontext:
.cfi_startproc
/* In: rdi = ucp. Out: rax = 0. Clobbers r8 (Linux FP-save path). */
/* Callee saved: RBX, RBP, R12-R15 */
movq %r12, UC_MCONTEXT_GREGS_R12(%rdi)
movq %r13, UC_MCONTEXT_GREGS_R13(%rdi)
movq %r14, UC_MCONTEXT_GREGS_R14(%rdi)
movq %r15, UC_MCONTEXT_GREGS_R15(%rdi)
movq %rbp, UC_MCONTEXT_GREGS_RBP(%rdi)
movq %rbx, UC_MCONTEXT_GREGS_RBX(%rdi)
/* Save argument registers (not strictly needed, but setcontext
restores them, so don't restore garbage). */
movq %r8, UC_MCONTEXT_GREGS_R8(%rdi)
movq %r9, UC_MCONTEXT_GREGS_R9(%rdi)
movq %rdi, UC_MCONTEXT_GREGS_RDI(%rdi)
movq %rsi, UC_MCONTEXT_GREGS_RSI(%rdi)
movq %rdx, UC_MCONTEXT_GREGS_RDX(%rdi)
movq %rax, UC_MCONTEXT_GREGS_RAX(%rdi)
movq %rcx, UC_MCONTEXT_GREGS_RCX(%rdi)
#if defined __linux__
/* Save fp state (not needed, except for setcontext not
restoring garbage). */
leaq UC_MCONTEXT_FPREGS_MEM(%rdi),%r8
movq %r8, UC_MCONTEXT_FPREGS_PTR(%rdi)
fnstenv (%r8)
stmxcsr FPREGS_OFFSET_MXCSR(%r8)
#elif defined __FreeBSD__
fxsave UC_MCONTEXT_FPSTATE(%rdi)
movq $UC_MCONTEXT_FPOWNED_FPU,UC_MCONTEXT_OWNEDFP(%rdi)
movq $UC_MCONTEXT_FPFMT_XMM,UC_MCONTEXT_FPFORMAT(%rdi)
/* Save rflags and segment registers, so that sigreturn(2)
does not complain. */
pushfq
.cfi_adjust_cfa_offset 8
popq UC_MCONTEXT_RFLAGS(%rdi)
.cfi_adjust_cfa_offset -8
movl $0, UC_MCONTEXT_FLAGS(%rdi)
movw %cs, UC_MCONTEXT_CS(%rdi)
movw %ss, UC_MCONTEXT_SS(%rdi)
#if 0
/* Setting the flags to 0 above disables restore of segment
registers from the context */
movw %ds, UC_MCONTEXT_DS(%rdi)
movw %es, UC_MCONTEXT_ES(%rdi)
movw %fs, UC_MCONTEXT_FS(%rdi)
movw %gs, UC_MCONTEXT_GS(%rdi)
#endif
movq $UC_MCONTEXT_MC_LEN_VAL, UC_MCONTEXT_MC_LEN(%rdi)
#else
#error Port me
#endif
/* Record the caller's rsp/rip, i.e. the state just after this call
returns, so a later setcontext resumes at the return site. */
leaq 8(%rsp), %rax /* exclude this call. */
movq %rax, UC_MCONTEXT_GREGS_RSP(%rdi)
movq 0(%rsp), %rax
movq %rax, UC_MCONTEXT_GREGS_RIP(%rdi)
xorq %rax, %rax
retq
.cfi_endproc
.size _Ux86_64_getcontext, . - _Ux86_64_getcontext
/* int _Ux86_64_getcontext_trace (ucontext_t *ucp)
Saves limited machine context in UCP necessary for libunwind.
Unlike _Ux86_64_getcontext, saves only the parts needed for
fast trace. If fast trace fails, caller will have to get the
full context.
*/
.global _Ux86_64_getcontext_trace
.hidden _Ux86_64_getcontext_trace
.type _Ux86_64_getcontext_trace, @function
_Ux86_64_getcontext_trace:
.cfi_startproc
/* In: rdi = ucp. Out: rax = 0. Only rax is clobbered. */
/* Save only RBP, RBX, RSP, RIP - exclude this call. */
movq %rbp, UC_MCONTEXT_GREGS_RBP(%rdi)
movq %rbx, UC_MCONTEXT_GREGS_RBX(%rdi)
leaq 8(%rsp), %rax /* caller's rsp, with the return address popped */
movq %rax, UC_MCONTEXT_GREGS_RSP(%rdi)
movq 0(%rsp), %rax /* return address = resume rip */
movq %rax, UC_MCONTEXT_GREGS_RIP(%rdi)
xorq %rax, %rax
retq
.cfi_endproc
.size _Ux86_64_getcontext_trace, . - _Ux86_64_getcontext_trace
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.